Diffstat (limited to 'drivers')
792 files changed, 20115 insertions, 12023 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index b811f2173f6..88681aca88c 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -105,7 +105,7 @@ config ACPI_EC_DEBUGFS Be aware that using this interface can confuse your Embedded Controller in a way that a normal reboot is not enough. You then - have to power of your system, and remove the laptop battery for + have to power off your system, and remove the laptop battery for some seconds. An Embedded Controller typically is available on laptops and reads sensor values like battery state and temperature. diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index b76848c80be..6afceb3d403 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -30,18 +30,13 @@ #include <linux/slab.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> +#include <asm/mwait.h> #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 static DEFINE_MUTEX(isolated_cpus_lock); -#define MWAIT_SUBSTATE_MASK (0xf) -#define MWAIT_CSTATE_MASK (0xf) -#define MWAIT_SUBSTATE_SIZE (4) -#define CPUID_MWAIT_LEAF (5) -#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) -#define CPUID5_ECX_INTERRUPT_BREAK (0x2) static unsigned long power_saving_mwait_eax; static unsigned char tsc_detected_unstable; @@ -382,31 +377,32 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device) device_remove_file(&device->dev, &dev_attr_rrtime); } -/* Query firmware how many CPUs should be idle */ -static int acpi_pad_pur(acpi_handle handle, int *num_cpus) +/* + * Query firmware how many CPUs should be idle + * return -1 on failure + */ +static int acpi_pad_pur(acpi_handle handle) { struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *package; - int rev, num, ret = -EINVAL; + int num = -1; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer))) - return -EINVAL; + return num; if (!buffer.length || !buffer.pointer) - return -EINVAL; + return num; package = buffer.pointer; - if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) - goto out; - rev = package->package.elements[0].integer.value; - num = package->package.elements[1].integer.value; - if (rev != 1 || num < 0) - goto out; - *num_cpus = num; - ret = 0; -out: + + if (package->type == ACPI_TYPE_PACKAGE && + package->package.count == 2 && + package->package.elements[0].integer.value == 1) /* rev 1 */ + + num = package->package.elements[1].integer.value; + kfree(buffer.pointer); - return ret; + return num; } /* Notify firmware how many CPUs are idle */ @@ -433,7 +429,8 @@ static void acpi_pad_handle_notify(acpi_handle handle) uint32_t idle_cpus; mutex_lock(&isolated_cpus_lock); - if (acpi_pad_pur(handle, &num_cpus)) { + num_cpus = acpi_pad_pur(handle); + if (num_cpus < 0) { mutex_unlock(&isolated_cpus_lock); return; } diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index df85b53a674..7dad9160f20 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -854,6 +854,7 @@ struct acpi_bit_register_info { ACPI_BITMASK_POWER_BUTTON_STATUS | \ ACPI_BITMASK_SLEEP_BUTTON_STATUS | \ ACPI_BITMASK_RT_CLOCK_STATUS | \ + ACPI_BITMASK_PCIEXP_WAKE_DISABLE | \ ACPI_BITMASK_WAKE_STATUS) #define ACPI_BITMASK_TIMER_ENABLE 0x0001 diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c index 74c24d517f8..4093522eed4 100644 --- a/drivers/acpi/acpica/exutils.c +++ 
b/drivers/acpi/acpica/exutils.c @@ -109,7 +109,7 @@ void acpi_ex_enter_interpreter(void) * * DESCRIPTION: Reacquire the interpreter execution region from within the * interpreter code. Failure to enter the interpreter region is a - * fatal system error. Used in conjuction with + * fatal system error. Used in conjunction with * relinquish_interpreter * ******************************************************************************/ diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c index 22cfcfbd9ff..491191e6cf6 100644 --- a/drivers/acpi/acpica/rsutils.c +++ b/drivers/acpi/acpica/rsutils.c @@ -149,7 +149,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type) /* * 16-, 32-, and 64-bit cases must use the move macros that perform - * endian conversion and/or accomodate hardware that cannot perform + * endian conversion and/or accommodate hardware that cannot perform * misaligned memory transfers */ case ACPI_RSC_MOVE16: diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index 907e350f1c7..fca34ccfd29 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -34,6 +34,6 @@ config ACPI_APEI_ERST_DEBUG depends on ACPI_APEI help ERST is a way provided by APEI to save and retrieve hardware - error infomation to and from a persistent store. Enable this + error information to and from a persistent store. Enable this if you want to debugging and testing the ERST kernel support and firmware implementation. diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 73fd0c7487c..4a904a4bf05 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -445,11 +445,15 @@ EXPORT_SYMBOL_GPL(apei_resources_sub); int apei_resources_request(struct apei_resources *resources, const char *desc) { - struct apei_res *res, *res_bak; + struct apei_res *res, *res_bak = NULL; struct resource *r; + int rc; - apei_resources_sub(resources, &apei_resources_all); + rc = apei_resources_sub(resources, &apei_resources_all); + if (rc) + return rc; + rc = -EINVAL; list_for_each_entry(res, &resources->iomem, list) { r = request_mem_region(res->start, res->end - res->start, desc); @@ -475,7 +479,11 @@ int apei_resources_request(struct apei_resources *resources, } } - apei_resources_merge(&apei_resources_all, resources); + rc = apei_resources_merge(&apei_resources_all, resources); + if (rc) { + pr_err(APEI_PFX "Fail to merge resources!\n"); + goto err_unmap_ioport; + } return 0; err_unmap_ioport: @@ -491,12 +499,13 @@ err_unmap_iomem: break; release_mem_region(res->start, res->end - res->start); } - return -EINVAL; + return rc; } EXPORT_SYMBOL_GPL(apei_resources_request); void apei_resources_release(struct apei_resources *resources) { + int rc; struct apei_res *res; list_for_each_entry(res, &resources->iomem, list) @@ -504,7 +513,9 @@ void apei_resources_release(struct apei_resources *resources) list_for_each_entry(res, &resources->ioport, list) release_region(res->start, res->end - res->start); - apei_resources_sub(&apei_resources_all, resources); + rc = apei_resources_sub(&apei_resources_all, resources); + if (rc) + pr_err(APEI_PFX "Fail to sub resources!\n"); } EXPORT_SYMBOL_GPL(apei_resources_release); diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 465c885938e..cf29df69380 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -426,7 +426,9 @@ DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL, static int einj_check_table(struct acpi_table_einj *einj_tab) { - if 
(einj_tab->header_length != sizeof(struct acpi_table_einj)) + if ((einj_tab->header_length != + (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header))) + && (einj_tab->header_length != sizeof(struct acpi_table_einj))) return -EINVAL; if (einj_tab->header.length < sizeof(struct acpi_table_einj)) return -EINVAL; diff --git a/drivers/acpi/apei/erst-dbg.c b/drivers/acpi/apei/erst-dbg.c index 5281ddda277..de73caf3ceb 100644 --- a/drivers/acpi/apei/erst-dbg.c +++ b/drivers/acpi/apei/erst-dbg.c @@ -2,7 +2,7 @@ * APEI Error Record Serialization Table debug support * * ERST is a way provided by APEI to save and retrieve hardware error - * infomation to and from a persistent store. This file provide the + * information to and from a persistent store. This file provide the * debugging/testing support for ERST kernel support and firmware * implementation. * @@ -111,11 +111,13 @@ retry: goto out; } if (len > erst_dbg_buf_len) { - kfree(erst_dbg_buf); + void *p; rc = -ENOMEM; - erst_dbg_buf = kmalloc(len, GFP_KERNEL); - if (!erst_dbg_buf) + p = kmalloc(len, GFP_KERNEL); + if (!p) goto out; + kfree(erst_dbg_buf); + erst_dbg_buf = p; erst_dbg_buf_len = len; goto retry; } @@ -150,11 +152,13 @@ static ssize_t erst_dbg_write(struct file *filp, const char __user *ubuf, if (mutex_lock_interruptible(&erst_dbg_mutex)) return -EINTR; if (usize > erst_dbg_buf_len) { - kfree(erst_dbg_buf); + void *p; rc = -ENOMEM; - erst_dbg_buf = kmalloc(usize, GFP_KERNEL); - if (!erst_dbg_buf) + p = kmalloc(usize, GFP_KERNEL); + if (!p) goto out; + kfree(erst_dbg_buf); + erst_dbg_buf = p; erst_dbg_buf_len = usize; } rc = copy_from_user(erst_dbg_buf, ubuf, usize); @@ -180,6 +184,7 @@ static const struct file_operations erst_dbg_ops = { .read = erst_dbg_read, .write = erst_dbg_write, .unlocked_ioctl = erst_dbg_ioctl, + .llseek = no_llseek, }; static struct miscdevice erst_dbg_dev = { diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 18645f4e83c..1211c03149e 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -2,7 +2,7 @@ * APEI Error Record Serialization Table support * * ERST is a way provided by APEI to save and retrieve hardware error - * infomation to and from a persistent store. + * information to and from a persistent store. * * For more information about ERST, please refer to ACPI Specification * version 4.0, section 17.4. 
@@ -266,13 +266,30 @@ static int erst_exec_move_data(struct apei_exec_context *ctx, { int rc; u64 offset; + void *src, *dst; + + /* ioremap does not work in interrupt context */ + if (in_interrupt()) { + pr_warning(ERST_PFX + "MOVE_DATA can not be used in interrupt context"); + return -EBUSY; + } rc = __apei_exec_read_register(entry, &offset); if (rc) return rc; - memmove((void *)ctx->dst_base + offset, - (void *)ctx->src_base + offset, - ctx->var2); + + src = ioremap(ctx->src_base + offset, ctx->var2); + if (!src) + return -ENOMEM; + dst = ioremap(ctx->dst_base + offset, ctx->var2); + if (!dst) + return -ENOMEM; + + memmove(dst, src, ctx->var2); + + iounmap(src); + iounmap(dst); return 0; } @@ -750,7 +767,9 @@ __setup("erst_disable", setup_erst_disable); static int erst_check_table(struct acpi_table_erst *erst_tab) { - if (erst_tab->header_length != sizeof(struct acpi_table_erst)) + if ((erst_tab->header_length != + (sizeof(struct acpi_table_erst) - sizeof(erst_tab->header))) + && (erst_tab->header_length != sizeof(struct acpi_table_einj))) return -EINVAL; if (erst_tab->header.length < sizeof(struct acpi_table_erst)) return -EINVAL; diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 385a6059714..0d505e59214 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -302,7 +302,7 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) struct ghes *ghes = NULL; int rc = -EINVAL; - generic = ghes_dev->dev.platform_data; + generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; if (!generic->enabled) return -ENODEV; diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 343168d1826..1a3508a7fe0 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -137,20 +137,23 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) { - struct acpi_hest_generic *generic; struct platform_device *ghes_dev; struct ghes_arr *ghes_arr = data; int rc; if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR) return 0; - generic = (struct acpi_hest_generic *)hest_hdr; - if (!generic->enabled) + + if (!((struct acpi_hest_generic *)hest_hdr)->enabled) return 0; ghes_dev = platform_device_alloc("GHES", hest_hdr->source_id); if (!ghes_dev) return -ENOMEM; - ghes_dev->dev.platform_data = generic; + + rc = platform_device_add_data(ghes_dev, &hest_hdr, sizeof(void *)); + if (rc) + goto err; + rc = platform_device_add(ghes_dev); if (rc) goto err; diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c index 8f8bd736d4f..542e5390389 100644 --- a/drivers/acpi/atomicio.c +++ b/drivers/acpi/atomicio.c @@ -142,7 +142,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr, list_add_tail_rcu(&map->list, &acpi_iomaps); spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - return vaddr + (paddr - pg_off); + return map->vaddr + (paddr - map->paddr); err_unmap: iounmap(vaddr); return NULL; diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index dc58402b0a1..98417201e9c 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -273,7 +273,6 @@ static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_CYCLE_COUNT, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, - POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_POWER_NOW, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index 
2bb28b9d91c..af308d03f49 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -183,6 +183,8 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d) { printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); acpi_osi_setup("!Windows 2006"); + acpi_osi_setup("!Windows 2006 SP1"); + acpi_osi_setup("!Windows 2006 SP2"); return 0; } static int __init dmi_disable_osi_win7(const struct dmi_system_id *d) @@ -202,6 +204,23 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { }, }, { + /* + * There have a NVIF method in MSI GX723 DSDT need call by Nvidia + * driver (e.g. nouveau) when user press brightness hotkey. + * Currently, nouveau driver didn't do the job and it causes there + * have a infinite while loop in DSDT when user press hotkey. + * We add MSI GX723's dmi information to this table for workaround + * this issue. + * Will remove MSI GX723 from the table after nouveau grows support. + */ + .callback = dmi_disable_osi_vista, + .ident = "MSI GX723", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star International"), + DMI_MATCH(DMI_PRODUCT_NAME, "GX723"), + }, + }, + { .callback = dmi_disable_osi_vista, .ident = "Sony VGN-NS10J_S", .matches = { @@ -226,6 +245,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { }, }, { + .callback = dmi_disable_osi_vista, + .ident = "Toshiba Satellite L355", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Satellite L355"), + }, + }, + { .callback = dmi_disable_osi_win7, .ident = "ASUS K50IJ", .matches = { @@ -233,6 +260,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"), }, }, + { + .callback = dmi_disable_osi_vista, + .ident = "Toshiba P305D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"), + }, + }, /* * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c221ab535d..310e3b9749c 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -55,7 +55,7 @@ EXPORT_SYMBOL(acpi_root_dir); static int set_power_nocheck(const struct dmi_system_id *id) { printk(KERN_NOTICE PREFIX "%s detected - " - "disable power check in power transistion\n", id->ident); + "disable power check in power transition\n", id->ident); acpi_power_nocheck = 1; return 0; } @@ -80,23 +80,15 @@ static int set_copy_dsdt(const struct dmi_system_id *id) static struct dmi_system_id dsdt_dmi_table[] __initdata = { /* - * Insyde BIOS on some TOSHIBA machines corrupt the DSDT. + * Invoke DSDT corruption work-around on all Toshiba Satellite. * https://bugzilla.kernel.org/show_bug.cgi?id=14679 */ { .callback = set_copy_dsdt, - .ident = "TOSHIBA Satellite A505", + .ident = "TOSHIBA Satellite", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"), - }, - }, - { - .callback = set_copy_dsdt, - .ident = "TOSHIBA Satellite L505D", - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), - DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"), + DMI_MATCH(DMI_PRODUCT_NAME, "Satellite"), }, }, {} @@ -1027,7 +1019,7 @@ static int __init acpi_init(void) /* * If the laptop falls into the DMI check table, the power state check - * will be disabled in the course of device power transistion. + * will be disabled in the course of device power transition. 
*/ dmi_check_system(power_nocheck_dmi_table); diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 7de27d49c4b..6355b575ee5 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c @@ -69,6 +69,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, static const struct file_operations cm_fops = { .write = cm_write, + .llseek = default_llseek, }; int __init acpi_debugfs_init(void) diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c index 0e869b3f81c..411620ef84c 100644 --- a/drivers/acpi/ec_sys.c +++ b/drivers/acpi/ec_sys.c @@ -101,6 +101,7 @@ static struct file_operations acpi_ec_io_ops = { .open = acpi_ec_open_io, .read = acpi_ec_read_io, .write = acpi_ec_write_io, + .llseek = default_llseek, }; int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count) diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index d439314a75d..85d90899380 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c @@ -110,6 +110,7 @@ static const struct file_operations acpi_system_event_ops = { .read = acpi_system_read_event, .release = acpi_system_close_event, .poll = acpi_system_poll_event, + .llseek = default_llseek, }; #endif /* CONFIG_ACPI_PROC_EVENT */ diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 8a3b840c0bb..d94d2953c97 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -369,7 +369,9 @@ static void __exit acpi_fan_exit(void) acpi_bus_unregister_driver(&acpi_fan_driver); +#ifdef CONFIG_ACPI_PROCFS remove_proc_entry(ACPI_FAN_CLASS, acpi_root_dir); +#endif return; } diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index e9699aaed10..bec561c14be 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -29,12 +29,6 @@ static int set_no_mwait(const struct dmi_system_id *id) static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { { - set_no_mwait, "IFL91 board", { - DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), - DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), - DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, - { set_no_mwait, "Extensa 5220", { DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -352,4 +346,5 @@ void __init acpi_early_processor_set_pdc(void) acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, early_init_pdc, NULL, NULL, NULL); + acpi_get_devices("ACPI0007", early_init_pdc, NULL, NULL); } diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 15602189238..347eb21b235 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -850,7 +850,7 @@ static int __init acpi_processor_init(void) printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", acpi_idle_driver.name); } else { - printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s", + printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", cpuidle_get_driver()->name); } diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index ba1bd263d90..3a73a93596e 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -447,8 +447,8 @@ int acpi_processor_notify_smm(struct module *calling_module) if (!try_module_get(calling_module)) return -EINVAL; - /* is_done is set to negative if an error occured, - * and to postitive if _no_ error occured, but SMM + /* is_done is set to negative if an error occurred, + * and to postitive if _no_ error occurred, but SMM * was already notified. 
This avoids double notification * which might lead to unexpected results... */ diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index cf82989ae75..4754ff6e70e 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -363,6 +363,12 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d) return 0; } +static int __init init_nvs_nosave(const struct dmi_system_id *d) +{ + acpi_nvs_nosave(); + return 0; +} + static struct dmi_system_id __initdata acpisleep_dmi_table[] = { { .callback = init_old_suspend_ordering, @@ -397,6 +403,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), }, }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-SR11M", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"), + }, + }, + { + .callback = init_nvs_nosave, + .ident = "Everex StepNote Series", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 68e2e4582fa..f8588f81048 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -100,7 +100,7 @@ static const struct acpi_dlevel acpi_debug_levels[] = { ACPI_DEBUG_INIT(ACPI_LV_EVENTS), }; -static int param_get_debug_layer(char *buffer, struct kernel_param *kp) +static int param_get_debug_layer(char *buffer, const struct kernel_param *kp) { int result = 0; int i; @@ -128,7 +128,7 @@ static int param_get_debug_layer(char *buffer, struct kernel_param *kp) return result; } -static int param_get_debug_level(char *buffer, struct kernel_param *kp) +static int param_get_debug_level(char *buffer, const struct kernel_param *kp) { int result = 0; int i; @@ -149,10 +149,18 @@ static int param_get_debug_level(char *buffer, struct kernel_param *kp) return result; } -module_param_call(debug_layer, param_set_uint, param_get_debug_layer, - &acpi_dbg_layer, 0644); -module_param_call(debug_level, param_set_uint, param_get_debug_level, - &acpi_dbg_level, 0644); +static struct kernel_param_ops param_ops_debug_layer = { + .set = param_set_uint, + .get = param_get_debug_layer, +}; + +static struct kernel_param_ops param_ops_debug_level = { + .set = param_set_uint, + .get = param_get_debug_level, +}; + +module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644); +module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644); static char trace_method_name[6]; module_param_string(trace_method_name, trace_method_name, 6, 0644); diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index c5fef01b3c9..b8367612659 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -59,8 +59,8 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context, "support\n")); *cap |= ACPI_VIDEO_BACKLIGHT; if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy))) - printk(KERN_WARNING FW_BUG PREFIX "ACPI brightness " - "control misses _BQC function\n"); + printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, " + "cannot determine initial brightness\n"); /* We have backlight support, no need to scan further */ return AE_CTRL_TERMINATE; } diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index d31590e7011..2737b975220 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -298,7 +298,7 @@ int amba_device_register(struct amba_device *dev, struct resource *parent) amba_put_disable_pclk(dev); - if (cid == 
0xb105f00d) + if (cid == AMBA_CID) dev->periphid = pid; if (!dev->periphid) diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index d5df04a395c..c501af5b12b 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -99,7 +99,7 @@ obj-$(CONFIG_ATA_GENERIC) += ata_generic.o # Should be last libata driver obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o -libata-objs := libata-core.o libata-scsi.o libata-eh.o +libata-y := libata-core.o libata-scsi.o libata-eh.o libata-transport.o libata-$(CONFIG_ATA_SFF) += libata-sff.o libata-$(CONFIG_SATA_PMP) += libata-pmp.o libata-$(CONFIG_ATA_ACPI) += libata-acpi.o diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index ff1c945fba9..328826381a2 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -90,6 +90,10 @@ static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); static int ahci_pci_device_resume(struct pci_dev *pdev); #endif +static struct scsi_host_template ahci_sht = { + AHCI_SHT("ahci"), +}; + static struct ata_port_operations ahci_vt8251_ops = { .inherits = &ahci_ops, .hardreset = ahci_vt8251_hardreset, @@ -1204,9 +1208,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) ata_port_pbar_desc(ap, AHCI_PCI_BAR, 0x100 + ap->port_no * 0x80, "port"); - /* set initial link pm policy */ - ap->pm_policy = NOT_AVAILABLE; - /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) ap->em_message_type = hpriv->em_msg_type; diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 474427b6f99..329cbbb9128 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -72,6 +72,7 @@ enum { AHCI_CMD_RESET = (1 << 8), AHCI_CMD_CLR_BUSY = (1 << 10), + RX_FIS_PIO_SETUP = 0x20, /* offset of PIO Setup FIS data */ RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */ RX_FIS_SDB = 0x58, /* offset of SDB FIS data */ RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */ @@ -201,7 +202,6 @@ enum { AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */ AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ - AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */ AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */ @@ -216,7 +216,7 @@ enum { AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | ATA_FLAG_ACPI_SATA | ATA_FLAG_AN | - ATA_FLAG_IPM, + ATA_FLAG_LPM, ICH_MAP = 0x90, /* ICH MAP register */ @@ -298,7 +298,17 @@ struct ahci_host_priv { extern int ahci_ignore_sss; -extern struct scsi_host_template ahci_sht; +extern struct device_attribute *ahci_shost_attrs[]; +extern struct device_attribute *ahci_sdev_attrs[]; + +#define AHCI_SHT(drv_name) \ + ATA_NCQ_SHT(drv_name), \ + .can_queue = AHCI_MAX_CMDS - 1, \ + .sg_tablesize = AHCI_MAX_SG, \ + .dma_boundary = AHCI_DMA_BOUNDARY, \ + .shost_attrs = ahci_shost_attrs, \ + .sdev_attrs = ahci_sdev_attrs + extern struct ata_port_operations ahci_ops; void ahci_save_initial_config(struct device *dev, diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 4e97f33cca4..6fef1fa75c5 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -23,6 +23,10 @@ #include <linux/ahci_platform.h> #include "ahci.h" +static struct scsi_host_template ahci_platform_sht = { + AHCI_SHT("ahci_platform"), +}; + static int __init ahci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -125,9 +129,6 @@ static int __init 
ahci_probe(struct platform_device *pdev) ata_port_desc(ap, "mmio %pR", mem); ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80); - /* set initial link pm policy */ - ap->pm_policy = NOT_AVAILABLE; - /* set enclosure management message type */ if (ap->flags & ATA_FLAG_EM) ap->em_message_type = hpriv->em_msg_type; @@ -145,7 +146,7 @@ static int __init ahci_probe(struct platform_device *pdev) ahci_print_info(host, "platform"); rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED, - &ahci_sht); + &ahci_platform_sht); if (rc) goto err0; diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c index cc5f7726bde..6981f7680a0 100644 --- a/drivers/ata/ata_generic.c +++ b/drivers/ata/ata_generic.c @@ -35,6 +35,7 @@ enum { ATA_GEN_CLASS_MATCH = (1 << 0), ATA_GEN_FORCE_DMA = (1 << 1), + ATA_GEN_INTEL_IDER = (1 << 2), }; /** @@ -109,6 +110,49 @@ static struct ata_port_operations generic_port_ops = { static int all_generic_ide; /* Set to claim all devices */ /** + * is_intel_ider - identify intel IDE-R devices + * @dev: PCI device + * + * Distinguish Intel IDE-R controller devices from other Intel IDE + * devices. IDE-R devices have no timing registers and are in + * most respects virtual. They should be driven by the ata_generic + * driver. + * + * IDE-R devices have PCI offset 0xF8.L as zero, later Intel ATA has + * it non zero. All Intel ATA has 0x40 writable (timing), but it is + * not writable on IDE-R devices (this is guaranteed). + */ + +static int is_intel_ider(struct pci_dev *dev) +{ + /* For Intel IDE the value at 0xF8 is only zero on IDE-R + interfaces */ + u32 r; + u16 t; + + /* Check the manufacturing ID, it will be zero for IDE-R */ + pci_read_config_dword(dev, 0xF8, &r); + /* Not IDE-R: punt so that ata_(old)piix gets it */ + if (r != 0) + return 0; + /* 0xF8 will also be zero on some early Intel IDE devices + but they will have a sane timing register */ + pci_read_config_word(dev, 0x40, &t); + if (t != 0) + return 0; + /* Finally check if the timing register is writable so that + we eliminate any early devices hot-docked in a docking + station */ + pci_write_config_word(dev, 0x40, 1); + pci_read_config_word(dev, 0x40, &t); + if (t) { + pci_write_config_word(dev, 0x40, 0); + return 0; + } + return 1; +} + +/** * ata_generic_init - attach generic IDE * @dev: PCI device found * @id: match entry @@ -134,6 +178,10 @@ static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id if ((id->driver_data & ATA_GEN_CLASS_MATCH) && all_generic_ide == 0) return -ENODEV; + if (id->driver_data & ATA_GEN_INTEL_IDER) + if (!is_intel_ider(dev)) + return -ENODEV; + /* Devices that need care */ if (dev->vendor == PCI_VENDOR_ID_UMC && dev->device == PCI_DEVICE_ID_UMC_UM8886A && @@ -186,7 +234,11 @@ static struct pci_device_id ata_generic[] = { { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_3), }, { PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_5), }, -#endif +#endif + /* Intel, IDE class device */ + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, + .driver_data = ATA_GEN_INTEL_IDER }, /* Must come last. 
If you add entries adjust this table appropriately */ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL), .driver_data = ATA_GEN_CLASS_MATCH }, diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index d712675d0a9..6cb14ca8ee8 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -158,7 +158,6 @@ struct piix_map_db { struct piix_host_priv { const int *map; u32 saved_iocfg; - spinlock_t sidpr_lock; /* FIXME: remove once locking in EH is fixed */ void __iomem *sidpr; }; @@ -175,6 +174,8 @@ static int piix_sidpr_scr_read(struct ata_link *link, unsigned int reg, u32 *val); static int piix_sidpr_scr_write(struct ata_link *link, unsigned int reg, u32 val); +static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints); static bool piix_irq_check(struct ata_port *ap); #ifdef CONFIG_PM static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); @@ -209,6 +210,8 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH3 (E7500/1) UDMA 100 */ { 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, + /* Intel ICH4-L */ + { 0x8086, 0x24C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, /* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */ { 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, { 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 }, @@ -348,11 +351,22 @@ static struct ata_port_operations ich_pata_ops = { .set_dmamode = ich_set_dmamode, }; +static struct device_attribute *piix_sidpr_shost_attrs[] = { + &dev_attr_link_power_management_policy, + NULL +}; + +static struct scsi_host_template piix_sidpr_sht = { + ATA_BMDMA_SHT(DRV_NAME), + .shost_attrs = piix_sidpr_shost_attrs, +}; + static struct ata_port_operations piix_sidpr_sata_ops = { .inherits = &piix_sata_ops, .hardreset = sata_std_hardreset, .scr_read = piix_sidpr_scr_read, .scr_write = piix_sidpr_scr_write, + .set_lpm = piix_sidpr_set_lpm, }; static const struct piix_map_db ich5_map_db = { @@ -956,15 +970,12 @@ static int piix_sidpr_scr_read(struct ata_link *link, unsigned int reg, u32 *val) { struct piix_host_priv *hpriv = link->ap->host->private_data; - unsigned long flags; if (reg >= ARRAY_SIZE(piix_sidx_map)) return -EINVAL; - spin_lock_irqsave(&hpriv->sidpr_lock, flags); piix_sidpr_sel(link, reg); *val = ioread32(hpriv->sidpr + PIIX_SIDPR_DATA); - spin_unlock_irqrestore(&hpriv->sidpr_lock, flags); return 0; } @@ -972,18 +983,21 @@ static int piix_sidpr_scr_write(struct ata_link *link, unsigned int reg, u32 val) { struct piix_host_priv *hpriv = link->ap->host->private_data; - unsigned long flags; if (reg >= ARRAY_SIZE(piix_sidx_map)) return -EINVAL; - spin_lock_irqsave(&hpriv->sidpr_lock, flags); piix_sidpr_sel(link, reg); iowrite32(val, hpriv->sidpr + PIIX_SIDPR_DATA); - spin_unlock_irqrestore(&hpriv->sidpr_lock, flags); return 0; } +static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints) +{ + return sata_link_scr_lpm(link, policy, false); +} + static bool piix_irq_check(struct ata_port *ap) { if (unlikely(!ap->ioaddr.bmdma_addr)) @@ -1543,6 +1557,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, struct device *dev = &pdev->dev; struct ata_port_info port_info[2]; const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] }; + struct scsi_host_template *sht = &piix_sht; unsigned long port_flags; struct ata_host *host; struct piix_host_priv *hpriv; @@ -1577,7 +1592,6 @@ static int 
__devinit piix_init_one(struct pci_dev *pdev, hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); if (!hpriv) return -ENOMEM; - spin_lock_init(&hpriv->sidpr_lock); /* Save IOCFG, this will be used for cable detection, quirk * detection and restoration on detach. This is necessary @@ -1612,6 +1626,8 @@ static int __devinit piix_init_one(struct pci_dev *pdev, rc = piix_init_sidpr(host); if (rc) return rc; + if (host->ports[0]->ops == &piix_sidpr_sata_ops) + sht = &piix_sidpr_sht; } /* apply IOCFG bit18 quirk */ @@ -1638,7 +1654,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, host->flags |= ATA_HOST_PARALLEL_SCAN; pci_set_master(pdev); - return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &piix_sht); + return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht); } static void piix_remove_one(struct pci_dev *pdev) diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 68dc6785472..ebc08d65b3d 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -56,9 +56,8 @@ MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip) module_param_named(ignore_sss, ahci_ignore_sss, int, 0444); MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)"); -static int ahci_enable_alpm(struct ata_port *ap, - enum link_pm policy); -static void ahci_disable_alpm(struct ata_port *ap); +static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints); static ssize_t ahci_led_show(struct ata_port *ap, char *buf); static ssize_t ahci_led_store(struct ata_port *ap, const char *buf, size_t size); @@ -121,7 +120,7 @@ static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL); static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO, ahci_read_em_buffer, ahci_store_em_buffer); -static struct device_attribute *ahci_shost_attrs[] = { +struct device_attribute *ahci_shost_attrs[] = { &dev_attr_link_power_management_policy, &dev_attr_em_message_type, &dev_attr_em_message, @@ -132,22 +131,14 @@ static struct device_attribute *ahci_shost_attrs[] = { &dev_attr_em_buffer, NULL }; +EXPORT_SYMBOL_GPL(ahci_shost_attrs); -static struct device_attribute *ahci_sdev_attrs[] = { +struct device_attribute *ahci_sdev_attrs[] = { &dev_attr_sw_activity, &dev_attr_unload_heads, NULL }; - -struct scsi_host_template ahci_sht = { - ATA_NCQ_SHT("ahci"), - .can_queue = AHCI_MAX_CMDS - 1, - .sg_tablesize = AHCI_MAX_SG, - .dma_boundary = AHCI_DMA_BOUNDARY, - .shost_attrs = ahci_shost_attrs, - .sdev_attrs = ahci_sdev_attrs, -}; -EXPORT_SYMBOL_GPL(ahci_sht); +EXPORT_SYMBOL_GPL(ahci_sdev_attrs); struct ata_port_operations ahci_ops = { .inherits = &sata_pmp_port_ops, @@ -172,8 +163,7 @@ struct ata_port_operations ahci_ops = { .pmp_attach = ahci_pmp_attach, .pmp_detach = ahci_pmp_detach, - .enable_pm = ahci_enable_alpm, - .disable_pm = ahci_disable_alpm, + .set_lpm = ahci_set_lpm, .em_show = ahci_led_show, .em_store = ahci_led_store, .sw_activity_show = ahci_activity_show, @@ -577,7 +567,7 @@ int ahci_stop_engine(struct ata_port *ap) writel(tmp, port_mmio + PORT_CMD); /* wait for engine to stop. 
This could be as long as 500 msec */ - tmp = ata_wait_register(port_mmio + PORT_CMD, + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); if (tmp & PORT_CMD_LIST_ON) return -EIO; @@ -624,7 +614,7 @@ static int ahci_stop_fis_rx(struct ata_port *ap) writel(tmp, port_mmio + PORT_CMD); /* wait for completion, spec says 500ms, give it 1000 */ - tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON, + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_FIS_ON, PORT_CMD_FIS_ON, 10, 1000); if (tmp & PORT_CMD_FIS_ON) return -EBUSY; @@ -650,127 +640,56 @@ static void ahci_power_up(struct ata_port *ap) writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD); } -static void ahci_disable_alpm(struct ata_port *ap) +static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned int hints) { + struct ata_port *ap = link->ap; struct ahci_host_priv *hpriv = ap->host->private_data; - void __iomem *port_mmio = ahci_port_base(ap); - u32 cmd; struct ahci_port_priv *pp = ap->private_data; - - /* IPM bits should be disabled by libata-core */ - /* get the existing command bits */ - cmd = readl(port_mmio + PORT_CMD); - - /* disable ALPM and ASP */ - cmd &= ~PORT_CMD_ASP; - cmd &= ~PORT_CMD_ALPE; - - /* force the interface back to active */ - cmd |= PORT_CMD_ICC_ACTIVE; - - /* write out new cmd value */ - writel(cmd, port_mmio + PORT_CMD); - cmd = readl(port_mmio + PORT_CMD); - - /* wait 10ms to be sure we've come out of any low power state */ - msleep(10); - - /* clear out any PhyRdy stuff from interrupt status */ - writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT); - - /* go ahead and clean out PhyRdy Change from Serror too */ - ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); - - /* - * Clear flag to indicate that we should ignore all PhyRdy - * state changes - */ - hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG; - - /* - * Enable interrupts on Phy Ready. - */ - pp->intr_mask |= PORT_IRQ_PHYRDY; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); - - /* - * don't change the link pm policy - we can be called - * just to turn of link pm temporarily - */ -} - -static int ahci_enable_alpm(struct ata_port *ap, - enum link_pm policy) -{ - struct ahci_host_priv *hpriv = ap->host->private_data; void __iomem *port_mmio = ahci_port_base(ap); - u32 cmd; - struct ahci_port_priv *pp = ap->private_data; - u32 asp; - - /* Make sure the host is capable of link power management */ - if (!(hpriv->cap & HOST_CAP_ALPM)) - return -EINVAL; - switch (policy) { - case MAX_PERFORMANCE: - case NOT_AVAILABLE: + if (policy != ATA_LPM_MAX_POWER) { /* - * if we came here with NOT_AVAILABLE, - * it just means this is the first time we - * have tried to enable - default to max performance, - * and let the user go to lower power modes on request. + * Disable interrupts on Phy Ready. This keeps us from + * getting woken up due to spurious phy ready + * interrupts. */ - ahci_disable_alpm(ap); - return 0; - case MIN_POWER: - /* configure HBA to enter SLUMBER */ - asp = PORT_CMD_ASP; - break; - case MEDIUM_POWER: - /* configure HBA to enter PARTIAL */ - asp = 0; - break; - default: - return -EINVAL; + pp->intr_mask &= ~PORT_IRQ_PHYRDY; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + + sata_link_scr_lpm(link, policy, false); } - /* - * Disable interrupts on Phy Ready. This keeps us from - * getting woken up due to spurious phy ready interrupts - * TBD - Hot plug should be done via polling now, is - * that even supported? 
- */ - pp->intr_mask &= ~PORT_IRQ_PHYRDY; - writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + if (hpriv->cap & HOST_CAP_ALPM) { + u32 cmd = readl(port_mmio + PORT_CMD); - /* - * Set a flag to indicate that we should ignore all PhyRdy - * state changes since these can happen now whenever we - * change link state - */ - hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG; + if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { + cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); + cmd |= PORT_CMD_ICC_ACTIVE; - /* get the existing command bits */ - cmd = readl(port_mmio + PORT_CMD); + writel(cmd, port_mmio + PORT_CMD); + readl(port_mmio + PORT_CMD); - /* - * Set ASP based on Policy - */ - cmd |= asp; + /* wait 10ms to be sure we've come out of LPM state */ + ata_msleep(ap, 10); + } else { + cmd |= PORT_CMD_ALPE; + if (policy == ATA_LPM_MIN_POWER) + cmd |= PORT_CMD_ASP; - /* - * Setting this bit will instruct the HBA to aggressively - * enter a lower power link state when it's appropriate and - * based on the value set above for ASP - */ - cmd |= PORT_CMD_ALPE; + /* write out new cmd value */ + writel(cmd, port_mmio + PORT_CMD); + } + } - /* write out new cmd value */ - writel(cmd, port_mmio + PORT_CMD); - cmd = readl(port_mmio + PORT_CMD); + if (policy == ATA_LPM_MAX_POWER) { + sata_link_scr_lpm(link, policy, false); + + /* turn PHYRDY IRQ back on */ + pp->intr_mask |= PORT_IRQ_PHYRDY; + writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK); + } - /* IPM bits should be set by libata-core */ return 0; } @@ -821,7 +740,7 @@ static void ahci_start_port(struct ata_port *ap) emp->led_state, 4); if (rc == -EBUSY) - msleep(1); + ata_msleep(ap, 1); else break; } @@ -880,7 +799,7 @@ int ahci_reset_controller(struct ata_host *host) * reset must complete within 1 second, or * the hardware should be considered fried. 
*/ - tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET, + tmp = ata_wait_register(NULL, mmio + HOST_CTL, HOST_RESET, HOST_RESET, 10, 1000); if (tmp & HOST_RESET) { @@ -1260,7 +1179,7 @@ int ahci_kick_engine(struct ata_port *ap) writel(tmp, port_mmio + PORT_CMD); rc = 0; - tmp = ata_wait_register(port_mmio + PORT_CMD, + tmp = ata_wait_register(ap, port_mmio + PORT_CMD, PORT_CMD_CLO, PORT_CMD_CLO, 1, 500); if (tmp & PORT_CMD_CLO) rc = -EIO; @@ -1290,8 +1209,8 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp, writel(1, port_mmio + PORT_CMD_ISSUE); if (timeout_msec) { - tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, - 1, timeout_msec); + tmp = ata_wait_register(ap, port_mmio + PORT_CMD_ISSUE, + 0x1, 0x1, 1, timeout_msec); if (tmp & 0x1) { ahci_kick_engine(ap); return -EBUSY; @@ -1338,7 +1257,7 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class, } /* spec says at least 5us, but be generous and sleep for 1ms */ - msleep(1); + ata_msleep(ap, 1); /* issue the second D2H Register FIS */ tf.ctl &= ~ATA_SRST; @@ -1668,15 +1587,10 @@ static void ahci_port_intr(struct ata_port *ap) if (unlikely(resetting)) status &= ~PORT_IRQ_BAD_PMP; - /* If we are getting PhyRdy, this is - * just a power state change, we should - * clear out this, plus the PhyRdy/Comm - * Wake bits from Serror - */ - if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) && - (status & PORT_IRQ_PHYRDY)) { + /* if LPM is enabled, PHYRDY doesn't mean anything */ + if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) { status &= ~PORT_IRQ_PHYRDY; - ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18))); + ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG); } if (unlikely(status & PORT_IRQ_ERROR)) { @@ -1838,12 +1752,24 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) { struct ahci_port_priv *pp = qc->ap->private_data; - u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; + u8 *rx_fis = pp->rx_fis; if (pp->fbs_enabled) - d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; + rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ; + + /* + * After a successful execution of an ATA PIO data-in command, + * the device doesn't send D2H Reg FIS to update the TF and + * the host should take TF and E_Status from the preceding PIO + * Setup FIS. 
+ */ + if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE && + !(qc->flags & ATA_QCFLAG_FAILED)) { + ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf); + qc->result_tf.command = (rx_fis + RX_FIS_PIO_SETUP)[15]; + } else + ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf); - ata_tf_from_fis(d2h_fis, &qc->result_tf); return true; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 932eaee5024..7f77c67d267 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -68,7 +68,7 @@ #include <linux/ratelimit.h> #include "libata.h" - +#include "libata-transport.h" /* debounce timing parameters in msecs { interval, duration, timeout } */ const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; @@ -91,8 +91,6 @@ const struct ata_port_operations sata_port_ops = { static unsigned int ata_dev_init_params(struct ata_device *dev, u16 heads, u16 sectors); static unsigned int ata_dev_set_xfermode(struct ata_device *dev); -static unsigned int ata_dev_set_feature(struct ata_device *dev, - u8 enable, u8 feature); static void ata_dev_xfermask(struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev); @@ -1017,7 +1015,7 @@ const char *ata_mode_string(unsigned long xfer_mask) return "<n/a>"; } -static const char *sata_spd_string(unsigned int spd) +const char *sata_spd_string(unsigned int spd) { static const char * const spd_str[] = { "1.5 Gbps", @@ -1030,182 +1028,6 @@ static const char *sata_spd_string(unsigned int spd) return spd_str[spd - 1]; } -static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) -{ - struct ata_link *link = dev->link; - struct ata_port *ap = link->ap; - u32 scontrol; - unsigned int err_mask; - int rc; - - /* - * disallow DIPM for drivers which haven't set - * ATA_FLAG_IPM. This is because when DIPM is enabled, - * phy ready will be set in the interrupt status on - * state changes, which will cause some drivers to - * think there are errors - additionally drivers will - * need to disable hot plug. - */ - if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) { - ap->pm_policy = NOT_AVAILABLE; - return -EINVAL; - } - - /* - * For DIPM, we will only enable it for the - * min_power setting. - * - * Why? Because Disks are too stupid to know that - * If the host rejects a request to go to SLUMBER - * they should retry at PARTIAL, and instead it - * just would give up. So, for medium_power to - * work at all, we need to only allow HIPM. 
- */ - rc = sata_scr_read(link, SCR_CONTROL, &scontrol); - if (rc) - return rc; - - switch (policy) { - case MIN_POWER: - /* no restrictions on IPM transitions */ - scontrol &= ~(0x3 << 8); - rc = sata_scr_write(link, SCR_CONTROL, scontrol); - if (rc) - return rc; - - /* enable DIPM */ - if (dev->flags & ATA_DFLAG_DIPM) - err_mask = ata_dev_set_feature(dev, - SETFEATURES_SATA_ENABLE, SATA_DIPM); - break; - case MEDIUM_POWER: - /* allow IPM to PARTIAL */ - scontrol &= ~(0x1 << 8); - scontrol |= (0x2 << 8); - rc = sata_scr_write(link, SCR_CONTROL, scontrol); - if (rc) - return rc; - - /* - * we don't have to disable DIPM since IPM flags - * disallow transitions to SLUMBER, which effectively - * disable DIPM if it does not support PARTIAL - */ - break; - case NOT_AVAILABLE: - case MAX_PERFORMANCE: - /* disable all IPM transitions */ - scontrol |= (0x3 << 8); - rc = sata_scr_write(link, SCR_CONTROL, scontrol); - if (rc) - return rc; - - /* - * we don't have to disable DIPM since IPM flags - * disallow all transitions which effectively - * disable DIPM anyway. - */ - break; - } - - /* FIXME: handle SET FEATURES failure */ - (void) err_mask; - - return 0; -} - -/** - * ata_dev_enable_pm - enable SATA interface power management - * @dev: device to enable power management - * @policy: the link power management policy - * - * Enable SATA Interface power management. This will enable - * Device Interface Power Management (DIPM) for min_power - * policy, and then call driver specific callbacks for - * enabling Host Initiated Power management. - * - * Locking: Caller. - * Returns: -EINVAL if IPM is not supported, 0 otherwise. - */ -void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy) -{ - int rc = 0; - struct ata_port *ap = dev->link->ap; - - /* set HIPM first, then DIPM */ - if (ap->ops->enable_pm) - rc = ap->ops->enable_pm(ap, policy); - if (rc) - goto enable_pm_out; - rc = ata_dev_set_dipm(dev, policy); - -enable_pm_out: - if (rc) - ap->pm_policy = MAX_PERFORMANCE; - else - ap->pm_policy = policy; - return /* rc */; /* hopefully we can use 'rc' eventually */ -} - -#ifdef CONFIG_PM -/** - * ata_dev_disable_pm - disable SATA interface power management - * @dev: device to disable power management - * - * Disable SATA Interface power management. This will disable - * Device Interface Power Management (DIPM) without changing - * policy, call driver specific callbacks for disabling Host - * Initiated Power management. - * - * Locking: Caller. 
- * Returns: void - */ -static void ata_dev_disable_pm(struct ata_device *dev) -{ - struct ata_port *ap = dev->link->ap; - - ata_dev_set_dipm(dev, MAX_PERFORMANCE); - if (ap->ops->disable_pm) - ap->ops->disable_pm(ap); -} -#endif /* CONFIG_PM */ - -void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy) -{ - ap->pm_policy = policy; - ap->link.eh_info.action |= ATA_EH_LPM; - ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY; - ata_port_schedule_eh(ap); -} - -#ifdef CONFIG_PM -static void ata_lpm_enable(struct ata_host *host) -{ - struct ata_link *link; - struct ata_port *ap; - struct ata_device *dev; - int i; - - for (i = 0; i < host->n_ports; i++) { - ap = host->ports[i]; - ata_for_each_link(link, ap, EDGE) { - ata_for_each_dev(dev, link, ALL) - ata_dev_disable_pm(dev); - } - } -} - -static void ata_lpm_disable(struct ata_host *host) -{ - int i; - - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - ata_lpm_schedule(ap, ap->pm_policy); - } -} -#endif /* CONFIG_PM */ - /** * ata_dev_classify - determine device type based on ATA-spec signature * @tf: ATA taskfile register set for device to be identified @@ -1806,8 +1628,14 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, } } + if (ap->ops->error_handler) + ata_eh_release(ap); + rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); + if (ap->ops->error_handler) + ata_eh_acquire(ap); + ata_sff_flush_pio_task(ap); if (!rc) { @@ -2564,13 +2392,6 @@ int ata_dev_configure(struct ata_device *dev) if (dev->flags & ATA_DFLAG_LBA48) dev->max_sectors = ATA_MAX_SECTORS_LBA48; - if (!(dev->horkage & ATA_HORKAGE_IPM)) { - if (ata_id_has_hipm(dev->id)) - dev->flags |= ATA_DFLAG_HIPM; - if (ata_id_has_dipm(dev->id)) - dev->flags |= ATA_DFLAG_DIPM; - } - /* Limit PATA drive on SATA cable bridge transfers to udma5, 200 sectors */ if (ata_dev_knobble(dev)) { @@ -2591,13 +2412,6 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); - if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { - dev->horkage |= ATA_HORKAGE_IPM; - - /* reset link pm_policy for this port to no pm */ - ap->pm_policy = MAX_PERFORMANCE; - } - if (ap->ops->dev_config) ap->ops->dev_config(dev); @@ -3596,7 +3410,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, warned = 1; } - msleep(50); + ata_msleep(link->ap, 50); } } @@ -3617,7 +3431,7 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) { - msleep(ATA_WAIT_AFTER_RESET); + ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); return ata_wait_ready(link, deadline, check_ready); } @@ -3628,7 +3442,7 @@ int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, * @params: timing parameters { interval, duratinon, timeout } in msec * @deadline: deadline jiffies for the operation * -* Make sure SStatus of @link reaches stable state, determined by + * Make sure SStatus of @link reaches stable state, determined by * holding the same value where DET is not 1 for @duration polled * every @interval, before @timeout. Timeout constraints the * beginning of the stable state. 
Because DET gets stuck at 1 on @@ -3665,7 +3479,7 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, last_jiffies = jiffies; while (1) { - msleep(interval); + ata_msleep(link->ap, interval); if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) return rc; cur &= 0xf; @@ -3730,7 +3544,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, * immediately after resuming. Delay 200ms before * debouncing. */ - msleep(200); + ata_msleep(link->ap, 200); /* is SControl restored correctly? */ if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) @@ -3760,6 +3574,72 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params, } /** + * sata_link_scr_lpm - manipulate SControl IPM and SPM fields + * @link: ATA link to manipulate SControl for + * @policy: LPM policy to configure + * @spm_wakeup: initiate LPM transition to active state + * + * Manipulate the IPM field of the SControl register of @link + * according to @policy. If @policy is ATA_LPM_MAX_POWER and + * @spm_wakeup is %true, the SPM field is manipulated to wake up + * the link. This function also clears PHYRDY_CHG before + * returning. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on succes, -errno otherwise. + */ +int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, + bool spm_wakeup) +{ + struct ata_eh_context *ehc = &link->eh_context; + bool woken_up = false; + u32 scontrol; + int rc; + + rc = sata_scr_read(link, SCR_CONTROL, &scontrol); + if (rc) + return rc; + + switch (policy) { + case ATA_LPM_MAX_POWER: + /* disable all LPM transitions */ + scontrol |= (0x3 << 8); + /* initiate transition to active state */ + if (spm_wakeup) { + scontrol |= (0x4 << 12); + woken_up = true; + } + break; + case ATA_LPM_MED_POWER: + /* allow LPM to PARTIAL */ + scontrol &= ~(0x1 << 8); + scontrol |= (0x2 << 8); + break; + case ATA_LPM_MIN_POWER: + /* no restrictions on LPM transitions */ + scontrol &= ~(0x3 << 8); + break; + default: + WARN_ON(1); + } + + rc = sata_scr_write(link, SCR_CONTROL, scontrol); + if (rc) + return rc; + + /* give the link time to transit out of LPM state */ + if (woken_up) + msleep(10); + + /* clear PHYRDY_CHG from SError */ + ehc->i.serror &= ~SERR_PHYRDY_CHG; + return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); +} + +/** * ata_std_prereset - prepare for reset * @link: ATA link to be reset * @deadline: deadline jiffies for the operation @@ -3868,7 +3748,7 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 * 10.4.2 says at least 1 ms. */ - msleep(1); + ata_msleep(link->ap, 1); /* bring link back */ rc = sata_link_resume(link, timing, deadline); @@ -4551,6 +4431,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) DPRINTK("EXIT, err_mask=%x\n", err_mask); return err_mask; } + /** * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES * @dev: Device to which command will be sent @@ -4566,8 +4447,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) * RETURNS: * 0 on success, AC_ERR_* mask otherwise. 
*/ -static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, - u8 feature) +unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) { struct ata_taskfile tf; unsigned int err_mask; @@ -4943,8 +4823,13 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) * ata_qc_complete - Complete an active ATA command * @qc: Command to complete * - * Indicate to the mid and upper layers that an ATA - * command has completed, with either an ok or not-ok status. + * Indicate to the mid and upper layers that an ATA command has + * completed, with either an ok or not-ok status. + * + * Refrain from calling this function multiple times when + * successfully completing multiple NCQ commands. + * ata_qc_complete_multiple() should be used instead, which will + * properly update IRQ expect state. * * LOCKING: * spin_lock_irqsave(host lock) @@ -5037,6 +4922,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc) * requests normally. ap->qc_active and @qc_active is compared * and commands are completed accordingly. * + * Always use this function when completing multiple NCQ commands + * from IRQ handlers instead of calling ata_qc_complete() + * multiple times to keep IRQ expect status properly in sync. + * * LOCKING: * spin_lock_irqsave(host lock) * @@ -5422,12 +5311,6 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg) int rc; /* - * disable link pm on all ports before requesting - * any pm activity - */ - ata_lpm_enable(host); - - /* * On some hardware, device fails to respond after spun down * for suspend. As the device won't be used before being * resumed, we don't need to touch the device. Ask EH to skip @@ -5460,9 +5343,6 @@ void ata_host_resume(struct ata_host *host) ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); host->dev->power.power_state = PMSG_ON; - - /* reenable link pm */ - ata_lpm_disable(host); } #endif @@ -5517,7 +5397,8 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) int i; /* clear everything except for devices */ - memset(link, 0, offsetof(struct ata_link, device[0])); + memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, + ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); link->ap = ap; link->pmp = pmp; @@ -5591,7 +5472,7 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap = kzalloc(sizeof(*ap), GFP_KERNEL); if (!ap) return NULL; - + ap->pflags |= ATA_PFLAG_INITIALIZING; ap->lock = &host->lock; ap->print_id = -1; @@ -5695,6 +5576,7 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports) dev_set_drvdata(dev, host); spin_lock_init(&host->lock); + mutex_init(&host->eh_mutex); host->dev = dev; host->n_ports = max_ports; @@ -5992,6 +5874,7 @@ void ata_host_init(struct ata_host *host, struct device *dev, unsigned long flags, struct ata_port_operations *ops) { spin_lock_init(&host->lock); + mutex_init(&host->eh_mutex); host->dev = dev; host->flags = flags; host->ops = ops; @@ -6022,7 +5905,7 @@ static void async_port_probe(void *data, async_cookie_t cookie) spin_lock_irqsave(ap->lock, flags); ehi->probe_mask |= ATA_ALL_DEVICES; - ehi->action |= ATA_EH_RESET | ATA_EH_LPM; + ehi->action |= ATA_EH_RESET; ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; ap->pflags &= ~ATA_PFLAG_INITIALIZING; @@ -6093,9 +5976,18 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) for (i = 0; i < host->n_ports; i++) host->ports[i]->print_id = ata_print_id++; + + /* Create associated sysfs transport objects */ + for (i = 0; i < host->n_ports; i++) { + rc = 
ata_tport_add(host->dev,host->ports[i]); + if (rc) { + goto err_tadd; + } + } + rc = ata_scsi_add_hosts(host, sht); if (rc) - return rc; + goto err_tadd; /* associate with ACPI nodes */ ata_acpi_associate(host); @@ -6136,6 +6028,13 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) } return 0; + + err_tadd: + while (--i >= 0) { + ata_tport_delete(host->ports[i]); + } + return rc; + } /** @@ -6226,6 +6125,13 @@ static void ata_port_detach(struct ata_port *ap) cancel_rearming_delayed_work(&ap->hotplug_task); skip_eh: + if (ap->pmp_link) { + int i; + for (i = 0; i < SATA_PMP_MAX_PORTS; i++) + ata_tlink_delete(&ap->pmp_link[i]); + } + ata_tport_delete(ap); + /* remove the associated SCSI host */ scsi_remove_host(ap->scsi_host); } @@ -6542,7 +6448,7 @@ static void __init ata_parse_force_param(void) static int __init ata_init(void) { - int rc = -ENOMEM; + int rc; ata_parse_force_param(); @@ -6552,12 +6458,25 @@ static int __init ata_init(void) return rc; } + libata_transport_init(); + ata_scsi_transport_template = ata_attach_transport(); + if (!ata_scsi_transport_template) { + ata_sff_exit(); + rc = -ENOMEM; + goto err_out; + } + printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); return 0; + +err_out: + return rc; } static void __exit ata_exit(void) { + ata_release_transport(ata_scsi_transport_template); + libata_transport_exit(); ata_sff_exit(); kfree(ata_force_tbl); } @@ -6573,7 +6492,35 @@ int ata_ratelimit(void) } /** + * ata_msleep - ATA EH owner aware msleep + * @ap: ATA port to attribute the sleep to + * @msecs: duration to sleep in milliseconds + * + * Sleeps @msecs. If the current task is owner of @ap's EH, the + * ownership is released before going to sleep and reacquired + * after the sleep is complete. IOW, other ports sharing the + * @ap->host will be allowed to own the EH while this task is + * sleeping. + * + * LOCKING: + * Might sleep. + */ +void ata_msleep(struct ata_port *ap, unsigned int msecs) +{ + bool owns_eh = ap && ap->host->eh_owner == current; + + if (owns_eh) + ata_eh_release(ap); + + msleep(msecs); + + if (owns_eh) + ata_eh_acquire(ap); +} + +/** * ata_wait_register - wait until register value changes + * @ap: ATA port to wait register for, can be NULL * @reg: IO-mapped register * @mask: Mask to apply to read register value * @val: Wait condition @@ -6595,7 +6542,7 @@ int ata_ratelimit(void) * RETURNS: * The final register value. 
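A minimal usage sketch of the EH-owner-aware helpers introduced above, assuming a hypothetical driver-private MMIO status register (my_status_reg); since ata_wait_register() now sleeps through ata_msleep(), EH ownership is dropped across each polling interval automatically:

/* sketch only -- assumes <linux/libata.h>; poll until BSY clears,
 * 10 ms interval, 1 s timeout */
static int my_wait_not_busy(struct ata_port *ap, void __iomem *my_status_reg)
{
	u32 status;

	status = ata_wait_register(ap, my_status_reg, ATA_BUSY, ATA_BUSY,
				   10, 1000);
	return (status & ATA_BUSY) ? -EBUSY : 0;
}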
*/ -u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, +u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, unsigned long interval, unsigned long timeout) { unsigned long deadline; @@ -6610,7 +6557,7 @@ u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, deadline = ata_deadline(jiffies, timeout); while ((tmp & mask) == val && time_before(jiffies, deadline)) { - msleep(interval); + ata_msleep(ap, interval); tmp = ioread32(reg); } @@ -6686,6 +6633,7 @@ EXPORT_SYMBOL_GPL(sata_set_spd); EXPORT_SYMBOL_GPL(ata_wait_after_reset); EXPORT_SYMBOL_GPL(sata_link_debounce); EXPORT_SYMBOL_GPL(sata_link_resume); +EXPORT_SYMBOL_GPL(sata_link_scr_lpm); EXPORT_SYMBOL_GPL(ata_std_prereset); EXPORT_SYMBOL_GPL(sata_link_hardreset); EXPORT_SYMBOL_GPL(sata_std_hardreset); @@ -6693,6 +6641,7 @@ EXPORT_SYMBOL_GPL(ata_std_postreset); EXPORT_SYMBOL_GPL(ata_dev_classify); EXPORT_SYMBOL_GPL(ata_dev_pair); EXPORT_SYMBOL_GPL(ata_ratelimit); +EXPORT_SYMBOL_GPL(ata_msleep); EXPORT_SYMBOL_GPL(ata_wait_register); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); EXPORT_SYMBOL_GPL(ata_scsi_slave_config); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index e48302eae55..5e590504f3a 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -57,6 +57,7 @@ enum { /* error flags */ ATA_EFLAG_IS_IO = (1 << 0), ATA_EFLAG_DUBIOUS_XFER = (1 << 1), + ATA_EFLAG_OLD_ER = (1 << 31), /* error categories */ ATA_ECAT_NONE = 0, @@ -396,14 +397,9 @@ static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering) return NULL; } -static void ata_ering_clear(struct ata_ering *ering) -{ - memset(ering, 0, sizeof(*ering)); -} - -static int ata_ering_map(struct ata_ering *ering, - int (*map_fn)(struct ata_ering_entry *, void *), - void *arg) +int ata_ering_map(struct ata_ering *ering, + int (*map_fn)(struct ata_ering_entry *, void *), + void *arg) { int idx, rc = 0; struct ata_ering_entry *ent; @@ -422,6 +418,17 @@ static int ata_ering_map(struct ata_ering *ering, return rc; } +int ata_ering_clear_cb(struct ata_ering_entry *ent, void *void_arg) +{ + ent->eflags |= ATA_EFLAG_OLD_ER; + return 0; +} + +static void ata_ering_clear(struct ata_ering *ering) +{ + ata_ering_map(ering, ata_ering_clear_cb, NULL); +} + static unsigned int ata_eh_dev_action(struct ata_device *dev) { struct ata_eh_context *ehc = &dev->link->eh_context; @@ -456,6 +463,41 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev, } /** + * ata_eh_acquire - acquire EH ownership + * @ap: ATA port to acquire EH ownership for + * + * Acquire EH ownership for @ap. This is the basic exclusion + * mechanism for ports sharing a host. Only one port hanging off + * the same host can claim the ownership of EH. + * + * LOCKING: + * EH context. + */ +void ata_eh_acquire(struct ata_port *ap) +{ + mutex_lock(&ap->host->eh_mutex); + WARN_ON_ONCE(ap->host->eh_owner); + ap->host->eh_owner = current; +} + +/** + * ata_eh_release - release EH ownership + * @ap: ATA port to release EH ownership for + * + * Release EH ownership for @ap if the caller. The caller must + * have acquired EH ownership using ata_eh_acquire() previously. + * + * LOCKING: + * EH context. 
+ */ +void ata_eh_release(struct ata_port *ap) +{ + WARN_ON_ONCE(ap->host->eh_owner != current); + ap->host->eh_owner = NULL; + mutex_unlock(&ap->host->eh_mutex); +} + +/** * ata_scsi_timed_out - SCSI layer time out callback * @cmd: timed out SCSI command * @@ -572,19 +614,19 @@ void ata_scsi_error(struct Scsi_Host *host) int nr_timedout = 0; spin_lock_irqsave(ap->lock, flags); - + /* This must occur under the ap->lock as we don't want a polled recovery to race the real interrupt handler - + The lost_interrupt handler checks for any completed but non-notified command and completes much like an IRQ handler. - + We then fall into the error recovery code which will treat this as if normal completion won the race */ if (ap->ops->lost_interrupt) ap->ops->lost_interrupt(ap); - + list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) { struct ata_queued_cmd *qc; @@ -628,15 +670,17 @@ void ata_scsi_error(struct Scsi_Host *host) ap->eh_tries = ATA_EH_MAX_TRIES; } else spin_unlock_wait(ap->lock); - + /* If we timed raced normal completion and there is nothing to recover nr_timedout == 0 why exactly are we doing error recovery ? */ - repeat: /* invoke error handler */ if (ap->ops->error_handler) { struct ata_link *link; + /* acquire EH ownership */ + ata_eh_acquire(ap); + repeat: /* kill fast drain timer */ del_timer_sync(&ap->fastdrain_timer); @@ -711,6 +755,7 @@ void ata_scsi_error(struct Scsi_Host *host) host->host_eh_scheduled = 0; spin_unlock_irqrestore(ap->lock, flags); + ata_eh_release(ap); } else { WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL); ap->ops->eng_timeout(ap); @@ -772,7 +817,7 @@ void ata_port_wait_eh(struct ata_port *ap) /* make sure SCSI EH is complete */ if (scsi_host_in_recovery(ap->scsi_host)) { - msleep(10); + ata_msleep(ap, 10); goto retry; } } @@ -1573,9 +1618,9 @@ static void ata_eh_analyze_serror(struct ata_link *link) * host links. For disabled PMP links, only N bit is * considered as X bit is left at 1 for link plugging. 
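The acquire/release pair above is the per-host exclusion behind all of the ata_msleep() conversions in this patch; any long wait done from EH context is expected to follow the same pattern. A sketch, mirroring the retry delay added to ata_eh_reset() later in this file (my_eh_wait is a hypothetical helper):

/* sketch: drop EH ownership around a long sleep so sibling ports on the
 * same host can run their own recovery; caller must currently own EH */
static void my_eh_wait(struct ata_port *ap, unsigned long delta_jiffies)
{
	ata_eh_release(ap);
	while (delta_jiffies)
		delta_jiffies = schedule_timeout_uninterruptible(delta_jiffies);
	ata_eh_acquire(ap);
}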
*/ - hotplug_mask = 0; - - if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) + if (link->lpm_policy != ATA_LPM_MAX_POWER) + hotplug_mask = 0; /* hotplug doesn't work w/ LPM */ + else if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link)) hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG; else hotplug_mask = SERR_PHYRDY_CHG; @@ -1755,7 +1800,7 @@ static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg) struct speed_down_verdict_arg *arg = void_arg; int cat; - if (ent->timestamp < arg->since) + if ((ent->eflags & ATA_EFLAG_OLD_ER) || (ent->timestamp < arg->since)) return -1; cat = ata_eh_categorize_error(ent->eflags, ent->err_mask, @@ -2777,8 +2822,9 @@ int ata_eh_reset(struct ata_link *link, int classify, ata_eh_done(link, NULL, ATA_EH_RESET); if (slave) ata_eh_done(slave, NULL, ATA_EH_RESET); - ehc->last_reset = jiffies; /* update to completion time */ + ehc->last_reset = jiffies; /* update to completion time */ ehc->i.action |= ATA_EH_REVALIDATE; + link->lpm_policy = ATA_LPM_UNKNOWN; /* reset LPM state */ rc = 0; out: @@ -2810,8 +2856,10 @@ int ata_eh_reset(struct ata_link *link, int classify, "reset failed (errno=%d), retrying in %u secs\n", rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000)); + ata_eh_release(ap); while (delta) delta = schedule_timeout_uninterruptible(delta); + ata_eh_acquire(ap); } if (try == max_tries - 1) { @@ -3204,6 +3252,124 @@ static int ata_eh_maybe_retry_flush(struct ata_device *dev) return rc; } +/** + * ata_eh_set_lpm - configure SATA interface power management + * @link: link to configure power management + * @policy: the link power management policy + * @r_failed_dev: out parameter for failed device + * + * Enable SATA Interface power management. This will enable + * Device Interface Power Management (DIPM) for min_power + * policy, and then call driver specific callbacks for + * enabling Host Initiated Power management. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + struct ata_device **r_failed_dev) +{ + struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL; + struct ata_eh_context *ehc = &link->eh_context; + struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; + unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; + unsigned int err_mask; + int rc; + + /* if the link or host doesn't do LPM, noop */ + if ((link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) + return 0; + + /* + * DIPM is enabled only for MIN_POWER as some devices + * misbehave when the host NACKs transition to SLUMBER. Order + * device and link configurations such that the host always + * allows DIPM requests. 
+ */ + ata_for_each_dev(dev, link, ENABLED) { + bool hipm = ata_id_has_hipm(dev->id); + bool dipm = ata_id_has_dipm(dev->id); + + /* find the first enabled and LPM enabled devices */ + if (!link_dev) + link_dev = dev; + + if (!lpm_dev && (hipm || dipm)) + lpm_dev = dev; + + hints &= ~ATA_LPM_EMPTY; + if (!hipm) + hints &= ~ATA_LPM_HIPM; + + /* disable DIPM before changing link config */ + if (policy != ATA_LPM_MIN_POWER && dipm) { + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_DISABLE, SATA_DIPM); + if (err_mask && err_mask != AC_ERR_DEV) { + ata_dev_printk(dev, KERN_WARNING, + "failed to disable DIPM, Emask 0x%x\n", + err_mask); + rc = -EIO; + goto fail; + } + } + } + + if (ap) { + rc = ap->ops->set_lpm(link, policy, hints); + if (!rc && ap->slave_link) + rc = ap->ops->set_lpm(ap->slave_link, policy, hints); + } else + rc = sata_pmp_set_lpm(link, policy, hints); + + /* + * Attribute link config failure to the first (LPM) enabled + * device on the link. + */ + if (rc) { + if (rc == -EOPNOTSUPP) { + link->flags |= ATA_LFLAG_NO_LPM; + return 0; + } + dev = lpm_dev ? lpm_dev : link_dev; + goto fail; + } + + /* host config updated, enable DIPM if transitioning to MIN_POWER */ + ata_for_each_dev(dev, link, ENABLED) { + if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) { + err_mask = ata_dev_set_feature(dev, + SETFEATURES_SATA_ENABLE, SATA_DIPM); + if (err_mask && err_mask != AC_ERR_DEV) { + ata_dev_printk(dev, KERN_WARNING, + "failed to enable DIPM, Emask 0x%x\n", + err_mask); + rc = -EIO; + goto fail; + } + } + } + + link->lpm_policy = policy; + if (ap && ap->slave_link) + ap->slave_link->lpm_policy = policy; + return 0; + +fail: + /* if no device or only one more chance is left, disable LPM */ + if (!dev || ehc->tries[dev->devno] <= 2) { + ata_link_printk(link, KERN_WARNING, + "disabling LPM on the link\n"); + link->flags |= ATA_LFLAG_NO_LPM; + } + if (r_failed_dev) + *r_failed_dev = dev; + return rc; +} + static int ata_link_nr_enabled(struct ata_link *link) { struct ata_device *dev; @@ -3288,6 +3454,16 @@ static int ata_eh_schedule_probe(struct ata_device *dev) ehc->saved_xfer_mode[dev->devno] = 0; ehc->saved_ncq_enabled &= ~(1 << dev->devno); + /* the link maybe in a deep sleep, wake it up */ + if (link->lpm_policy > ATA_LPM_MAX_POWER) { + if (ata_is_host_link(link)) + link->ap->ops->set_lpm(link, ATA_LPM_MAX_POWER, + ATA_LPM_EMPTY); + else + sata_pmp_set_lpm(link, ATA_LPM_MAX_POWER, + ATA_LPM_EMPTY); + } + /* Record and count probe trials on the ering. The specific * error mask used is irrelevant. 
Because a successful device * detection clears the ering, this count accumulates only if @@ -3389,8 +3565,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, { struct ata_link *link; struct ata_device *dev; - int nr_failed_devs; - int rc; + int rc, nr_fails; unsigned long flags, deadline; DPRINTK("ENTER\n"); @@ -3431,7 +3606,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, retry: rc = 0; - nr_failed_devs = 0; /* if UNLOADING, finish immediately */ if (ap->pflags & ATA_PFLAG_UNLOADING) @@ -3501,8 +3675,10 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, if (time_before_eq(deadline, now)) break; + ata_eh_release(ap); deadline = wait_for_completion_timeout(&ap->park_req_pending, deadline - now); + ata_eh_acquire(ap); } while (deadline); ata_for_each_link(link, ap, EDGE) { ata_for_each_dev(dev, link, ALL) { @@ -3516,13 +3692,17 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, } /* the rest */ - ata_for_each_link(link, ap, EDGE) { + nr_fails = 0; + ata_for_each_link(link, ap, PMP_FIRST) { struct ata_eh_context *ehc = &link->eh_context; + if (sata_pmp_attached(ap) && ata_is_host_link(link)) + goto config_lpm; + /* revalidate existing devices and attach new ones */ rc = ata_eh_revalidate_and_attach(link, &dev); if (rc) - goto dev_fail; + goto rest_fail; /* if PMP got attached, return, pmp EH will take care of it */ if (link->device->class == ATA_DEV_PMP) { @@ -3534,7 +3714,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, if (ehc->i.flags & ATA_EHI_SETMODE) { rc = ata_set_mode(link, &dev); if (rc) - goto dev_fail; + goto rest_fail; ehc->i.flags &= ~ATA_EHI_SETMODE; } @@ -3547,7 +3727,7 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, continue; rc = atapi_eh_clear_ua(dev); if (rc) - goto dev_fail; + goto rest_fail; } } @@ -3557,21 +3737,25 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, continue; rc = ata_eh_maybe_retry_flush(dev); if (rc) - goto dev_fail; + goto rest_fail; } + config_lpm: /* configure link power saving */ - if (ehc->i.action & ATA_EH_LPM) - ata_for_each_dev(dev, link, ALL) - ata_dev_enable_pm(dev, ap->pm_policy); + if (link->lpm_policy != ap->target_lpm_policy) { + rc = ata_eh_set_lpm(link, ap->target_lpm_policy, &dev); + if (rc) + goto rest_fail; + } /* this link is okay now */ ehc->i.flags = 0; continue; -dev_fail: - nr_failed_devs++; - ata_eh_handle_dev_fail(dev, rc); + rest_fail: + nr_fails++; + if (dev) + ata_eh_handle_dev_fail(dev, rc); if (ap->pflags & ATA_PFLAG_FROZEN) { /* PMP reset requires working host port. @@ -3583,7 +3767,7 @@ dev_fail: } } - if (nr_failed_devs) + if (nr_fails) goto retry; out: diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 224faabd7b7..3120596d4af 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -11,6 +11,7 @@ #include <linux/libata.h> #include <linux/slab.h> #include "libata.h" +#include "libata-transport.h" const struct ata_port_operations sata_pmp_port_ops = { .inherits = &sata_port_ops, @@ -185,6 +186,27 @@ int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val) } /** + * sata_pmp_set_lpm - configure LPM for a PMP link + * @link: PMP link to configure LPM for + * @policy: target LPM policy + * @hints: LPM hints + * + * Configure LPM for @link. This function will contain any PMP + * specific workarounds if necessary. + * + * LOCKING: + * EH context. + * + * RETURNS: + * 0 on success, -errno on failure. 
+ */ +int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints) +{ + return sata_link_scr_lpm(link, policy, true); +} + +/** * sata_pmp_read_gscr - read GSCR block of SATA PMP * @dev: PMP device * @gscr: buffer to read GSCR block into @@ -312,10 +334,10 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info) return rc; } -static int sata_pmp_init_links(struct ata_port *ap, int nr_ports) +static int sata_pmp_init_links (struct ata_port *ap, int nr_ports) { struct ata_link *pmp_link = ap->pmp_link; - int i; + int i, err; if (!pmp_link) { pmp_link = kzalloc(sizeof(pmp_link[0]) * SATA_PMP_MAX_PORTS, @@ -327,6 +349,13 @@ static int sata_pmp_init_links(struct ata_port *ap, int nr_ports) ata_link_init(ap, &pmp_link[i], i); ap->pmp_link = pmp_link; + + for (i = 0; i < SATA_PMP_MAX_PORTS; i++) { + err = ata_tlink_add(&pmp_link[i]); + if (err) { + goto err_tlink; + } + } } for (i = 0; i < nr_ports; i++) { @@ -339,6 +368,12 @@ static int sata_pmp_init_links(struct ata_port *ap, int nr_ports) } return 0; + err_tlink: + while (--i >= 0) + ata_tlink_delete(&pmp_link[i]); + kfree(pmp_link); + ap->pmp_link = NULL; + return err; } static void sata_pmp_quirks(struct ata_port *ap) @@ -351,6 +386,9 @@ static void sata_pmp_quirks(struct ata_port *ap) if (vendor == 0x1095 && devid == 0x3726) { /* sil3726 quirks */ ata_for_each_link(link, ap, EDGE) { + /* link reports offline after LPM */ + link->flags |= ATA_LFLAG_NO_LPM; + /* Class code report is unreliable and SRST * times out under certain configurations. */ @@ -366,6 +404,9 @@ static void sata_pmp_quirks(struct ata_port *ap) } else if (vendor == 0x1095 && devid == 0x4723) { /* sil4723 quirks */ ata_for_each_link(link, ap, EDGE) { + /* link reports offline after LPM */ + link->flags |= ATA_LFLAG_NO_LPM; + /* class code report is unreliable */ if (link->pmp < 2) link->flags |= ATA_LFLAG_ASSUME_ATA; @@ -378,6 +419,9 @@ static void sata_pmp_quirks(struct ata_port *ap) } else if (vendor == 0x1095 && devid == 0x4726) { /* sil4726 quirks */ ata_for_each_link(link, ap, EDGE) { + /* link reports offline after LPM */ + link->flags |= ATA_LFLAG_NO_LPM; + /* Class code report is unreliable and SRST * times out under certain configurations. * Config device can be at port 0 or 5 and @@ -938,15 +982,25 @@ static int sata_pmp_eh_recover(struct ata_port *ap) if (rc) goto link_fail; - /* Connection status might have changed while resetting other - * links, check SATA_PMP_GSCR_ERROR before returning. - */ - /* clear SNotification */ rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); if (rc == 0) sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); + /* + * If LPM is active on any fan-out port, hotplug wouldn't + * work. Return w/ PHY event notification disabled. + */ + ata_for_each_link(link, ap, EDGE) + if (link->lpm_policy > ATA_LPM_MAX_POWER) + return 0; + + /* + * Connection status might have changed while resetting other + * links, enable notification and check SATA_PMP_GSCR_ERROR + * before returning. 
+ */ + /* enable notification */ if (pmp_dev->flags & ATA_DFLAG_AN) { gscr[SATA_PMP_GSCR_FEAT_EN] |= SATA_PMP_FEAT_NOTIFY; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index a89172c100f..d050e073e57 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -51,8 +51,8 @@ #include <asm/unaligned.h> #include "libata.h" +#include "libata-transport.h" -#define SECTOR_SIZE 512 #define ATA_SCSI_RBUF_SIZE 4096 static DEFINE_SPINLOCK(ata_scsi_rbuf_lock); @@ -64,9 +64,6 @@ static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev); -static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, unsigned int lun); - #define RW_RECOVERY_MPAGE 0x1 #define RW_RECOVERY_MPAGE_LEN 12 @@ -106,83 +103,55 @@ static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = { 0, 30 /* extended self test time, see 05-359r1 */ }; -/* - * libata transport template. libata doesn't do real transport stuff. - * It just needs the eh_timed_out hook. - */ -static struct scsi_transport_template ata_scsi_transport_template = { - .eh_strategy_handler = ata_scsi_error, - .eh_timed_out = ata_scsi_timed_out, - .user_scan = ata_scsi_user_scan, -}; - - -static const struct { - enum link_pm value; - const char *name; -} link_pm_policy[] = { - { NOT_AVAILABLE, "max_performance" }, - { MIN_POWER, "min_power" }, - { MAX_PERFORMANCE, "max_performance" }, - { MEDIUM_POWER, "medium_power" }, +static const char *ata_lpm_policy_names[] = { + [ATA_LPM_UNKNOWN] = "max_performance", + [ATA_LPM_MAX_POWER] = "max_performance", + [ATA_LPM_MED_POWER] = "medium_power", + [ATA_LPM_MIN_POWER] = "min_power", }; -static const char *ata_scsi_lpm_get(enum link_pm policy) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(link_pm_policy); i++) - if (link_pm_policy[i].value == policy) - return link_pm_policy[i].name; - - return NULL; -} - -static ssize_t ata_scsi_lpm_put(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t ata_scsi_lpm_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); struct ata_port *ap = ata_shost_to_port(shost); - enum link_pm policy = 0; - int i; + enum ata_lpm_policy policy; + unsigned long flags; - /* - * we are skipping array location 0 on purpose - this - * is because a value of NOT_AVAILABLE is displayed - * to the user as max_performance, but when the user - * writes "max_performance", they actually want the - * value to match MAX_PERFORMANCE. 
- */ - for (i = 1; i < ARRAY_SIZE(link_pm_policy); i++) { - const int len = strlen(link_pm_policy[i].name); - if (strncmp(link_pm_policy[i].name, buf, len) == 0) { - policy = link_pm_policy[i].value; + /* UNKNOWN is internal state, iterate from MAX_POWER */ + for (policy = ATA_LPM_MAX_POWER; + policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) { + const char *name = ata_lpm_policy_names[policy]; + + if (strncmp(name, buf, strlen(name)) == 0) break; - } } - if (!policy) + if (policy == ARRAY_SIZE(ata_lpm_policy_names)) return -EINVAL; - ata_lpm_schedule(ap, policy); + spin_lock_irqsave(ap->lock, flags); + ap->target_lpm_policy = policy; + ata_port_schedule_eh(ap); + spin_unlock_irqrestore(ap->lock, flags); + return count; } -static ssize_t -ata_scsi_lpm_show(struct device *dev, struct device_attribute *attr, char *buf) +static ssize_t ata_scsi_lpm_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct Scsi_Host *shost = class_to_shost(dev); struct ata_port *ap = ata_shost_to_port(shost); - const char *policy = - ata_scsi_lpm_get(ap->pm_policy); - if (!policy) + if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) return -EINVAL; - return snprintf(buf, 23, "%s\n", policy); + return snprintf(buf, PAGE_SIZE, "%s\n", + ata_lpm_policy_names[ap->target_lpm_policy]); } DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, - ata_scsi_lpm_show, ata_scsi_lpm_put); + ata_scsi_lpm_show, ata_scsi_lpm_store); EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); static ssize_t ata_scsi_park_show(struct device *device, @@ -516,7 +485,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) memset(scsi_cmd, 0, sizeof(scsi_cmd)); if (args[3]) { - argsize = SECTOR_SIZE * args[3]; + argsize = ATA_SECT_SIZE * args[3]; argbuf = kmalloc(argsize, GFP_KERNEL); if (argbuf == NULL) { rc = -ENOMEM; @@ -1150,8 +1119,9 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); } else { /* ATA devices must be sector aligned */ + sdev->sector_size = ata_id_logical_sector_size(dev->id); blk_queue_update_dma_alignment(sdev->request_queue, - ATA_SECT_SIZE - 1); + sdev->sector_size - 1); sdev->manage_start_stop = 1; } @@ -1166,6 +1136,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); } + dev->sdev = sdev; return 0; } @@ -1696,7 +1667,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc) goto nothing_to_do; qc->flags |= ATA_QCFLAG_IO; - qc->nbytes = n_block * ATA_SECT_SIZE; + qc->nbytes = n_block * scmd->device->sector_size; rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags, qc->tag); @@ -2001,6 +1972,7 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf) 0x89, /* page 0x89, ata info page */ 0xb0, /* page 0xb0, block limits page */ 0xb1, /* page 0xb1, block device characteristics page */ + 0xb2, /* page 0xb2, thin provisioning page */ }; rbuf[3] = sizeof(pages); /* number of supported VPD pages */ @@ -2123,7 +2095,7 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf) static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) { - u32 min_io_sectors; + u16 min_io_sectors; rbuf[1] = 0xb0; rbuf[3] = 0x3c; /* required VPD size with unmap support */ @@ -2135,10 +2107,7 @@ static unsigned int ata_scsiop_inq_b0(struct ata_scsi_args *args, u8 *rbuf) * logical than physical sector size we need to figure out what the * latter is. 
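With the rewrite above, the values accepted through /sys/class/scsi_host/hostX/link_power_management_policy are exactly the strings in ata_lpm_policy_names[] ("max_performance", "medium_power", "min_power"); writing one now merely records ap->target_lpm_policy and schedules EH, which applies it through ata_eh_set_lpm() rather than the old ata_lpm_schedule() path.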
*/ - if (ata_id_has_large_logical_sectors(args->id)) - min_io_sectors = ata_id_logical_per_physical_sectors(args->id); - else - min_io_sectors = 1; + min_io_sectors = 1 << ata_id_log2_per_physical_sector(args->id); put_unaligned_be16(min_io_sectors, &rbuf[6]); /* @@ -2172,6 +2141,16 @@ static unsigned int ata_scsiop_inq_b1(struct ata_scsi_args *args, u8 *rbuf) return 0; } +static unsigned int ata_scsiop_inq_b2(struct ata_scsi_args *args, u8 *rbuf) +{ + /* SCSI Thin Provisioning VPD page: SBC-3 rev 22 or later */ + rbuf[1] = 0xb2; + rbuf[3] = 0x4; + rbuf[5] = 1 << 6; /* TPWS */ + + return 0; +} + /** * ata_scsiop_noop - Command handler that simply returns success. * @args: device IDENTIFY data / SCSI command of interest. @@ -2397,21 +2376,13 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) { struct ata_device *dev = args->dev; u64 last_lba = dev->n_sectors - 1; /* LBA of the last block */ - u8 log_per_phys = 0; - u16 lowest_aligned = 0; - u16 word_106 = dev->id[106]; - u16 word_209 = dev->id[209]; - - if ((word_106 & 0xc000) == 0x4000) { - /* Number and offset of logical sectors per physical sector */ - if (word_106 & (1 << 13)) - log_per_phys = word_106 & 0xf; - if ((word_209 & 0xc000) == 0x4000) { - u16 first = dev->id[209] & 0x3fff; - if (first > 0) - lowest_aligned = (1 << log_per_phys) - first; - } - } + u32 sector_size; /* physical sector size in bytes */ + u8 log2_per_phys; + u16 lowest_aligned; + + sector_size = ata_id_logical_sector_size(dev->id); + log2_per_phys = ata_id_log2_per_physical_sector(dev->id); + lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys); VPRINTK("ENTER\n"); @@ -2426,8 +2397,10 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[3] = last_lba; /* sector size */ - rbuf[6] = ATA_SECT_SIZE >> 8; - rbuf[7] = ATA_SECT_SIZE & 0xff; + rbuf[4] = sector_size >> (8 * 3); + rbuf[5] = sector_size >> (8 * 2); + rbuf[6] = sector_size >> (8 * 1); + rbuf[7] = sector_size; } else { /* sector count, 64-bit */ rbuf[0] = last_lba >> (8 * 7); @@ -2440,11 +2413,13 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[7] = last_lba; /* sector size */ - rbuf[10] = ATA_SECT_SIZE >> 8; - rbuf[11] = ATA_SECT_SIZE & 0xff; + rbuf[ 8] = sector_size >> (8 * 3); + rbuf[ 9] = sector_size >> (8 * 2); + rbuf[10] = sector_size >> (8 * 1); + rbuf[11] = sector_size; rbuf[12] = 0; - rbuf[13] = log_per_phys; + rbuf[13] = log2_per_phys; rbuf[14] = (lowest_aligned >> 8) & 0x3f; rbuf[15] = lowest_aligned; @@ -2888,9 +2863,8 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) tf->device = dev->devno ? tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; - /* READ/WRITE LONG use a non-standard sect_size */ - qc->sect_size = ATA_SECT_SIZE; switch (tf->command) { + /* READ/WRITE LONG use a non-standard sect_size */ case ATA_CMD_READ_LONG: case ATA_CMD_READ_LONG_ONCE: case ATA_CMD_WRITE_LONG: @@ -2898,6 +2872,45 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) goto invalid_fld; qc->sect_size = scsi_bufflen(scmd); + break; + + /* commands using reported Logical Block size (e.g. 
512 or 4K) */ + case ATA_CMD_CFA_WRITE_NE: + case ATA_CMD_CFA_TRANS_SECT: + case ATA_CMD_CFA_WRITE_MULT_NE: + /* XXX: case ATA_CMD_CFA_WRITE_SECTORS_WITHOUT_ERASE: */ + case ATA_CMD_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ_QUEUED: + /* XXX: case ATA_CMD_READ_QUEUED_EXT: */ + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_MULTI: + case ATA_CMD_READ_MULTI_EXT: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_READ_STREAM_DMA_EXT: + case ATA_CMD_READ_STREAM_EXT: + case ATA_CMD_VERIFY: + case ATA_CMD_VERIFY_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE_FUA_EXT: + case ATA_CMD_WRITE_QUEUED: + case ATA_CMD_WRITE_QUEUED_FUA_EXT: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_MULTI: + case ATA_CMD_WRITE_MULTI_EXT: + case ATA_CMD_WRITE_MULTI_FUA_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + case ATA_CMD_WRITE_STREAM_DMA_EXT: + case ATA_CMD_WRITE_STREAM_EXT: + qc->sect_size = scmd->device->sector_size; + break; + + /* Everything else uses 512 byte "sectors" */ + default: + qc->sect_size = ATA_SECT_SIZE; } /* @@ -3250,6 +3263,9 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, case 0xb1: ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b1); break; + case 0xb2: + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2); + break; default: ata_scsi_invalid_field(cmd, done); break; @@ -3334,7 +3350,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) *(struct ata_port **)&shost->hostdata[0] = ap; ap->scsi_host = shost; - shost->transportt = &ata_scsi_transport_template; + shost->transportt = ata_scsi_transport_template; shost->unique_id = ap->print_id; shost->max_id = 16; shost->max_lun = 1; @@ -3393,6 +3409,8 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync) if (!IS_ERR(sdev)) { dev->sdev = sdev; scsi_device_put(sdev); + } else { + dev->sdev = NULL; } } } @@ -3616,8 +3634,8 @@ void ata_scsi_hotplug(struct work_struct *work) * RETURNS: * Zero. 
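As a worked example of the READ CAPACITY(16) changes above: a 512-byte-logical / 4096-byte-physical ("512e") drive reports sector_size = 512, log2_per_phys = 3 and, say, lowest_aligned = 1, so the emulated response carries 0x200 in the sector-size bytes, 3 in byte 13 and the 14-bit alignment offset split across bytes 14-15 -- which is what lets the block layer align partitions to the physical sector boundary.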
*/ -static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, - unsigned int id, unsigned int lun) +int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, + unsigned int id, unsigned int lun) { struct ata_port *ap = ata_shost_to_port(shost); unsigned long flags; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index f5296bb19ec..d05387d1e14 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -222,7 +222,7 @@ int ata_sff_busy_sleep(struct ata_port *ap, timeout = ata_deadline(timer_start, tmout_pat); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { - msleep(50); + ata_msleep(ap, 50); status = ata_sff_busy_wait(ap, ATA_BUSY, 3); } @@ -234,7 +234,7 @@ int ata_sff_busy_sleep(struct ata_port *ap, timeout = ata_deadline(timer_start, tmout); while (status != 0xff && (status & ATA_BUSY) && time_before(jiffies, timeout)) { - msleep(50); + ata_msleep(ap, 50); status = ap->ops->sff_check_status(ap); } @@ -360,7 +360,7 @@ static void ata_dev_select(struct ata_port *ap, unsigned int device, if (wait) { if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI) - msleep(150); + ata_msleep(ap, 150); ata_wait_idle(ap); } } @@ -1356,7 +1356,7 @@ fsm_start: */ status = ata_sff_busy_wait(ap, ATA_BUSY, 5); if (status & ATA_BUSY) { - msleep(2); + ata_msleep(ap, 2); status = ata_sff_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); @@ -1937,7 +1937,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, unsigned int dev1 = devmask & (1 << 1); int rc, ret = 0; - msleep(ATA_WAIT_AFTER_RESET); + ata_msleep(ap, ATA_WAIT_AFTER_RESET); /* always check readiness of the master device */ rc = ata_sff_wait_ready(link, deadline); @@ -1966,7 +1966,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, lbal = ioread8(ioaddr->lbal_addr); if ((nsect == 1) && (lbal == 1)) break; - msleep(50); /* give drive a breather */ + ata_msleep(ap, 50); /* give drive a breather */ } rc = ata_sff_wait_ready(link, deadline); @@ -3342,7 +3342,7 @@ int __init ata_sff_init(void) return 0; } -void __exit ata_sff_exit(void) +void ata_sff_exit(void) { destroy_workqueue(ata_sff_wq); } diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c new file mode 100644 index 00000000000..ce9dc6207f3 --- /dev/null +++ b/drivers/ata/libata-transport.c @@ -0,0 +1,774 @@ +/* + * Copyright 2008 ioogle, Inc. All rights reserved. + * Released under GPL v2. + * + * Libata transport class. + * + * The ATA transport class contains common code to deal with ATA HBAs, + * an approximated representation of ATA topologies in the driver model, + * and various sysfs attributes to expose these topologies and management + * interfaces to user-space. + * + * There are 3 objects defined in in this class: + * - ata_port + * - ata_link + * - ata_device + * Each port has a link object. Each link can have up to two devices for PATA + * and generally one for SATA. + * If there is SATA port multiplier [PMP], 15 additional ata_link object are + * created. + * + * These objects are created when the ata host is initialized and when a PMP is + * found. They are removed only when the HBA is removed, cleaned before the + * error handler runs. 
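Going by the dev_set_name() calls later in this file, the objects created here surface in sysfs roughly as ata1 (port), link1 and link1.N (host link and PMP fan-out links) and dev1.0 / dev1.N.0 (devices), hanging off the HBA's device node and grouped under the new ata_port, ata_link and ata_device transport classes.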
+ */ + + +#include <linux/kernel.h> +#include <linux/blkdev.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <scsi/scsi_transport.h> +#include <linux/libata.h> +#include <linux/hdreg.h> +#include <linux/uaccess.h> + +#include "libata.h" +#include "libata-transport.h" + +#define ATA_PORT_ATTRS 2 +#define ATA_LINK_ATTRS 3 +#define ATA_DEV_ATTRS 9 + +struct scsi_transport_template; +struct scsi_transport_template *ata_scsi_transport_template; + +struct ata_internal { + struct scsi_transport_template t; + + struct device_attribute private_port_attrs[ATA_PORT_ATTRS]; + struct device_attribute private_link_attrs[ATA_LINK_ATTRS]; + struct device_attribute private_dev_attrs[ATA_DEV_ATTRS]; + + struct transport_container link_attr_cont; + struct transport_container dev_attr_cont; + + /* + * The array of null terminated pointers to attributes + * needed by scsi_sysfs.c + */ + struct device_attribute *link_attrs[ATA_LINK_ATTRS + 1]; + struct device_attribute *port_attrs[ATA_PORT_ATTRS + 1]; + struct device_attribute *dev_attrs[ATA_DEV_ATTRS + 1]; +}; +#define to_ata_internal(tmpl) container_of(tmpl, struct ata_internal, t) + + +#define tdev_to_device(d) \ + container_of((d), struct ata_device, tdev) +#define transport_class_to_dev(dev) \ + tdev_to_device((dev)->parent) + +#define tdev_to_link(d) \ + container_of((d), struct ata_link, tdev) +#define transport_class_to_link(dev) \ + tdev_to_link((dev)->parent) + +#define tdev_to_port(d) \ + container_of((d), struct ata_port, tdev) +#define transport_class_to_port(dev) \ + tdev_to_port((dev)->parent) + + +/* Device objects are always created whit link objects */ +static int ata_tdev_add(struct ata_device *dev); +static void ata_tdev_delete(struct ata_device *dev); + + +/* + * Hack to allow attributes of the same name in different objects. 
+ */ +#define ATA_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ + struct device_attribute device_attr_##_prefix##_##_name = \ + __ATTR(_name,_mode,_show,_store) + +#define ata_bitfield_name_match(title, table) \ +static ssize_t \ +get_ata_##title##_names(u32 table_key, char *buf) \ +{ \ + char *prefix = ""; \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value & table_key) { \ + len += sprintf(buf + len, "%s%s", \ + prefix, table[i].name); \ + prefix = ", "; \ + } \ + } \ + len += sprintf(buf + len, "\n"); \ + return len; \ +} + +#define ata_bitfield_name_search(title, table) \ +static ssize_t \ +get_ata_##title##_names(u32 table_key, char *buf) \ +{ \ + ssize_t len = 0; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(table); i++) { \ + if (table[i].value == table_key) { \ + len += sprintf(buf + len, "%s", \ + table[i].name); \ + break; \ + } \ + } \ + len += sprintf(buf + len, "\n"); \ + return len; \ +} + +static struct { + u32 value; + char *name; +} ata_class_names[] = { + { ATA_DEV_UNKNOWN, "unknown" }, + { ATA_DEV_ATA, "ata" }, + { ATA_DEV_ATA_UNSUP, "ata" }, + { ATA_DEV_ATAPI, "atapi" }, + { ATA_DEV_ATAPI_UNSUP, "atapi" }, + { ATA_DEV_PMP, "pmp" }, + { ATA_DEV_PMP_UNSUP, "pmp" }, + { ATA_DEV_SEMB, "semb" }, + { ATA_DEV_SEMB_UNSUP, "semb" }, + { ATA_DEV_NONE, "none" } +}; +ata_bitfield_name_search(class, ata_class_names) + + +static struct { + u32 value; + char *name; +} ata_err_names[] = { + { AC_ERR_DEV, "DeviceError" }, + { AC_ERR_HSM, "HostStateMachineError" }, + { AC_ERR_TIMEOUT, "Timeout" }, + { AC_ERR_MEDIA, "MediaError" }, + { AC_ERR_ATA_BUS, "BusError" }, + { AC_ERR_HOST_BUS, "HostBusError" }, + { AC_ERR_SYSTEM, "SystemError" }, + { AC_ERR_INVALID, "InvalidArg" }, + { AC_ERR_OTHER, "Unknown" }, + { AC_ERR_NODEV_HINT, "NoDeviceHint" }, + { AC_ERR_NCQ, "NCQError" } +}; +ata_bitfield_name_match(err, ata_err_names) + +static struct { + u32 value; + char *name; +} ata_xfer_names[] = { + { XFER_UDMA_7, "XFER_UDMA_7" }, + { XFER_UDMA_6, "XFER_UDMA_6" }, + { XFER_UDMA_5, "XFER_UDMA_5" }, + { XFER_UDMA_4, "XFER_UDMA_4" }, + { XFER_UDMA_3, "XFER_UDMA_3" }, + { XFER_UDMA_2, "XFER_UDMA_2" }, + { XFER_UDMA_1, "XFER_UDMA_1" }, + { XFER_UDMA_0, "XFER_UDMA_0" }, + { XFER_MW_DMA_4, "XFER_MW_DMA_4" }, + { XFER_MW_DMA_3, "XFER_MW_DMA_3" }, + { XFER_MW_DMA_2, "XFER_MW_DMA_2" }, + { XFER_MW_DMA_1, "XFER_MW_DMA_1" }, + { XFER_MW_DMA_0, "XFER_MW_DMA_0" }, + { XFER_SW_DMA_2, "XFER_SW_DMA_2" }, + { XFER_SW_DMA_1, "XFER_SW_DMA_1" }, + { XFER_SW_DMA_0, "XFER_SW_DMA_0" }, + { XFER_PIO_6, "XFER_PIO_6" }, + { XFER_PIO_5, "XFER_PIO_5" }, + { XFER_PIO_4, "XFER_PIO_4" }, + { XFER_PIO_3, "XFER_PIO_3" }, + { XFER_PIO_2, "XFER_PIO_2" }, + { XFER_PIO_1, "XFER_PIO_1" }, + { XFER_PIO_0, "XFER_PIO_0" }, + { XFER_PIO_SLOW, "XFER_PIO_SLOW" } +}; +ata_bitfield_name_match(xfer,ata_xfer_names) + +/* + * ATA Port attributes + */ +#define ata_port_show_simple(field, name, format_string, cast) \ +static ssize_t \ +show_ata_port_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ata_port *ap = transport_class_to_port(dev); \ + \ + return snprintf(buf, 20, format_string, cast ap->field); \ +} + +#define ata_port_simple_attr(field, name, format_string, type) \ + ata_port_show_simple(field, name, format_string, (type)) \ +static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL) + +ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int); +ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long); + +static 
DECLARE_TRANSPORT_CLASS(ata_port_class, + "ata_port", NULL, NULL, NULL); + +static void ata_tport_release(struct device *dev) +{ + put_device(dev->parent); +} + +/** + * ata_is_port -- check if a struct device represents a ATA port + * @dev: device to check + * + * Returns: + * %1 if the device represents a ATA Port, %0 else + */ +int ata_is_port(const struct device *dev) +{ + return dev->release == ata_tport_release; +} + +static int ata_tport_match(struct attribute_container *cont, + struct device *dev) +{ + if (!ata_is_port(dev)) + return 0; + return &ata_scsi_transport_template->host_attrs.ac == cont; +} + +/** + * ata_tport_delete -- remove ATA PORT + * @port: ATA PORT to remove + * + * Removes the specified ATA PORT. Remove the associated link as well. + */ +void ata_tport_delete(struct ata_port *ap) +{ + struct device *dev = &ap->tdev; + + ata_tlink_delete(&ap->link); + + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + put_device(dev); +} + +/** ata_tport_add - initialize a transport ATA port structure + * + * @parent: parent device + * @ap: existing ata_port structure + * + * Initialize a ATA port structure for sysfs. It will be added to the device + * tree below the device specified by @parent which could be a PCI device. + * + * Returns %0 on success + */ +int ata_tport_add(struct device *parent, + struct ata_port *ap) +{ + int error; + struct device *dev = &ap->tdev; + + device_initialize(dev); + + dev->parent = get_device(parent); + dev->release = ata_tport_release; + dev_set_name(dev, "ata%d", ap->print_id); + transport_setup_device(dev); + error = device_add(dev); + if (error) { + goto tport_err; + } + + transport_add_device(dev); + transport_configure_device(dev); + + error = ata_tlink_add(&ap->link); + if (error) { + goto tport_link_err; + } + return 0; + + tport_link_err: + transport_remove_device(dev); + device_del(dev); + + tport_err: + transport_destroy_device(dev); + put_device(dev); + return error; +} + + +/* + * ATA link attributes + */ + + +#define ata_link_show_linkspeed(field) \ +static ssize_t \ +show_ata_link_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ata_link *link = transport_class_to_link(dev); \ + \ + return sprintf(buf,"%s\n", sata_spd_string(fls(link->field))); \ +} + +#define ata_link_linkspeed_attr(field) \ + ata_link_show_linkspeed(field) \ +static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL) + +ata_link_linkspeed_attr(hw_sata_spd_limit); +ata_link_linkspeed_attr(sata_spd_limit); +ata_link_linkspeed_attr(sata_spd); + + +static DECLARE_TRANSPORT_CLASS(ata_link_class, + "ata_link", NULL, NULL, NULL); + +static void ata_tlink_release(struct device *dev) +{ + put_device(dev->parent); +} + +/** + * ata_is_link -- check if a struct device represents a ATA link + * @dev: device to check + * + * Returns: + * %1 if the device represents a ATA link, %0 else + */ +int ata_is_link(const struct device *dev) +{ + return dev->release == ata_tlink_release; +} + +static int ata_tlink_match(struct attribute_container *cont, + struct device *dev) +{ + struct ata_internal* i = to_ata_internal(ata_scsi_transport_template); + if (!ata_is_link(dev)) + return 0; + return &i->link_attr_cont.ac == cont; +} + +/** + * ata_tlink_delete -- remove ATA LINK + * @port: ATA LINK to remove + * + * Removes the specified ATA LINK. remove associated ATA device(s) as well. 
+ */ +void ata_tlink_delete(struct ata_link *link) +{ + struct device *dev = &link->tdev; + struct ata_device *ata_dev; + + ata_for_each_dev(ata_dev, link, ALL) { + ata_tdev_delete(ata_dev); + } + + transport_remove_device(dev); + device_del(dev); + transport_destroy_device(dev); + put_device(dev); +} + +/** + * ata_tlink_add -- initialize a transport ATA link structure + * @link: allocated ata_link structure. + * + * Initialize an ATA LINK structure for sysfs. It will be added in the + * device tree below the ATA PORT it belongs to. + * + * Returns %0 on success + */ +int ata_tlink_add(struct ata_link *link) +{ + struct device *dev = &link->tdev; + struct ata_port *ap = link->ap; + struct ata_device *ata_dev; + int error; + + device_initialize(dev); + dev->parent = get_device(&ap->tdev); + dev->release = ata_tlink_release; + if (ata_is_host_link(link)) + dev_set_name(dev, "link%d", ap->print_id); + else + dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp); + + transport_setup_device(dev); + + error = device_add(dev); + if (error) { + goto tlink_err; + } + + transport_add_device(dev); + transport_configure_device(dev); + + ata_for_each_dev(ata_dev, link, ALL) { + error = ata_tdev_add(ata_dev); + if (error) { + goto tlink_dev_err; + } + } + return 0; + tlink_dev_err: + while (--ata_dev >= link->device) { + ata_tdev_delete(ata_dev); + } + transport_remove_device(dev); + device_del(dev); + tlink_err: + transport_destroy_device(dev); + put_device(dev); + return error; +} + +/* + * ATA device attributes + */ + +#define ata_dev_show_class(title, field) \ +static ssize_t \ +show_ata_dev_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ata_device *ata_dev = transport_class_to_dev(dev); \ + \ + return get_ata_##title##_names(ata_dev->field, buf); \ +} + +#define ata_dev_attr(title, field) \ + ata_dev_show_class(title, field) \ +static DEVICE_ATTR(field, S_IRUGO, show_ata_dev_##field, NULL) + +ata_dev_attr(class, class); +ata_dev_attr(xfer, pio_mode); +ata_dev_attr(xfer, dma_mode); +ata_dev_attr(xfer, xfer_mode); + + +#define ata_dev_show_simple(field, format_string, cast) \ +static ssize_t \ +show_ata_dev_##field(struct device *dev, \ + struct device_attribute *attr, char *buf) \ +{ \ + struct ata_device *ata_dev = transport_class_to_dev(dev); \ + \ + return snprintf(buf, 20, format_string, cast ata_dev->field); \ +} + +#define ata_dev_simple_attr(field, format_string, type) \ + ata_dev_show_simple(field, format_string, (type)) \ +static DEVICE_ATTR(field, S_IRUGO, \ + show_ata_dev_##field, NULL) + +ata_dev_simple_attr(spdn_cnt, "%d\n", int); + +struct ata_show_ering_arg { + char* buf; + int written; +}; + +static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg) +{ + struct ata_show_ering_arg* arg = void_arg; + struct timespec time; + + jiffies_to_timespec(ent->timestamp,&time); + arg->written += sprintf(arg->buf + arg->written, + "[%5lu.%06lu]", + time.tv_sec, time.tv_nsec); + arg->written += get_ata_err_names(ent->err_mask, + arg->buf + arg->written); + return 0; +} + +static ssize_t +show_ata_dev_ering(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ata_device *ata_dev = transport_class_to_dev(dev); + struct ata_show_ering_arg arg = { buf, 0 }; + + ata_ering_map(&ata_dev->ering, ata_show_ering, &arg); + return arg.written; +} + + +static DEVICE_ATTR(ering, S_IRUGO, show_ata_dev_ering, NULL); + +static ssize_t +show_ata_dev_id(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct 
ata_device *ata_dev = transport_class_to_dev(dev); + int written = 0, i = 0; + + if (ata_dev->class == ATA_DEV_PMP) + return 0; + for(i=0;i<ATA_ID_WORDS;i++) { + written += snprintf(buf+written, 20, "%04x%c", + ata_dev->id[i], + ((i+1) & 7) ? ' ' : '\n'); + } + return written; +} + +static DEVICE_ATTR(id, S_IRUGO, show_ata_dev_id, NULL); + +static ssize_t +show_ata_dev_gscr(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ata_device *ata_dev = transport_class_to_dev(dev); + int written = 0, i = 0; + + if (ata_dev->class != ATA_DEV_PMP) + return 0; + for(i=0;i<SATA_PMP_GSCR_DWORDS;i++) { + written += snprintf(buf+written, 20, "%08x%c", + ata_dev->gscr[i], + ((i+1) & 3) ? ' ' : '\n'); + } + if (SATA_PMP_GSCR_DWORDS & 3) + buf[written-1] = '\n'; + return written; +} + +static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL); + +static DECLARE_TRANSPORT_CLASS(ata_dev_class, + "ata_device", NULL, NULL, NULL); + +static void ata_tdev_release(struct device *dev) +{ + put_device(dev->parent); +} + +/** + * ata_is_ata_dev -- check if a struct device represents a ATA device + * @dev: device to check + * + * Returns: + * %1 if the device represents a ATA device, %0 else + */ +int ata_is_ata_dev(const struct device *dev) +{ + return dev->release == ata_tdev_release; +} + +static int ata_tdev_match(struct attribute_container *cont, + struct device *dev) +{ + struct ata_internal* i = to_ata_internal(ata_scsi_transport_template); + if (!ata_is_ata_dev(dev)) + return 0; + return &i->dev_attr_cont.ac == cont; +} + +/** + * ata_tdev_free -- free a ATA LINK + * @dev: ATA PHY to free + * + * Frees the specified ATA PHY. + * + * Note: + * This function must only be called on a PHY that has not + * successfully been added using ata_tdev_add(). + */ +static void ata_tdev_free(struct ata_device *dev) +{ + transport_destroy_device(&dev->tdev); + put_device(&dev->tdev); +} + +/** + * ata_tdev_delete -- remove ATA device + * @port: ATA PORT to remove + * + * Removes the specified ATA device. + */ +static void ata_tdev_delete(struct ata_device *ata_dev) +{ + struct device *dev = &ata_dev->tdev; + + transport_remove_device(dev); + device_del(dev); + ata_tdev_free(ata_dev); +} + + +/** + * ata_tdev_add -- initialize a transport ATA device structure. + * @ata_dev: ata_dev structure. + * + * Initialize an ATA device structure for sysfs. It will be added in the + * device tree below the ATA LINK device it belongs to. 
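For example, reading dev1.0/class on an ordinary disk returns "ata", xfer_mode returns a string such as "XFER_UDMA_6" from the table above, and ering emits one timestamped line per recorded error via ata_ering_map(); id and gscr dump the raw IDENTIFY or PMP GSCR words respectively and read back empty for the other device type.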
+ * + * Returns %0 on success + */ +static int ata_tdev_add(struct ata_device *ata_dev) +{ + struct device *dev = &ata_dev->tdev; + struct ata_link *link = ata_dev->link; + struct ata_port *ap = link->ap; + int error; + + device_initialize(dev); + dev->parent = get_device(&link->tdev); + dev->release = ata_tdev_release; + if (ata_is_host_link(link)) + dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); + else + dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp); + + transport_setup_device(dev); + error = device_add(dev); + if (error) { + ata_tdev_free(ata_dev); + return error; + } + + transport_add_device(dev); + transport_configure_device(dev); + return 0; +} + + +/* + * Setup / Teardown code + */ + +#define SETUP_TEMPLATE(attrb, field, perm, test) \ + i->private_##attrb[count] = dev_attr_##field; \ + i->private_##attrb[count].attr.mode = perm; \ + i->attrb[count] = &i->private_##attrb[count]; \ + if (test) \ + count++ + +#define SETUP_LINK_ATTRIBUTE(field) \ + SETUP_TEMPLATE(link_attrs, field, S_IRUGO, 1) + +#define SETUP_PORT_ATTRIBUTE(field) \ + SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) + +#define SETUP_DEV_ATTRIBUTE(field) \ + SETUP_TEMPLATE(dev_attrs, field, S_IRUGO, 1) + +/** + * ata_attach_transport -- instantiate ATA transport template + */ +struct scsi_transport_template *ata_attach_transport(void) +{ + struct ata_internal *i; + int count; + + i = kzalloc(sizeof(struct ata_internal), GFP_KERNEL); + if (!i) + return NULL; + + i->t.eh_strategy_handler = ata_scsi_error; + i->t.eh_timed_out = ata_scsi_timed_out; + i->t.user_scan = ata_scsi_user_scan; + + i->t.host_attrs.ac.attrs = &i->port_attrs[0]; + i->t.host_attrs.ac.class = &ata_port_class.class; + i->t.host_attrs.ac.match = ata_tport_match; + transport_container_register(&i->t.host_attrs); + + i->link_attr_cont.ac.class = &ata_link_class.class; + i->link_attr_cont.ac.attrs = &i->link_attrs[0]; + i->link_attr_cont.ac.match = ata_tlink_match; + transport_container_register(&i->link_attr_cont); + + i->dev_attr_cont.ac.class = &ata_dev_class.class; + i->dev_attr_cont.ac.attrs = &i->dev_attrs[0]; + i->dev_attr_cont.ac.match = ata_tdev_match; + transport_container_register(&i->dev_attr_cont); + + count = 0; + SETUP_PORT_ATTRIBUTE(nr_pmp_links); + SETUP_PORT_ATTRIBUTE(idle_irq); + BUG_ON(count > ATA_PORT_ATTRS); + i->port_attrs[count] = NULL; + + count = 0; + SETUP_LINK_ATTRIBUTE(hw_sata_spd_limit); + SETUP_LINK_ATTRIBUTE(sata_spd_limit); + SETUP_LINK_ATTRIBUTE(sata_spd); + BUG_ON(count > ATA_LINK_ATTRS); + i->link_attrs[count] = NULL; + + count = 0; + SETUP_DEV_ATTRIBUTE(class); + SETUP_DEV_ATTRIBUTE(pio_mode); + SETUP_DEV_ATTRIBUTE(dma_mode); + SETUP_DEV_ATTRIBUTE(xfer_mode); + SETUP_DEV_ATTRIBUTE(spdn_cnt); + SETUP_DEV_ATTRIBUTE(ering); + SETUP_DEV_ATTRIBUTE(id); + SETUP_DEV_ATTRIBUTE(gscr); + BUG_ON(count > ATA_DEV_ATTRS); + i->dev_attrs[count] = NULL; + + return &i->t; +} + +/** + * ata_release_transport -- release ATA transport template instance + * @t: transport template instance + */ +void ata_release_transport(struct scsi_transport_template *t) +{ + struct ata_internal *i = to_ata_internal(t); + + transport_container_unregister(&i->t.host_attrs); + transport_container_unregister(&i->link_attr_cont); + transport_container_unregister(&i->dev_attr_cont); + + kfree(i); +} + +__init int libata_transport_init(void) +{ + int error; + + error = transport_class_register(&ata_link_class); + if (error) + goto out_unregister_transport; + error = transport_class_register(&ata_port_class); + if (error) + goto 
out_unregister_link; + error = transport_class_register(&ata_dev_class); + if (error) + goto out_unregister_port; + return 0; + + out_unregister_port: + transport_class_unregister(&ata_port_class); + out_unregister_link: + transport_class_unregister(&ata_link_class); + out_unregister_transport: + return error; + +} + +void __exit libata_transport_exit(void) +{ + transport_class_unregister(&ata_link_class); + transport_class_unregister(&ata_port_class); + transport_class_unregister(&ata_dev_class); +} diff --git a/drivers/ata/libata-transport.h b/drivers/ata/libata-transport.h new file mode 100644 index 00000000000..2820cf864f1 --- /dev/null +++ b/drivers/ata/libata-transport.h @@ -0,0 +1,18 @@ +#ifndef _LIBATA_TRANSPORT_H +#define _LIBATA_TRANSPORT_H + + +extern struct scsi_transport_template *ata_scsi_transport_template; + +int ata_tlink_add(struct ata_link *link); +void ata_tlink_delete(struct ata_link *link); + +int ata_tport_add(struct device *parent, struct ata_port *ap); +void ata_tport_delete(struct ata_port *ap); + +struct scsi_transport_template *ata_attach_transport(void); +void ata_release_transport(struct scsi_transport_template *t); + +__init int libata_transport_init(void); +void __exit libata_transport_exit(void); +#endif diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 9ce1ecc63e3..a9be110dbf5 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -86,6 +86,8 @@ extern int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, extern int ata_dev_configure(struct ata_device *dev); extern int sata_down_spd_limit(struct ata_link *link, u32 spd_limit); extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel); +extern unsigned int ata_dev_set_feature(struct ata_device *dev, + u8 enable, u8 feature); extern void ata_sg_clean(struct ata_queued_cmd *qc); extern void ata_qc_free(struct ata_queued_cmd *qc); extern void ata_qc_issue(struct ata_queued_cmd *qc); @@ -100,8 +102,7 @@ extern int sata_link_init_spd(struct ata_link *link); extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); extern struct ata_port *ata_port_alloc(struct ata_host *host); -extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy); -extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm); +extern const char *sata_spd_string(unsigned int spd); /* libata-acpi.c */ #ifdef CONFIG_ATA_ACPI @@ -137,10 +138,15 @@ extern void ata_scsi_hotplug(struct work_struct *work); extern void ata_schedule_scsi_eh(struct Scsi_Host *shost); extern void ata_scsi_dev_rescan(struct work_struct *work); extern int ata_bus_probe(struct ata_port *ap); +extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, + unsigned int id, unsigned int lun); + /* libata-eh.c */ extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd); extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); +extern void ata_eh_acquire(struct ata_port *ap); +extern void ata_eh_release(struct ata_port *ap); extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd); extern void ata_scsi_error(struct Scsi_Host *host); extern void ata_port_wait_eh(struct ata_port *ap); @@ -164,11 +170,16 @@ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset, ata_postreset_fn_t postreset, struct ata_link **r_failed_disk); extern void ata_eh_finish(struct ata_port *ap); +extern int ata_ering_map(struct ata_ering *ering, + int 
(*map_fn)(struct ata_ering_entry *, void *), + void *arg); /* libata-pmp.c */ #ifdef CONFIG_SATA_PMP extern int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val); extern int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val); +extern int sata_pmp_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, + unsigned hints); extern int sata_pmp_attach(struct ata_device *dev); #else /* CONFIG_SATA_PMP */ static inline int sata_pmp_scr_read(struct ata_link *link, int reg, u32 *val) @@ -181,6 +192,12 @@ static inline int sata_pmp_scr_write(struct ata_link *link, int reg, u32 val) return -EINVAL; } +static inline int sata_pmp_set_lpm(struct ata_link *link, + enum ata_lpm_policy policy, unsigned hints) +{ + return -EINVAL; +} + static inline int sata_pmp_attach(struct ata_device *dev) { return -EINVAL; diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index 9cae65de750..ec2c777fcdb 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c @@ -826,7 +826,7 @@ static void bfin_dev_select(struct ata_port *ap, unsigned int device) * @ctl: value to write */ -static u8 bfin_set_devctl(struct ata_port *ap, u8 ctl) +static void bfin_set_devctl(struct ata_port *ap, u8 ctl) { void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr; write_atapi_register(base, ATA_REG_CTRL, ctl); @@ -1046,7 +1046,7 @@ static void bfin_bus_post_reset(struct ata_port *ap, unsigned int devmask) dev1 = 0; break; } - msleep(50); /* give drive a breather */ + ata_msleep(ap, 50); /* give drive a breather */ } if (dev1) ata_sff_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); @@ -1087,7 +1087,7 @@ static unsigned int bfin_bus_softreset(struct ata_port *ap, * * Old drivers/ide uses the 2mS rule and then waits for ready */ - msleep(150); + ata_msleep(ap, 150); /* Before we perform post reset processing we want to see if * the bus shows 0xFF because the odd clown forgets the D7 diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c index e5f289f59ca..549d28dbf90 100644 --- a/drivers/ata/pata_cmd640.c +++ b/drivers/ata/pata_cmd640.c @@ -161,6 +161,17 @@ static int cmd640_port_start(struct ata_port *ap) return 0; } +static bool cmd640_sff_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + int irq_reg = ap->port_no ? ARTIM23 : CFR; + u8 irq_stat, irq_mask = ap->port_no ? 
0x10 : 0x04; + + pci_read_config_byte(pdev, irq_reg, &irq_stat); + + return irq_stat & irq_mask; +} + static struct scsi_host_template cmd640_sht = { ATA_PIO_SHT(DRV_NAME), }; @@ -169,6 +180,7 @@ static struct ata_port_operations cmd640_port_ops = { .inherits = &ata_sff_port_ops, /* In theory xfer_noirq is not needed once we kill the prefetcher */ .sff_data_xfer = ata_sff_data_xfer_noirq, + .sff_irq_check = cmd640_sff_irq_check, .qc_issue = cmd640_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = cmd640_set_piomode, diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c index e944aa0c551..806292160b3 100644 --- a/drivers/ata/pata_pcmcia.c +++ b/drivers/ata/pata_pcmcia.c @@ -34,7 +34,6 @@ #include <linux/ata.h> #include <linux/libata.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> @@ -168,63 +167,26 @@ static struct ata_port_operations pcmcia_8bit_port_ops = { }; -struct pcmcia_config_check { - unsigned long ctl_base; - int skip_vcc; - int is_kme; -}; - -static int pcmcia_check_one_config(struct pcmcia_device *pdev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) { - struct pcmcia_config_check *stk = priv_data; - - /* Check for matching Vcc, unless we're desperate */ - if (!stk->skip_vcc) { - if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) - return -ENODEV; - } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) - return -ENODEV; - } + int *is_kme = priv_data; + + if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { + pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; } + pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) - pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; - else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) - pdev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; - - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - pdev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - pdev->resource[0]->start = io->win[0].base; - if (!(io->flags & CISTPL_IO_16BIT)) { - pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - } - if (io->nwin == 2) { - pdev->resource[0]->end = 8; - pdev->resource[1]->start = io->win[1].base; - pdev->resource[1]->end = (stk->is_kme) ? 2 : 1; - if (pcmcia_request_io(pdev) != 0) - return -ENODEV; - stk->ctl_base = pdev->resource[1]->start; - } else if ((io->nwin == 1) && (io->win[0].len >= 16)) { - pdev->resource[0]->end = io->win[0].len; - pdev->resource[1]->end = 0; - if (pcmcia_request_io(pdev) != 0) - return -ENODEV; - stk->ctl_base = pdev->resource[0]->start + 0x0e; - } else + if (pdev->resource[1]->end) { + pdev->resource[0]->end = 8; + pdev->resource[1]->end = (*is_kme) ? 
2 : 1; + } else { + if (pdev->resource[0]->end < 16) return -ENODEV; - /* If we've got this far, we're done */ - return 0; } - return -ENODEV; + + return pcmcia_request_io(pdev); } /** @@ -239,7 +201,6 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) { struct ata_host *host; struct ata_port *ap; - struct pcmcia_config_check *stk = NULL; int is_kme = 0, ret = -ENOMEM, p; unsigned long io_base, ctl_base; void __iomem *io_addr, *ctl_addr; @@ -247,10 +208,8 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) struct ata_port_operations *ops = &pcmcia_port_ops; /* Set up attributes in order to probe card and get resources */ - pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - pdev->conf.Attributes = CONF_ENABLE_IRQ; - pdev->conf.IntType = INT_MEMORY_AND_IO; + pdev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | + CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; /* See if we have a manufacturer identifier. Use it to set is_kme for vendor quirks */ @@ -258,25 +217,21 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) ((pdev->card_id == PRODID_KME_KXLC005_A) || (pdev->card_id == PRODID_KME_KXLC005_B))); - /* Allocate resoure probing structures */ - - stk = kzalloc(sizeof(*stk), GFP_KERNEL); - if (!stk) - goto out1; - stk->is_kme = is_kme; - stk->skip_vcc = io_base = ctl_base = 0; - - if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk)) { - stk->skip_vcc = 1; - if (pcmcia_loop_config(pdev, pcmcia_check_one_config, stk)) + if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) { + pdev->config_flags &= ~CONF_AUTO_CHECK_VCC; + if (pcmcia_loop_config(pdev, pcmcia_check_one_config, &is_kme)) goto failed; /* No suitable config found */ } io_base = pdev->resource[0]->start; - ctl_base = stk->ctl_base; + if (pdev->resource[1]->end) + ctl_base = pdev->resource[1]->start; + else + ctl_base = pdev->resource[0]->start + 0x0e; + if (!pdev->irq) goto failed; - ret = pcmcia_request_configuration(pdev, &pdev->conf); + ret = pcmcia_enable_device(pdev); if (ret) goto failed; @@ -329,13 +284,10 @@ static int pcmcia_init_one(struct pcmcia_device *pdev) goto failed; pdev->priv = host; - kfree(stk); return 0; failed: - kfree(stk); pcmcia_disable_device(pdev); -out1: return ret; } @@ -430,9 +382,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices); static struct pcmcia_driver pcmcia_driver = { .owner = THIS_MODULE, - .drv = { - .name = DRV_NAME, - }, + .name = DRV_NAME, .id_table = pcmcia_devices, .probe = pcmcia_init_one, .remove = pcmcia_remove_one, diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c index c39f213e1bb..c2ed5868dda 100644 --- a/drivers/ata/pata_pdc202xx_old.c +++ b/drivers/ata/pata_pdc202xx_old.c @@ -44,6 +44,27 @@ static void pdc202xx_exec_command(struct ata_port *ap, ndelay(400); } +static bool pdc202xx_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned long master = pci_resource_start(pdev, 4); + u8 sc1d = inb(master + 0x1d); + + if (ap->port_no) { + /* + * bit 7: error, bit 6: interrupting, + * bit 5: FIFO full, bit 4: FIFO empty + */ + return sc1d & 0x40; + } else { + /* + * bit 3: error, bit 2: interrupting, + * bit 1: FIFO full, bit 0: FIFO empty + */ + return sc1d & 0x04; + } +} + /** * pdc202xx_configure_piomode - set chip PIO timing * @ap: ATA interface @@ -282,6 +303,7 @@ static struct ata_port_operations pdc2024x_port_ops = { .set_dmamode = pdc202xx_set_dmamode, .sff_exec_command = pdc202xx_exec_command, + .sff_irq_check = 
pdc202xx_irq_check, }; static struct ata_port_operations pdc2026x_port_ops = { @@ -297,6 +319,7 @@ static struct ata_port_operations pdc2026x_port_ops = { .port_start = pdc2026x_port_start, .sff_exec_command = pdc202xx_exec_command, + .sff_irq_check = pdc202xx_irq_check, }; static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c index 6f9cfb24b75..8a51d673e5b 100644 --- a/drivers/ata/pata_samsung_cf.c +++ b/drivers/ata/pata_samsung_cf.c @@ -322,7 +322,7 @@ static int pata_s3c_wait_after_reset(struct ata_link *link, { int rc; - msleep(ATA_WAIT_AFTER_RESET); + ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); /* always check readiness of the master device */ rc = ata_sff_wait_ready(link, deadline); diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c index fe36966f7e3..093715c3273 100644 --- a/drivers/ata/pata_scc.c +++ b/drivers/ata/pata_scc.c @@ -530,7 +530,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, * * Old drivers/ide uses the 2mS rule and then waits for ready. */ - msleep(150); + ata_msleep(ap, 150); /* always check readiness of the master device */ rc = ata_sff_wait_ready(link, deadline); @@ -559,7 +559,7 @@ static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask, lbal = in_be32(ioaddr->lbal_addr); if ((nsect == 1) && (lbal == 1)) break; - msleep(50); /* give drive a breather */ + ata_msleep(ap, 50); /* give drive a breather */ } rc = ata_sff_wait_ready(link, deadline); diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index d3190d7ec30..00eefbd84b3 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -202,14 +202,25 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev) * LOCKING: * spin_lock_irqsave(host lock) */ -void sil680_sff_exec_command(struct ata_port *ap, - const struct ata_taskfile *tf) +static void sil680_sff_exec_command(struct ata_port *ap, + const struct ata_taskfile *tf) { DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command); iowrite8(tf->command, ap->ioaddr.command_addr); ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); } +static bool sil680_sff_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + unsigned long addr = sil680_selreg(ap, 1); + u8 val; + + pci_read_config_byte(pdev, addr, &val); + + return val & 0x08; +} + static struct scsi_host_template sil680_sht = { ATA_BMDMA_SHT(DRV_NAME), }; @@ -218,6 +229,7 @@ static struct scsi_host_template sil680_sht = { static struct ata_port_operations sil680_port_ops = { .inherits = &ata_bmdma32_port_ops, .sff_exec_command = sil680_sff_exec_command, + .sff_irq_check = sil680_sff_irq_check, .cable_detect = sil680_cable_detect, .set_piomode = sil680_set_piomode, .set_dmamode = sil680_set_dmamode, diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c index 98548f640c8..7f5d020ed56 100644 --- a/drivers/ata/pata_sl82c105.c +++ b/drivers/ata/pata_sl82c105.c @@ -227,6 +227,16 @@ static int sl82c105_qc_defer(struct ata_queued_cmd *qc) return 0; } +static bool sl82c105_sff_irq_check(struct ata_port *ap) +{ + struct pci_dev *pdev = to_pci_dev(ap->host->dev); + u32 val, mask = ap->port_no ? 
CTRL_IDE_IRQB : CTRL_IDE_IRQA; + + pci_read_config_dword(pdev, 0x40, &val); + + return val & mask; +} + static struct scsi_host_template sl82c105_sht = { ATA_BMDMA_SHT(DRV_NAME), }; @@ -239,6 +249,7 @@ static struct ata_port_operations sl82c105_port_ops = { .cable_detect = ata_cable_40wire, .set_piomode = sl82c105_set_piomode, .prereset = sl82c105_pre_reset, + .sff_irq_check = sl82c105_sff_irq_check, }; /** diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 7325f77480d..b0214d00d50 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -678,7 +678,7 @@ static void sata_fsl_port_stop(struct ata_port *ap) iowrite32(temp, hcr_base + HCONTROL); /* Poll for controller to go offline - should happen immediately */ - ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1); + ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, 1, 1); ap->private_data = NULL; dma_free_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, @@ -729,7 +729,8 @@ try_offline_again: iowrite32(temp, hcr_base + HCONTROL); /* Poll for controller to go offline */ - temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, ONLINE, 1, 500); + temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, ONLINE, + 1, 500); if (temp & ONLINE) { ata_port_printk(ap, KERN_ERR, @@ -752,7 +753,7 @@ try_offline_again: /* * PHY reset should remain asserted for atleast 1ms */ - msleep(1); + ata_msleep(ap, 1); /* * Now, bring the host controller online again, this can take time @@ -766,7 +767,7 @@ try_offline_again: temp |= HCONTROL_PMP_ATTACHED; iowrite32(temp, hcr_base + HCONTROL); - temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500); + temp = ata_wait_register(ap, hcr_base + HSTATUS, ONLINE, 0, 1, 500); if (!(temp & ONLINE)) { ata_port_printk(ap, KERN_ERR, @@ -784,7 +785,7 @@ try_offline_again: * presence */ - temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0, 1, 500); + temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0, 1, 500); if ((!(temp & 0x10)) || ata_link_offline(link)) { ata_port_printk(ap, KERN_WARNING, "No Device OR PHYRDY change,Hstatus = 0x%x\n", @@ -797,7 +798,7 @@ try_offline_again: * Wait for the first D2H from device,i.e,signature update notification */ start_jiffies = jiffies; - temp = ata_wait_register(hcr_base + HSTATUS, 0xFF, 0x10, + temp = ata_wait_register(ap, hcr_base + HSTATUS, 0xFF, 0x10, 500, jiffies_to_msecs(deadline - start_jiffies)); if ((temp & 0xFF) != 0x18) { @@ -880,7 +881,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, iowrite32(pmp, CQPMP + hcr_base); iowrite32(1, CQ + hcr_base); - temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000); + temp = ata_wait_register(ap, CQ + hcr_base, 0x1, 0x1, 1, 5000); if (temp & 0x1) { ata_port_printk(ap, KERN_WARNING, "ATA_SRST issue failed\n"); @@ -896,7 +897,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, goto err; } - msleep(1); + ata_msleep(ap, 1); /* * SATA device enters reset state after receving a Control register @@ -915,7 +916,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, if (pmp != SATA_PMP_CTRL_PORT) iowrite32(pmp, CQPMP + hcr_base); iowrite32(1, CQ + hcr_base); - msleep(150); /* ?? */ + ata_msleep(ap, 150); /* ?? 
*/ /* * The above command would have signalled an interrupt on command @@ -1137,17 +1138,13 @@ static void sata_fsl_host_intr(struct ata_port *ap) ioread32(hcr_base + CE)); for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { - if (done_mask & (1 << i)) { - qc = ata_qc_from_tag(ap, i); - if (qc) { - ata_qc_complete(qc); - } + if (done_mask & (1 << i)) DPRINTK ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", i, ioread32(hcr_base + CC), ioread32(hcr_base + CA)); - } } + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); return; } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index a36149ebf4a..83a44471b18 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -614,7 +614,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class, writew(IDMA_CTL_RST_ATA, idma_ctl); readw(idma_ctl); /* flush */ - msleep(1); + ata_msleep(ap, 1); writew(0, idma_ctl); rc = sata_link_resume(link, timing, deadline); diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index a9fd9709c26..bf74a36d3cc 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -2743,18 +2743,11 @@ static void mv_err_intr(struct ata_port *ap) } } -static void mv_process_crpb_response(struct ata_port *ap, +static bool mv_process_crpb_response(struct ata_port *ap, struct mv_crpb *response, unsigned int tag, int ncq_enabled) { u8 ata_status; u16 edma_status = le16_to_cpu(response->flags); - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag); - - if (unlikely(!qc)) { - ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n", - __func__, tag); - return; - } /* * edma_status from a response queue entry: @@ -2768,13 +2761,14 @@ static void mv_process_crpb_response(struct ata_port *ap, * Error will be seen/handled by * mv_err_intr(). So do nothing at all here. 
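A note on the ->sff_irq_check() hooks added earlier in these hunks (pata_cmd640, pata_pdc202xx_old, pata_sil680, pata_sl82c105): each one simply reads a controller-specific register and reports whether its port is currently asserting an interrupt. A minimal, hedged sketch of how interrupt-handling code could consult such a hook follows; only the operation itself appears in these patches, the wrapper name is illustrative.

        /*
         * Hedged sketch: ask the port whether it is raising an interrupt.
         * When a driver supplies no hook, report "not ours" so that other
         * devices sharing the IRQ line still get a chance to claim it.
         */
        static bool port_irq_pending(struct ata_port *ap)
        {
                if (ap->ops->sff_irq_check)
                        return ap->ops->sff_irq_check(ap);
                return false;
        }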
*/ - return; + return false; } } ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; if (!ac_err_mask(ata_status)) - ata_qc_complete(qc); + return true; /* else: leave it for mv_err_intr() */ + return false; } static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) @@ -2783,6 +2777,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp struct mv_host_priv *hpriv = ap->host->private_data; u32 in_index; bool work_done = false; + u32 done_mask = 0; int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); /* Get the hardware queue position index */ @@ -2803,15 +2798,19 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp /* Gen II/IIE: get command tag from CRPB entry */ tag = le16_to_cpu(response->id) & 0x1f; } - mv_process_crpb_response(ap, response, tag, ncq_enabled); + if (mv_process_crpb_response(ap, response, tag, ncq_enabled)) + done_mask |= 1 << tag; work_done = true; } - /* Update the software queue position index in hardware */ - if (work_done) + if (work_done) { + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); + + /* Update the software queue position index in hardware */ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), port_mmio + EDMA_RSP_Q_OUT_PTR); + } } static void mv_port_intr(struct ata_port *ap, u32 port_cause) diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index cb89ef8d99d..7254e255fd7 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -873,29 +873,11 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err) ata_port_freeze(ap); else ata_port_abort(ap); - return 1; + return -1; } - if (likely(flags & NV_CPB_RESP_DONE)) { - struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); - VPRINTK("CPB flags done, flags=0x%x\n", flags); - if (likely(qc)) { - DPRINTK("Completing qc from tag %d\n", cpb_num); - ata_qc_complete(qc); - } else { - struct ata_eh_info *ehi = &ap->link.eh_info; - /* Notifier bits set without a command may indicate the drive - is misbehaving. Raise host state machine violation on this - condition. 
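The sata_fsl and sata_mv hunks above (and the sata_nv ones that follow) replace per-tag loops over ata_qc_from_tag()/ata_qc_complete() with a single batched call. The common shape, shown as a hedged sketch with an invented completion register and handler name:

        #define MY_TAG_DONE_REG 0x40    /* illustrative register offset */

        /*
         * Hedged sketch: the interrupt path reads a per-tag completion mask
         * from the controller, then passes the tags that remain outstanding
         * (ap->qc_active ^ done_mask) to ata_qc_complete_multiple(), which
         * completes all finished commands in one go.
         */
        static void my_host_intr(struct ata_port *ap, void __iomem *mmio)
        {
                u32 done_mask = readl(mmio + MY_TAG_DONE_REG);

                if (done_mask)
                        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
        }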
*/ - ata_port_printk(ap, KERN_ERR, - "notifier for tag %d with no cmd?\n", - cpb_num); - ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_RESET; - ata_port_freeze(ap); - return 1; - } - } + if (likely(flags & NV_CPB_RESP_DONE)) + return 1; return 0; } @@ -1018,6 +1000,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) NV_ADMA_STAT_CPBERR | NV_ADMA_STAT_CMD_COMPLETE)) { u32 check_commands = notifier_clears[i]; + u32 done_mask = 0; int pos, rc; if (status & NV_ADMA_STAT_CPBERR) { @@ -1034,10 +1017,13 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance) pos--; rc = nv_adma_check_cpb(ap, pos, notifier_error & (1 << pos)); - if (unlikely(rc)) + if (rc > 0) + done_mask |= 1 << pos; + else if (unlikely(rc < 0)) check_commands = 0; check_commands &= ~(1 << pos); } + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); } } @@ -2132,7 +2118,6 @@ static int nv_swncq_sdbfis(struct ata_port *ap) struct ata_eh_info *ehi = &ap->link.eh_info; u32 sactive; u32 done_mask; - int i; u8 host_stat; u8 lack_dhfis = 0; @@ -2152,27 +2137,11 @@ static int nv_swncq_sdbfis(struct ata_port *ap) sactive = readl(pp->sactive_block); done_mask = pp->qc_active ^ sactive; - if (unlikely(done_mask & sactive)) { - ata_ehi_clear_desc(ehi); - ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition" - "(%08x->%08x)", pp->qc_active, sactive); - ehi->err_mask |= AC_ERR_HSM; - ehi->action |= ATA_EH_RESET; - return -EINVAL; - } - for (i = 0; i < ATA_MAX_QUEUE; i++) { - if (!(done_mask & (1 << i))) - continue; - - qc = ata_qc_from_tag(ap, i); - if (qc) { - ata_qc_complete(qc); - pp->qc_active &= ~(1 << i); - pp->dhfis_bits &= ~(1 << i); - pp->dmafis_bits &= ~(1 << i); - pp->sdbfis_bits |= (1 << i); - } - } + pp->qc_active &= ~done_mask; + pp->dhfis_bits &= ~done_mask; + pp->dmafis_bits &= ~done_mask; + pp->sdbfis_bits |= done_mask; + ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); if (!ap->qc_active) { DPRINTK("over\n"); diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index be7726d7686..af41c6fd125 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -589,9 +589,9 @@ static int sil24_init_port(struct ata_port *ap) sil24_clear_pmp(ap); writel(PORT_CS_INIT, port + PORT_CTRL_STAT); - ata_wait_register(port + PORT_CTRL_STAT, + ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_INIT, PORT_CS_INIT, 10, 100); - tmp = ata_wait_register(port + PORT_CTRL_STAT, + tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0, 10, 100); if ((tmp & (PORT_CS_INIT | PORT_CS_RDY)) != PORT_CS_RDY) { @@ -631,7 +631,7 @@ static int sil24_exec_polled_cmd(struct ata_port *ap, int pmp, writel((u64)paddr >> 32, port + PORT_CMD_ACTIVATE + 4); irq_mask = (PORT_IRQ_COMPLETE | PORT_IRQ_ERROR) << PORT_IRQ_RAW_SHIFT; - irq_stat = ata_wait_register(port + PORT_IRQ_STAT, irq_mask, 0x0, + irq_stat = ata_wait_register(ap, port + PORT_IRQ_STAT, irq_mask, 0x0, 10, timeout_msec); writel(irq_mask, port + PORT_IRQ_STAT); /* clear IRQs */ @@ -719,9 +719,9 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class, "state, performing PORT_RST\n"); writel(PORT_CS_PORT_RST, port + PORT_CTRL_STAT); - msleep(10); + ata_msleep(ap, 10); writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); - ata_wait_register(port + PORT_CTRL_STAT, PORT_CS_RDY, 0, + ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_RDY, 0, 10, 5000); /* restore port configuration */ @@ -740,7 +740,7 @@ static int sil24_hardreset(struct ata_link *link, unsigned int *class, tout_msec = 5000; 
writel(PORT_CS_DEV_RST, port + PORT_CTRL_STAT); - tmp = ata_wait_register(port + PORT_CTRL_STAT, + tmp = ata_wait_register(ap, port + PORT_CTRL_STAT, PORT_CS_DEV_RST, PORT_CS_DEV_RST, 10, tout_msec); @@ -1253,7 +1253,7 @@ static void sil24_init_controller(struct ata_host *host) tmp = readl(port + PORT_CTRL_STAT); if (tmp & PORT_CS_PORT_RST) { writel(PORT_CS_PORT_RST, port + PORT_CTRL_CLR); - tmp = ata_wait_register(port + PORT_CTRL_STAT, + tmp = ata_wait_register(NULL, port + PORT_CTRL_STAT, PORT_CS_PORT_RST, PORT_CS_PORT_RST, 10, 100); if (tmp & PORT_CS_PORT_RST) diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 4730c42a5ee..c21589986c6 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -349,7 +349,7 @@ static int vt6420_prereset(struct ata_link *link, unsigned long deadline) /* wait for phy to become ready, if necessary */ do { - msleep(200); + ata_msleep(link->ap, 200); svia_scr_read(link, SCR_STATUS, &sstatus); if ((sstatus & 0xf) != 1) break; diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index ee9ddeb5341..8cb0347dec2 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -3156,7 +3156,6 @@ static int __devinit ia_init_one(struct pci_dev *pdev, { struct atm_dev *dev; IADEV *iadev; - unsigned long flags; int ret; iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); @@ -3188,19 +3187,14 @@ static int __devinit ia_init_one(struct pci_dev *pdev, ia_dev[iadev_count] = iadev; _ia_dev[iadev_count] = dev; iadev_count++; - spin_lock_init(&iadev->misc_lock); - /* First fixes first. I don't want to think about this now. */ - spin_lock_irqsave(&iadev->misc_lock, flags); if (ia_init(dev) || ia_start(dev)) { IF_INIT(printk("IA register failed!\n");) iadev_count--; ia_dev[iadev_count] = NULL; _ia_dev[iadev_count] = NULL; - spin_unlock_irqrestore(&iadev->misc_lock, flags); ret = -EINVAL; goto err_out_deregister_dev; } - spin_unlock_irqrestore(&iadev->misc_lock, flags); IF_EVENT(printk("iadev_count = %d\n", iadev_count);) iadev->next_board = ia_boards; diff --git a/drivers/atm/iphase.h b/drivers/atm/iphase.h index b2cd20f549c..077735e0e04 100644 --- a/drivers/atm/iphase.h +++ b/drivers/atm/iphase.h @@ -1022,7 +1022,7 @@ typedef struct iadev_t { struct dle_q rx_dle_q; struct free_desc_q *rx_free_desc_qhead; struct sk_buff_head rx_dma_q; - spinlock_t rx_lock, misc_lock; + spinlock_t rx_lock; struct atm_vcc **rx_open; /* list of all open VCs */ u16 num_rx_desc, rx_buf_sz, rxing; u32 rx_pkt_ram, rx_tmp_cnt; diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index f916ddf6393..f46138ab38b 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -444,6 +444,7 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr, struct atm_dev *atmdev = container_of(dev, struct atm_dev, class_dev); struct solos_card *card = atmdev->dev_data; struct sk_buff *skb; + unsigned int len; spin_lock(&card->cli_queue_lock); skb = skb_dequeue(&card->cli_queue[SOLOS_CHAN(atmdev)]); @@ -451,11 +452,12 @@ static ssize_t console_show(struct device *dev, struct device_attribute *attr, if(skb == NULL) return sprintf(buf, "No data.\n"); - memcpy(buf, skb->data, skb->len); - dev_dbg(&card->dev->dev, "len: %d\n", skb->len); + len = skb->len; + memcpy(buf, skb->data, len); + dev_dbg(&card->dev->dev, "len: %d\n", len); kfree_skb(skb); - return skb->len; + return len; } static int send_command(struct solos_card *card, int dev, const char *buf, size_t size) diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 
cbccf9a3cee..abe46edfe5b 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_OPS) += generic_ops.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o +obj-$(CONFIG_PM_OPP) += opp.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 4b29d498125..81f2c84697f 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -46,7 +46,7 @@ int pm_generic_runtime_suspend(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int ret; - ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL; + ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0; return ret; } @@ -65,7 +65,7 @@ int pm_generic_runtime_resume(struct device *dev) const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int ret; - ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL; + ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0; return ret; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 276d5a701dc..31b526661ec 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -51,6 +51,8 @@ static pm_message_t pm_transition; */ static bool transition_started; +static int async_error; + /** * device_pm_init - Initialize the PM-related part of a device object. * @dev: Device object being initialized. @@ -60,7 +62,8 @@ void device_pm_init(struct device *dev) dev->power.status = DPM_ON; init_completion(&dev->power.completion); complete_all(&dev->power.completion); - dev->power.wakeup_count = 0; + dev->power.wakeup = NULL; + spin_lock_init(&dev->power.lock); pm_runtime_init(dev); } @@ -120,6 +123,7 @@ void device_pm_remove(struct device *dev) mutex_lock(&dpm_list_mtx); list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); + device_wakeup_disable(dev); pm_runtime_remove(dev); } @@ -407,7 +411,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) { ktime_t calltime; - s64 usecs64; + u64 usecs64; int usecs; calltime = ktime_get(); @@ -600,6 +604,7 @@ static void dpm_resume(pm_message_t state) INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); pm_transition = state; + async_error = 0; list_for_each_entry(dev, &dpm_list, power.entry) { if (dev->power.status < DPM_OFF) @@ -829,8 +834,6 @@ static int legacy_suspend(struct device *dev, pm_message_t state, return error; } -static int async_error; - /** * device_suspend - Execute "suspend" callbacks for given device. * @dev: Device to handle. @@ -885,6 +888,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) device_unlock(dev); complete_all(&dev->power.completion); + if (error) + async_error = error; + return error; } @@ -894,10 +900,8 @@ static void async_suspend(void *data, async_cookie_t cookie) int error; error = __device_suspend(dev, pm_transition, true); - if (error) { + if (error) pm_dev_err(dev, pm_transition, " async", error); - async_error = error; - } put_device(dev); } @@ -1085,8 +1089,9 @@ EXPORT_SYMBOL_GPL(__suspend_report_result); * @dev: Device to wait for. * @subordinate: Device that needs to wait for @dev. 
*/ -void device_pm_wait_for_dev(struct device *subordinate, struct device *dev) +int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) { dpm_wait(dev, subordinate->power.async_suspend); + return async_error; } EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c new file mode 100644 index 00000000000..2bb9b4cf59d --- /dev/null +++ b/drivers/base/power/opp.c @@ -0,0 +1,628 @@ +/* + * Generic OPP Interface + * + * Copyright (C) 2009-2010 Texas Instruments Incorporated. + * Nishanth Menon + * Romit Dasgupta + * Kevin Hilman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/cpufreq.h> +#include <linux/list.h> +#include <linux/rculist.h> +#include <linux/rcupdate.h> +#include <linux/opp.h> + +/* + * Internal data structure organization with the OPP layer library is as + * follows: + * dev_opp_list (root) + * |- device 1 (represents voltage domain 1) + * | |- opp 1 (availability, freq, voltage) + * | |- opp 2 .. + * ... ... + * | `- opp n .. + * |- device 2 (represents the next voltage domain) + * ... + * `- device m (represents mth voltage domain) + * device 1, 2.. are represented by dev_opp structure while each opp + * is represented by the opp structure. + */ + +/** + * struct opp - Generic OPP description structure + * @node: opp list node. The nodes are maintained throughout the lifetime + * of boot. It is expected only an optimal set of OPPs are + * added to the library by the SoC framework. + * RCU usage: opp list is traversed with RCU locks. node + * modification is possible realtime, hence the modifications + * are protected by the dev_opp_list_lock for integrity. + * IMPORTANT: the opp nodes should be maintained in increasing + * order. + * @available: true/false - marks if this OPP as available or not + * @rate: Frequency in hertz + * @u_volt: Nominal voltage in microvolts corresponding to this OPP + * @dev_opp: points back to the device_opp struct this opp belongs to + * + * This structure stores the OPP information for a given device. + */ +struct opp { + struct list_head node; + + bool available; + unsigned long rate; + unsigned long u_volt; + + struct device_opp *dev_opp; +}; + +/** + * struct device_opp - Device opp structure + * @node: list node - contains the devices with OPPs that + * have been registered. Nodes once added are not modified in this + * list. + * RCU usage: nodes are not modified in the list of device_opp, + * however addition is possible and is secured by dev_opp_list_lock + * @dev: device pointer + * @opp_list: list of opps + * + * This is an internal data structure maintaining the link to opps attached to + * a device. This structure is not meant to be shared to users as it is + * meant for book keeping and private to OPP library + */ +struct device_opp { + struct list_head node; + + struct device *dev; + struct list_head opp_list; +}; + +/* + * The root of the list of all devices. All device_opp structures branch off + * from here, with each device_opp containing the list of opp it supports in + * various states of availability. 
+ */ +static LIST_HEAD(dev_opp_list); +/* Lock to allow exclusive modification to the device and opp lists */ +static DEFINE_MUTEX(dev_opp_list_lock); + +/** + * find_device_opp() - find device_opp struct using device pointer + * @dev: device pointer used to lookup device OPPs + * + * Search list of device OPPs for one containing matching device. Does a RCU + * reader operation to grab the pointer needed. + * + * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or + * -EINVAL based on type of error. + * + * Locking: This function must be called under rcu_read_lock(). device_opp + * is a RCU protected pointer. This means that device_opp is valid as long + * as we are under RCU lock. + */ +static struct device_opp *find_device_opp(struct device *dev) +{ + struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); + + if (unlikely(IS_ERR_OR_NULL(dev))) { + pr_err("%s: Invalid parameters\n", __func__); + return ERR_PTR(-EINVAL); + } + + list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { + if (tmp_dev_opp->dev == dev) { + dev_opp = tmp_dev_opp; + break; + } + } + + return dev_opp; +} + +/** + * opp_get_voltage() - Gets the voltage corresponding to an available opp + * @opp: opp for which voltage has to be returned for + * + * Return voltage in micro volt corresponding to the opp, else + * return 0 + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +unsigned long opp_get_voltage(struct opp *opp) +{ + struct opp *tmp_opp; + unsigned long v = 0; + + tmp_opp = rcu_dereference(opp); + if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + pr_err("%s: Invalid parameters\n", __func__); + else + v = tmp_opp->u_volt; + + return v; +} + +/** + * opp_get_freq() - Gets the frequency corresponding to an available opp + * @opp: opp for which frequency has to be returned for + * + * Return frequency in hertz corresponding to the opp, else + * return 0 + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +unsigned long opp_get_freq(struct opp *opp) +{ + struct opp *tmp_opp; + unsigned long f = 0; + + tmp_opp = rcu_dereference(opp); + if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) + pr_err("%s: Invalid parameters\n", __func__); + else + f = tmp_opp->rate; + + return f; +} + +/** + * opp_get_opp_count() - Get number of opps available in the opp list + * @dev: device for which we do this operation + * + * This function returns the number of available opps if there are any, + * else returns 0 if none or the corresponding error value. + * + * Locking: This function must be called under rcu_read_lock(). 
This function + * internally references two RCU protected structures: device_opp and opp which + * are safe as long as we are under a common RCU locked section. + */ +int opp_get_opp_count(struct device *dev) +{ + struct device_opp *dev_opp; + struct opp *temp_opp; + int count = 0; + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int r = PTR_ERR(dev_opp); + dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); + return r; + } + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available) + count++; + } + + return count; +} + +/** + * opp_find_freq_exact() - search for an exact frequency + * @dev: device for which we do this operation + * @freq: frequency to search for + * @available: true/false - match for available opp + * + * Searches for an exact match in the opp list and returns a pointer to the matching + * opp if found, else returns ERR_PTR, which should be handled + * using IS_ERR. + * + * Note: available is a modifier for the search. If available=true, then the + * match is for an exact frequency which is available in the stored OPP + * table. If false, the match is for an exact frequency which is not available. + * + * This provides a mechanism to enable an opp which is not currently available, + * or the opposite as well. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, + bool available) +{ + struct device_opp *dev_opp; + struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int r = PTR_ERR(dev_opp); + dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); + return ERR_PTR(r); + } + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available == available && + temp_opp->rate == freq) { + opp = temp_opp; + break; + } + } + + return opp; +} + +/** + * opp_find_freq_ceil() - Search for a rounded ceil freq + * @dev: device for which we do this operation + * @freq: Start frequency + * + * Search for the matching ceil *available* OPP from a starting freq + * for a device. + * + * Returns matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer.
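Taken together, the lookup and query helpers documented above are meant to be used inside one RCU read-side section. A hedged consumer-side sketch (the function name and target frequency are illustrative, not part of the patch):

        /*
         * Hedged usage sketch: find the lowest OPP at or above target_hz and
         * read its voltage, all under a single rcu_read_lock() section as the
         * locking notes above require.  The opp pointer must not be used
         * after rcu_read_unlock().
         */
        static int my_get_voltage_for(struct device *dev, unsigned long target_hz,
                                      unsigned long *u_volt)
        {
                unsigned long freq = target_hz;
                struct opp *opp;
                int ret = 0;

                rcu_read_lock();
                opp = opp_find_freq_ceil(dev, &freq);   /* freq is updated to the match */
                if (IS_ERR(opp))
                        ret = PTR_ERR(opp);
                else
                        *u_volt = opp_get_voltage(opp);
                rcu_read_unlock();

                return ret;
        }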
+ */ +struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) +{ + struct device_opp *dev_opp; + struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); + return ERR_PTR(-EINVAL); + } + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) + return opp; + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available && temp_opp->rate >= *freq) { + opp = temp_opp; + *freq = opp->rate; + break; + } + } + + return opp; +} + +/** + * opp_find_freq_floor() - Search for a rounded floor freq + * @dev: device for which we do this operation + * @freq: Start frequency + * + * Search for the matching floor *available* OPP from a starting freq + * for a device. + * + * Returns matching *opp and refreshes *freq accordingly, else returns + * ERR_PTR in case of error and should be handled using IS_ERR. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. The reason for the same is that the opp pointer which is + * returned will remain valid for use with opp_get_{voltage, freq} only while + * under the locked area. The pointer returned must be used prior to unlocking + * with rcu_read_unlock() to maintain the integrity of the pointer. + */ +struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) +{ + struct device_opp *dev_opp; + struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); + + if (!dev || !freq) { + dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); + return ERR_PTR(-EINVAL); + } + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) + return opp; + + list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { + if (temp_opp->available) { + /* go to the next node, before choosing prev */ + if (temp_opp->rate > *freq) + break; + else + opp = temp_opp; + } + } + if (!IS_ERR(opp)) + *freq = opp->rate; + + return opp; +} + +/** + * opp_add() - Add an OPP table from a table definitions + * @dev: device for which we do this operation + * @freq: Frequency in Hz for this OPP + * @u_volt: Voltage in uVolts for this OPP + * + * This function adds an opp definition to the opp list and returns status. + * The opp is made available by default and it can be controlled using + * opp_enable/disable functions. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks + * to keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex cannot be locked. + */ +int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) +{ + struct device_opp *dev_opp = NULL; + struct opp *opp, *new_opp; + struct list_head *head; + + /* allocate new OPP node */ + new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); + if (!new_opp) { + dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); + return -ENOMEM; + } + + /* Hold our list modification lock here */ + mutex_lock(&dev_opp_list_lock); + + /* Check for existing list for 'dev' */ + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) { + /* + * Allocate a new device OPP table. In the infrequent case + * where a new device is needed to be added, we pay this + * penalty. 
+ */ + dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL); + if (!dev_opp) { + mutex_unlock(&dev_opp_list_lock); + kfree(new_opp); + dev_warn(dev, + "%s: Unable to create device OPP structure\n", + __func__); + return -ENOMEM; + } + + dev_opp->dev = dev; + INIT_LIST_HEAD(&dev_opp->opp_list); + + /* Secure the device list modification */ + list_add_rcu(&dev_opp->node, &dev_opp_list); + } + + /* populate the opp table */ + new_opp->dev_opp = dev_opp; + new_opp->rate = freq; + new_opp->u_volt = u_volt; + new_opp->available = true; + + /* Insert new OPP in order of increasing frequency */ + head = &dev_opp->opp_list; + list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { + if (new_opp->rate < opp->rate) + break; + else + head = &opp->node; + } + + list_add_rcu(&new_opp->node, head); + mutex_unlock(&dev_opp_list_lock); + + return 0; +} + +/** + * opp_set_availability() - helper to set the availability of an opp + * @dev: device for which we do this operation + * @freq: OPP frequency to modify availability + * @availability_req: availability status requested for this opp + * + * Set the availability of an OPP with an RCU operation, opp_{enable,disable} + * share a common logic which is isolated here. + * + * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the + * copy operation, returns 0 if no modifcation was done OR modification was + * successful. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function internally uses RCU updater strategy with mutex locks to + * keep the integrity of the internal data structures. Callers should ensure + * that this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +static int opp_set_availability(struct device *dev, unsigned long freq, + bool availability_req) +{ + struct device_opp *tmp_dev_opp, *dev_opp = NULL; + struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); + int r = 0; + + /* keep the node allocated */ + new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); + if (!new_opp) { + dev_warn(dev, "%s: Unable to create OPP\n", __func__); + return -ENOMEM; + } + + mutex_lock(&dev_opp_list_lock); + + /* Find the device_opp */ + list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) { + if (dev == tmp_dev_opp->dev) { + dev_opp = tmp_dev_opp; + break; + } + } + if (IS_ERR(dev_opp)) { + r = PTR_ERR(dev_opp); + dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); + goto unlock; + } + + /* Do we have the frequency? */ + list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) { + if (tmp_opp->rate == freq) { + opp = tmp_opp; + break; + } + } + if (IS_ERR(opp)) { + r = PTR_ERR(opp); + goto unlock; + } + + /* Is update really needed? */ + if (opp->available == availability_req) + goto unlock; + /* copy the old data over */ + *new_opp = *opp; + + /* plug in new node */ + new_opp->available = availability_req; + + list_replace_rcu(&opp->node, &new_opp->node); + mutex_unlock(&dev_opp_list_lock); + synchronize_rcu(); + + /* clean up old opp */ + new_opp = opp; + goto out; + +unlock: + mutex_unlock(&dev_opp_list_lock); +out: + kfree(new_opp); + return r; +} + +/** + * opp_enable() - Enable a specific OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to enable + * + * Enables a provided opp. If the operation is valid, this returns 0, else the + * corresponding error value. 
It is meant to make an OPP available to users + again after it has been temporarily made unavailable with opp_disable. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU and mutex locks to keep the + * integrity of the internal data structures. Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +int opp_enable(struct device *dev, unsigned long freq) +{ + return opp_set_availability(dev, freq, true); +} + +/** + * opp_disable() - Disable a specific OPP + * @dev: device for which we do this operation + * @freq: OPP frequency to disable + * + * Disables a provided opp. If the operation is valid, this returns + * 0, else the corresponding error value. It is meant to be a temporary + * control by users to make this OPP not available until the circumstances are + * right to make it available again (with a call to opp_enable). + * + * Locking: The internal device_opp and opp structures are RCU protected. + * Hence this function indirectly uses RCU and mutex locks to keep the + * integrity of the internal data structures. Callers should ensure that + * this function is *NOT* called under RCU protection or in contexts where + * mutex locking or synchronize_rcu() blocking calls cannot be used. + */ +int opp_disable(struct device *dev, unsigned long freq) +{ + return opp_set_availability(dev, freq, false); +} + +#ifdef CONFIG_CPU_FREQ +/** + * opp_init_cpufreq_table() - create a cpufreq table for a device + * @dev: device for which we do this operation + * @table: cpufreq table returned back to the caller + * + * Generate a cpufreq table for a provided device - this assumes that the + * opp list is already initialized and ready for usage. + * + * This function allocates the required memory for the cpufreq table. The + * caller is expected to do the required maintenance, such as freeing + * the table once it is no longer needed. + * + * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM + * if no memory is available for the operation (table is not populated), and 0 + * if successful (table is populated). + * + * WARNING: Callers must refresh their copy of the table if any function that + * modifies the OPP list (opp_add, opp_enable, opp_disable) has been invoked + * in the interim. + * + * Locking: The internal device_opp and opp structures are RCU protected. + * To simplify the logic, we pretend to be an updater and hold the relevant + * mutex here. Callers should ensure that this function is *NOT* called under + * RCU protection or in contexts where mutex locking cannot be used.
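A hedged provider-side sketch tying these pieces together: an SoC or board layer registers its operating points with opp_add(), masks one with opp_disable(), and then builds a cpufreq table. Frequencies, voltages and the function name are illustrative only.

        /*
         * Hedged sketch: populate the OPP list at init time and hand a
         * cpufreq table to the cpufreq driver; the caller stays responsible
         * for freeing the table on teardown (error checking of opp_add()
         * omitted for brevity).
         */
        static int __init my_soc_opp_init(struct device *dev)
        {
                struct cpufreq_frequency_table *table;
                int ret;

                opp_add(dev, 300000000, 1050000);       /* 300 MHz @ 1.05 V */
                opp_add(dev, 600000000, 1200000);       /* 600 MHz @ 1.20 V */
                opp_add(dev, 800000000, 1350000);       /* 800 MHz @ 1.35 V */

                /* e.g. a slow-binned part cannot sustain the top OPP */
                opp_disable(dev, 800000000);

                ret = opp_init_cpufreq_table(dev, &table);
                if (ret)
                        return ret;

                /* ... register 'table' with cpufreq; free it on teardown ... */
                return 0;
        }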
+ */ +int opp_init_cpufreq_table(struct device *dev, + struct cpufreq_frequency_table **table) +{ + struct device_opp *dev_opp; + struct opp *opp; + struct cpufreq_frequency_table *freq_table; + int i = 0; + + /* Pretend as if I am an updater */ + mutex_lock(&dev_opp_list_lock); + + dev_opp = find_device_opp(dev); + if (IS_ERR(dev_opp)) { + int r = PTR_ERR(dev_opp); + mutex_unlock(&dev_opp_list_lock); + dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r); + return r; + } + + freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * + (opp_get_opp_count(dev) + 1), GFP_KERNEL); + if (!freq_table) { + mutex_unlock(&dev_opp_list_lock); + dev_warn(dev, "%s: Unable to allocate frequency table\n", + __func__); + return -ENOMEM; + } + + list_for_each_entry(opp, &dev_opp->opp_list, node) { + if (opp->available) { + freq_table[i].index = i; + freq_table[i].frequency = opp->rate / 1000; + i++; + } + } + mutex_unlock(&dev_opp_list_lock); + + freq_table[i].index = i; + freq_table[i].frequency = CPUFREQ_TABLE_END; + + *table = &freq_table[0]; + + return 0; +} +#endif /* CONFIG_CPU_FREQ */ diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index c0bd03c83b9..698dde74258 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -34,6 +34,7 @@ extern void device_pm_move_last(struct device *); static inline void device_pm_init(struct device *dev) { + spin_lock_init(&dev->power.lock); pm_runtime_init(dev); } @@ -59,6 +60,7 @@ static inline void device_pm_move_last(struct device *dev) {} extern int dpm_sysfs_add(struct device *); extern void dpm_sysfs_remove(struct device *); +extern void rpm_sysfs_remove(struct device *); #else /* CONFIG_PM */ diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index b78c401ffa7..1dd8676d7f5 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -2,17 +2,55 @@ * drivers/base/power/runtime.c - Helper functions for device run-time PM * * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. + * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> * * This file is released under the GPLv2. */ #include <linux/sched.h> #include <linux/pm_runtime.h> -#include <linux/jiffies.h> +#include "power.h" -static int __pm_runtime_resume(struct device *dev, bool from_wq); -static int __pm_request_idle(struct device *dev); -static int __pm_request_resume(struct device *dev); +static int rpm_resume(struct device *dev, int rpmflags); +static int rpm_suspend(struct device *dev, int rpmflags); + +/** + * update_pm_runtime_accounting - Update the time accounting of power states + * @dev: Device to update the accounting for + * + * In order to be able to have time accounting of the various power states + * (as used by programs such as PowerTOP to show the effectiveness of runtime + * PM), we need to track the time spent in each state. + * update_pm_runtime_accounting must be called each time before the + * runtime_status field is updated, to account the time in the old state + * correctly. 
+ */ +void update_pm_runtime_accounting(struct device *dev) +{ + unsigned long now = jiffies; + int delta; + + delta = now - dev->power.accounting_timestamp; + + if (delta < 0) + delta = 0; + + dev->power.accounting_timestamp = now; + + if (dev->power.disable_depth > 0) + return; + + if (dev->power.runtime_status == RPM_SUSPENDED) + dev->power.suspended_jiffies += delta; + else + dev->power.active_jiffies += delta; +} + +static void __update_runtime_status(struct device *dev, enum rpm_status status) +{ + update_pm_runtime_accounting(dev); + dev->power.runtime_status = status; +} /** * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. @@ -40,62 +78,154 @@ static void pm_runtime_cancel_pending(struct device *dev) dev->power.request = RPM_REQ_NONE; } -/** - * __pm_runtime_idle - Notify device bus type if the device can be suspended. - * @dev: Device to notify the bus type about. +/* + * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time. + * @dev: Device to handle. * - * This function must be called under dev->power.lock with interrupts disabled. + * Compute the autosuspend-delay expiration time based on the device's + * power.last_busy time. If the delay has already expired or is disabled + * (negative) or the power.use_autosuspend flag isn't set, return 0. + * Otherwise return the expiration time in jiffies (adjusted to be nonzero). + * + * This function may be called either with or without dev->power.lock held. + * Either way it can be racy, since power.last_busy may be updated at any time. */ -static int __pm_runtime_idle(struct device *dev) - __releases(&dev->power.lock) __acquires(&dev->power.lock) +unsigned long pm_runtime_autosuspend_expiration(struct device *dev) +{ + int autosuspend_delay; + long elapsed; + unsigned long last_busy; + unsigned long expires = 0; + + if (!dev->power.use_autosuspend) + goto out; + + autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); + if (autosuspend_delay < 0) + goto out; + + last_busy = ACCESS_ONCE(dev->power.last_busy); + elapsed = jiffies - last_busy; + if (elapsed < 0) + goto out; /* jiffies has wrapped around. */ + + /* + * If the autosuspend_delay is >= 1 second, align the timer by rounding + * up to the nearest second. + */ + expires = last_busy + msecs_to_jiffies(autosuspend_delay); + if (autosuspend_delay >= 1000) + expires = round_jiffies(expires); + expires += !expires; + if (elapsed >= expires - last_busy) + expires = 0; /* Already expired. */ + + out: + return expires; +} +EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); + +/** + * rpm_check_suspend_allowed - Test whether a device may be suspended. + * @dev: Device to test. + */ +static int rpm_check_suspend_allowed(struct device *dev) { int retval = 0; if (dev->power.runtime_error) retval = -EINVAL; - else if (dev->power.idle_notification) - retval = -EINPROGRESS; else if (atomic_read(&dev->power.usage_count) > 0 - || dev->power.disable_depth > 0 - || dev->power.runtime_status != RPM_ACTIVE) + || dev->power.disable_depth > 0) retval = -EAGAIN; else if (!pm_children_suspended(dev)) retval = -EBUSY; + + /* Pending resume requests take precedence over suspends. */ + else if ((dev->power.deferred_resume + && dev->power.status == RPM_SUSPENDING) + || (dev->power.request_pending + && dev->power.request == RPM_REQ_RESUME)) + retval = -EAGAIN; + else if (dev->power.runtime_status == RPM_SUSPENDED) + retval = 1; + + return retval; +} + +/** + * rpm_idle - Notify device bus type if the device can be suspended. 
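pm_runtime_autosuspend_expiration() above turns power.last_busy plus the per-device autosuspend delay into an expiry time. On the driver side this machinery is driven by a handful of companion helpers from the same series (pm_runtime_use_autosuspend() and friends); a hedged sketch of typical use, with illustrative driver function names:

        /*
         * Hedged sketch: opt a device into autosuspend at probe time, then
         * mark it busy and drop the usage count when I/O completes so it
         * suspends only after the configured period of idleness.
         */
        static int my_probe(struct device *dev)
        {
                pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s of idleness */
                pm_runtime_use_autosuspend(dev);
                pm_runtime_enable(dev);
                return 0;
        }

        static void my_io_complete(struct device *dev)
        {
                pm_runtime_mark_last_busy(dev);         /* refreshes power.last_busy */
                pm_runtime_put_autosuspend(dev);        /* suspend once the delay expires */
        }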
+ * @dev: Device to notify the bus type about. + * @rpmflags: Flag bits. + * + * Check if the device's run-time PM status allows it to be suspended. If + * another idle notification has been started earlier, return immediately. If + * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise + * run the ->runtime_idle() callback directly. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static int rpm_idle(struct device *dev, int rpmflags) +{ + int (*callback)(struct device *); + int retval; + + retval = rpm_check_suspend_allowed(dev); + if (retval < 0) + ; /* Conditions are wrong. */ + + /* Idle notifications are allowed only in the RPM_ACTIVE state. */ + else if (dev->power.runtime_status != RPM_ACTIVE) + retval = -EAGAIN; + + /* + * Any pending request other than an idle notification takes + * precedence over us, except that the timer may be running. + */ + else if (dev->power.request_pending && + dev->power.request > RPM_REQ_IDLE) + retval = -EAGAIN; + + /* Act as though RPM_NOWAIT is always set. */ + else if (dev->power.idle_notification) + retval = -EINPROGRESS; if (retval) goto out; - if (dev->power.request_pending) { - /* - * If an idle notification request is pending, cancel it. Any - * other pending request takes precedence over us. - */ - if (dev->power.request == RPM_REQ_IDLE) { - dev->power.request = RPM_REQ_NONE; - } else if (dev->power.request != RPM_REQ_NONE) { - retval = -EAGAIN; - goto out; + /* Pending requests need to be canceled. */ + dev->power.request = RPM_REQ_NONE; + + if (dev->power.no_callbacks) { + /* Assume ->runtime_idle() callback would have suspended. */ + retval = rpm_suspend(dev, rpmflags); + goto out; + } + + /* Carry out an asynchronous or a synchronous idle notification. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = RPM_REQ_IDLE; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); } + goto out; } dev->power.idle_notification = true; - if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) { - spin_unlock_irq(&dev->power.lock); - - dev->bus->pm->runtime_idle(dev); - - spin_lock_irq(&dev->power.lock); - } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { - spin_unlock_irq(&dev->power.lock); - - dev->type->pm->runtime_idle(dev); + if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) + callback = dev->bus->pm->runtime_idle; + else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) + callback = dev->type->pm->runtime_idle; + else if (dev->class && dev->class->pm) + callback = dev->class->pm->runtime_idle; + else + callback = NULL; - spin_lock_irq(&dev->power.lock); - } else if (dev->class && dev->class->pm - && dev->class->pm->runtime_idle) { + if (callback) { spin_unlock_irq(&dev->power.lock); - dev->class->pm->runtime_idle(dev); + callback(dev); spin_lock_irq(&dev->power.lock); } @@ -108,113 +238,99 @@ static int __pm_runtime_idle(struct device *dev) } /** - * pm_runtime_idle - Notify device bus type if the device can be suspended. - * @dev: Device to notify the bus type about. + * rpm_callback - Run a given runtime PM callback for a given device. + * @cb: Runtime PM callback to run. + * @dev: Device to run the callback for. 
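As the selection logic in rpm_idle() above shows (bus type first, then device type, then class), the run-time PM callbacks are taken from a dev_pm_ops structure supplied by the provider. A hedged sketch of that provider side, with illustrative names:

        /*
         * Hedged sketch: a bus type (or class) publishes its run-time PM
         * callbacks through dev_pm_ops; the core picks them up as shown above.
         */
        static int my_bus_runtime_suspend(struct device *dev)
        {
                /* quiesce the device, gate its clocks, etc. */
                return 0;
        }

        static int my_bus_runtime_resume(struct device *dev)
        {
                return 0;
        }

        static int my_bus_runtime_idle(struct device *dev)
        {
                /* typically just request a suspend when nothing is pending */
                return pm_runtime_suspend(dev);
        }

        static const struct dev_pm_ops my_bus_pm_ops = {
                .runtime_suspend = my_bus_runtime_suspend,
                .runtime_resume  = my_bus_runtime_resume,
                .runtime_idle    = my_bus_runtime_idle,
        };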
*/ -int pm_runtime_idle(struct device *dev) +static int rpm_callback(int (*cb)(struct device *), struct device *dev) + __releases(&dev->power.lock) __acquires(&dev->power.lock) { int retval; - spin_lock_irq(&dev->power.lock); - retval = __pm_runtime_idle(dev); - spin_unlock_irq(&dev->power.lock); + if (!cb) + return -ENOSYS; - return retval; -} -EXPORT_SYMBOL_GPL(pm_runtime_idle); - - -/** - * update_pm_runtime_accounting - Update the time accounting of power states - * @dev: Device to update the accounting for - * - * In order to be able to have time accounting of the various power states - * (as used by programs such as PowerTOP to show the effectiveness of runtime - * PM), we need to track the time spent in each state. - * update_pm_runtime_accounting must be called each time before the - * runtime_status field is updated, to account the time in the old state - * correctly. - */ -void update_pm_runtime_accounting(struct device *dev) -{ - unsigned long now = jiffies; - int delta; - - delta = now - dev->power.accounting_timestamp; - - if (delta < 0) - delta = 0; + spin_unlock_irq(&dev->power.lock); - dev->power.accounting_timestamp = now; + retval = cb(dev); - if (dev->power.disable_depth > 0) - return; - - if (dev->power.runtime_status == RPM_SUSPENDED) - dev->power.suspended_jiffies += delta; - else - dev->power.active_jiffies += delta; -} + spin_lock_irq(&dev->power.lock); + dev->power.runtime_error = retval; -static void __update_runtime_status(struct device *dev, enum rpm_status status) -{ - update_pm_runtime_accounting(dev); - dev->power.runtime_status = status; + return retval; } /** - * __pm_runtime_suspend - Carry out run-time suspend of given device. + * rpm_suspend - Carry out run-time suspend of given device. * @dev: Device to suspend. - * @from_wq: If set, the function has been called via pm_wq. + * @rpmflags: Flag bits. * - * Check if the device can be suspended and run the ->runtime_suspend() callback - * provided by its bus type. If another suspend has been started earlier, wait - * for it to finish. If an idle notification or suspend request is pending or - * scheduled, cancel it. + * Check if the device's run-time PM status allows it to be suspended. If + * another suspend has been started earlier, either return immediately or wait + * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a + * pending idle notification. If the RPM_ASYNC flag is set then queue a + * suspend request; otherwise run the ->runtime_suspend() callback directly. + * If a deferred resume was requested while the callback was running then carry + * it out; otherwise send an idle notification for the device (if the suspend + * failed) or for its parent (if the suspend succeeded). * * This function must be called under dev->power.lock with interrupts disabled. */ -int __pm_runtime_suspend(struct device *dev, bool from_wq) +static int rpm_suspend(struct device *dev, int rpmflags) __releases(&dev->power.lock) __acquires(&dev->power.lock) { + int (*callback)(struct device *); struct device *parent = NULL; - bool notify = false; - int retval = 0; + int retval; - dev_dbg(dev, "__pm_runtime_suspend()%s!\n", - from_wq ? " from workqueue" : ""); + dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); repeat: - if (dev->power.runtime_error) { - retval = -EINVAL; - goto out; - } + retval = rpm_check_suspend_allowed(dev); - /* Pending resume requests take precedence over us. 
*/ - if (dev->power.request_pending - && dev->power.request == RPM_REQ_RESUME) { + if (retval < 0) + ; /* Conditions are wrong. */ + + /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ + else if (dev->power.runtime_status == RPM_RESUMING && + !(rpmflags & RPM_ASYNC)) retval = -EAGAIN; + if (retval) goto out; + + /* If the autosuspend_delay time hasn't expired yet, reschedule. */ + if ((rpmflags & RPM_AUTO) + && dev->power.runtime_status != RPM_SUSPENDING) { + unsigned long expires = pm_runtime_autosuspend_expiration(dev); + + if (expires != 0) { + /* Pending requests need to be canceled. */ + dev->power.request = RPM_REQ_NONE; + + /* + * Optimization: If the timer is already running and is + * set to expire at or before the autosuspend delay, + * avoid the overhead of resetting it. Just let it + * expire; pm_suspend_timer_fn() will take care of the + * rest. + */ + if (!(dev->power.timer_expires && time_before_eq( + dev->power.timer_expires, expires))) { + dev->power.timer_expires = expires; + mod_timer(&dev->power.suspend_timer, expires); + } + dev->power.timer_autosuspends = 1; + goto out; + } } /* Other scheduled or pending requests need to be canceled. */ pm_runtime_cancel_pending(dev); - if (dev->power.runtime_status == RPM_SUSPENDED) - retval = 1; - else if (dev->power.runtime_status == RPM_RESUMING - || dev->power.disable_depth > 0 - || atomic_read(&dev->power.usage_count) > 0) - retval = -EAGAIN; - else if (!pm_children_suspended(dev)) - retval = -EBUSY; - if (retval) - goto out; - if (dev->power.runtime_status == RPM_SUSPENDING) { DEFINE_WAIT(wait); - if (from_wq) { + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { retval = -EINPROGRESS; goto out; } @@ -236,46 +352,42 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) goto repeat; } - __update_runtime_status(dev, RPM_SUSPENDING); dev->power.deferred_resume = false; + if (dev->power.no_callbacks) + goto no_callback; /* Assume success. */ + + /* Carry out an asynchronous or a synchronous suspend. */ + if (rpmflags & RPM_ASYNC) { + dev->power.request = (rpmflags & RPM_AUTO) ? 
+ RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + goto out; + } - if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { - spin_unlock_irq(&dev->power.lock); - - retval = dev->bus->pm->runtime_suspend(dev); - - spin_lock_irq(&dev->power.lock); - dev->power.runtime_error = retval; - } else if (dev->type && dev->type->pm - && dev->type->pm->runtime_suspend) { - spin_unlock_irq(&dev->power.lock); - - retval = dev->type->pm->runtime_suspend(dev); - - spin_lock_irq(&dev->power.lock); - dev->power.runtime_error = retval; - } else if (dev->class && dev->class->pm - && dev->class->pm->runtime_suspend) { - spin_unlock_irq(&dev->power.lock); - - retval = dev->class->pm->runtime_suspend(dev); + __update_runtime_status(dev, RPM_SUSPENDING); - spin_lock_irq(&dev->power.lock); - dev->power.runtime_error = retval; - } else { - retval = -ENOSYS; - } + if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) + callback = dev->bus->pm->runtime_suspend; + else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend) + callback = dev->type->pm->runtime_suspend; + else if (dev->class && dev->class->pm) + callback = dev->class->pm->runtime_suspend; + else + callback = NULL; + retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_ACTIVE); - if (retval == -EAGAIN || retval == -EBUSY) { - if (dev->power.timer_expires == 0) - notify = true; + dev->power.deferred_resume = 0; + if (retval == -EAGAIN || retval == -EBUSY) dev->power.runtime_error = 0; - } else { + else pm_runtime_cancel_pending(dev); - } } else { + no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -287,14 +399,11 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) wake_up_all(&dev->power.wait_queue); if (dev->power.deferred_resume) { - __pm_runtime_resume(dev, false); + rpm_resume(dev, 0); retval = -EAGAIN; goto out; } - if (notify) - __pm_runtime_idle(dev); - if (parent && !parent->power.ignore_children) { spin_unlock_irq(&dev->power.lock); @@ -304,72 +413,69 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) } out: - dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval); - - return retval; -} - -/** - * pm_runtime_suspend - Carry out run-time suspend of given device. - * @dev: Device to suspend. - */ -int pm_runtime_suspend(struct device *dev) -{ - int retval; - - spin_lock_irq(&dev->power.lock); - retval = __pm_runtime_suspend(dev, false); - spin_unlock_irq(&dev->power.lock); + dev_dbg(dev, "%s returns %d\n", __func__, retval); return retval; } -EXPORT_SYMBOL_GPL(pm_runtime_suspend); /** - * __pm_runtime_resume - Carry out run-time resume of given device. + * rpm_resume - Carry out run-time resume of given device. * @dev: Device to resume. - * @from_wq: If set, the function has been called via pm_wq. + * @rpmflags: Flag bits. * - * Check if the device can be woken up and run the ->runtime_resume() callback - * provided by its bus type. If another resume has been started earlier, wait - * for it to finish. If there's a suspend running in parallel with this - * function, wait for it to finish and resume the device. Cancel any scheduled - * or pending requests. + * Check if the device's run-time PM status allows it to be resumed. Cancel + * any scheduled or pending requests. 
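In the suspend path above, a callback returning -EAGAIN or -EBUSY leaves the device RPM_ACTIVE without latching power.runtime_error, so later suspend attempts can still go ahead; any other error sticks until the status is explicitly reset. A sketch of callbacks written against that contract (the driver name and its helpers are hypothetical):

static int foo_runtime_suspend(struct device *dev)
{
        struct foo_dev *foo = dev_get_drvdata(dev);

        if (foo_transfer_in_progress(foo))      /* hypothetical helper */
                return -EBUSY;  /* "try again later", not a hard error */

        foo_save_registers(foo);                /* hypothetical helper */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        foo_restore_registers(dev_get_drvdata(dev));    /* hypothetical */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        .runtime_suspend = foo_runtime_suspend,
        .runtime_resume  = foo_runtime_resume,
};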
If another resume has been started + * earlier, either return imediately or wait for it to finish, depending on the + * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in + * parallel with this function, either tell the other process to resume after + * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC + * flag is set then queue a resume request; otherwise run the + * ->runtime_resume() callback directly. Queue an idle notification for the + * device if the resume succeeded. * * This function must be called under dev->power.lock with interrupts disabled. */ -int __pm_runtime_resume(struct device *dev, bool from_wq) +static int rpm_resume(struct device *dev, int rpmflags) __releases(&dev->power.lock) __acquires(&dev->power.lock) { + int (*callback)(struct device *); struct device *parent = NULL; int retval = 0; - dev_dbg(dev, "__pm_runtime_resume()%s!\n", - from_wq ? " from workqueue" : ""); + dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); repeat: - if (dev->power.runtime_error) { + if (dev->power.runtime_error) retval = -EINVAL; + else if (dev->power.disable_depth > 0) + retval = -EAGAIN; + if (retval) goto out; - } - pm_runtime_cancel_pending(dev); + /* + * Other scheduled or pending requests need to be canceled. Small + * optimization: If an autosuspend timer is running, leave it running + * rather than cancelling it now only to restart it again in the near + * future. + */ + dev->power.request = RPM_REQ_NONE; + if (!dev->power.timer_autosuspends) + pm_runtime_deactivate_timer(dev); - if (dev->power.runtime_status == RPM_ACTIVE) + if (dev->power.runtime_status == RPM_ACTIVE) { retval = 1; - else if (dev->power.disable_depth > 0) - retval = -EAGAIN; - if (retval) goto out; + } if (dev->power.runtime_status == RPM_RESUMING || dev->power.runtime_status == RPM_SUSPENDING) { DEFINE_WAIT(wait); - if (from_wq) { + if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { if (dev->power.runtime_status == RPM_SUSPENDING) dev->power.deferred_resume = true; - retval = -EINPROGRESS; + else + retval = -EINPROGRESS; goto out; } @@ -391,6 +497,34 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) goto repeat; } + /* + * See if we can skip waking up the parent. This is safe only if + * power.no_callbacks is set, because otherwise we don't know whether + * the resume will actually succeed. + */ + if (dev->power.no_callbacks && !parent && dev->parent) { + spin_lock(&dev->parent->power.lock); + if (dev->parent->power.disable_depth > 0 + || dev->parent->power.ignore_children + || dev->parent->power.runtime_status == RPM_ACTIVE) { + atomic_inc(&dev->parent->power.child_count); + spin_unlock(&dev->parent->power.lock); + goto no_callback; /* Assume success. */ + } + spin_unlock(&dev->parent->power.lock); + } + + /* Carry out an asynchronous or a synchronous resume. 
*/ + if (rpmflags & RPM_ASYNC) { + dev->power.request = RPM_REQ_RESUME; + if (!dev->power.request_pending) { + dev->power.request_pending = true; + queue_work(pm_wq, &dev->power.work); + } + retval = 0; + goto out; + } + if (!parent && dev->parent) { /* * Increment the parent's resume counter and resume it if @@ -408,7 +542,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) */ if (!parent->power.disable_depth && !parent->power.ignore_children) { - __pm_runtime_resume(parent, false); + rpm_resume(parent, 0); if (parent->power.runtime_status != RPM_ACTIVE) retval = -EBUSY; } @@ -420,39 +554,26 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) goto repeat; } - __update_runtime_status(dev, RPM_RESUMING); - - if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) { - spin_unlock_irq(&dev->power.lock); - - retval = dev->bus->pm->runtime_resume(dev); - - spin_lock_irq(&dev->power.lock); - dev->power.runtime_error = retval; - } else if (dev->type && dev->type->pm - && dev->type->pm->runtime_resume) { - spin_unlock_irq(&dev->power.lock); - - retval = dev->type->pm->runtime_resume(dev); + if (dev->power.no_callbacks) + goto no_callback; /* Assume success. */ - spin_lock_irq(&dev->power.lock); - dev->power.runtime_error = retval; - } else if (dev->class && dev->class->pm - && dev->class->pm->runtime_resume) { - spin_unlock_irq(&dev->power.lock); - - retval = dev->class->pm->runtime_resume(dev); + __update_runtime_status(dev, RPM_RESUMING); - spin_lock_irq(&dev->power.lock); - dev->power.runtime_error = retval; - } else { - retval = -ENOSYS; - } + if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) + callback = dev->bus->pm->runtime_resume; + else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume) + callback = dev->type->pm->runtime_resume; + else if (dev->class && dev->class->pm) + callback = dev->class->pm->runtime_resume; + else + callback = NULL; + retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_cancel_pending(dev); } else { + no_callback: __update_runtime_status(dev, RPM_ACTIVE); if (parent) atomic_inc(&parent->power.child_count); @@ -460,7 +581,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) wake_up_all(&dev->power.wait_queue); if (!retval) - __pm_request_idle(dev); + rpm_idle(dev, RPM_ASYNC); out: if (parent) { @@ -471,28 +592,12 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) spin_lock_irq(&dev->power.lock); } - dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval); + dev_dbg(dev, "%s returns %d\n", __func__, retval); return retval; } /** - * pm_runtime_resume - Carry out run-time resume of given device. - * @dev: Device to suspend. - */ -int pm_runtime_resume(struct device *dev) -{ - int retval; - - spin_lock_irq(&dev->power.lock); - retval = __pm_runtime_resume(dev, false); - spin_unlock_irq(&dev->power.lock); - - return retval; -} -EXPORT_SYMBOL_GPL(pm_runtime_resume); - -/** * pm_runtime_work - Universal run-time PM work function. * @work: Work structure used for scheduling the execution of this function. 
* @@ -517,13 +622,16 @@ static void pm_runtime_work(struct work_struct *work) case RPM_REQ_NONE: break; case RPM_REQ_IDLE: - __pm_runtime_idle(dev); + rpm_idle(dev, RPM_NOWAIT); break; case RPM_REQ_SUSPEND: - __pm_runtime_suspend(dev, true); + rpm_suspend(dev, RPM_NOWAIT); + break; + case RPM_REQ_AUTOSUSPEND: + rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); break; case RPM_REQ_RESUME: - __pm_runtime_resume(dev, true); + rpm_resume(dev, RPM_NOWAIT); break; } @@ -532,117 +640,10 @@ static void pm_runtime_work(struct work_struct *work) } /** - * __pm_request_idle - Submit an idle notification request for given device. - * @dev: Device to handle. - * - * Check if the device's run-time PM status is correct for suspending the device - * and queue up a request to run __pm_runtime_idle() for it. - * - * This function must be called under dev->power.lock with interrupts disabled. - */ -static int __pm_request_idle(struct device *dev) -{ - int retval = 0; - - if (dev->power.runtime_error) - retval = -EINVAL; - else if (atomic_read(&dev->power.usage_count) > 0 - || dev->power.disable_depth > 0 - || dev->power.runtime_status == RPM_SUSPENDED - || dev->power.runtime_status == RPM_SUSPENDING) - retval = -EAGAIN; - else if (!pm_children_suspended(dev)) - retval = -EBUSY; - if (retval) - return retval; - - if (dev->power.request_pending) { - /* Any requests other then RPM_REQ_IDLE take precedence. */ - if (dev->power.request == RPM_REQ_NONE) - dev->power.request = RPM_REQ_IDLE; - else if (dev->power.request != RPM_REQ_IDLE) - retval = -EAGAIN; - return retval; - } - - dev->power.request = RPM_REQ_IDLE; - dev->power.request_pending = true; - queue_work(pm_wq, &dev->power.work); - - return retval; -} - -/** - * pm_request_idle - Submit an idle notification request for given device. - * @dev: Device to handle. - */ -int pm_request_idle(struct device *dev) -{ - unsigned long flags; - int retval; - - spin_lock_irqsave(&dev->power.lock, flags); - retval = __pm_request_idle(dev); - spin_unlock_irqrestore(&dev->power.lock, flags); - - return retval; -} -EXPORT_SYMBOL_GPL(pm_request_idle); - -/** - * __pm_request_suspend - Submit a suspend request for given device. - * @dev: Device to suspend. - * - * This function must be called under dev->power.lock with interrupts disabled. - */ -static int __pm_request_suspend(struct device *dev) -{ - int retval = 0; - - if (dev->power.runtime_error) - return -EINVAL; - - if (dev->power.runtime_status == RPM_SUSPENDED) - retval = 1; - else if (atomic_read(&dev->power.usage_count) > 0 - || dev->power.disable_depth > 0) - retval = -EAGAIN; - else if (dev->power.runtime_status == RPM_SUSPENDING) - retval = -EINPROGRESS; - else if (!pm_children_suspended(dev)) - retval = -EBUSY; - if (retval < 0) - return retval; - - pm_runtime_deactivate_timer(dev); - - if (dev->power.request_pending) { - /* - * Pending resume requests take precedence over us, but we can - * overtake any other pending request. - */ - if (dev->power.request == RPM_REQ_RESUME) - retval = -EAGAIN; - else if (dev->power.request != RPM_REQ_SUSPEND) - dev->power.request = retval ? - RPM_REQ_NONE : RPM_REQ_SUSPEND; - return retval; - } else if (retval) { - return retval; - } - - dev->power.request = RPM_REQ_SUSPEND; - dev->power.request_pending = true; - queue_work(pm_wq, &dev->power.work); - - return 0; -} - -/** * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). * @data: Device pointer passed by pm_schedule_suspend(). * - * Check if the time is right and execute __pm_request_suspend() in that case. 
+ * Check if the time is right and queue a suspend request. */ static void pm_suspend_timer_fn(unsigned long data) { @@ -656,7 +657,8 @@ static void pm_suspend_timer_fn(unsigned long data) /* If 'expire' is after 'jiffies' we've been called too early. */ if (expires > 0 && !time_after(expires, jiffies)) { dev->power.timer_expires = 0; - __pm_request_suspend(dev); + rpm_suspend(dev, dev->power.timer_autosuspends ? + (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); } spin_unlock_irqrestore(&dev->power.lock, flags); @@ -670,47 +672,25 @@ static void pm_suspend_timer_fn(unsigned long data) int pm_schedule_suspend(struct device *dev, unsigned int delay) { unsigned long flags; - int retval = 0; + int retval; spin_lock_irqsave(&dev->power.lock, flags); - if (dev->power.runtime_error) { - retval = -EINVAL; - goto out; - } - if (!delay) { - retval = __pm_request_suspend(dev); + retval = rpm_suspend(dev, RPM_ASYNC); goto out; } - pm_runtime_deactivate_timer(dev); - - if (dev->power.request_pending) { - /* - * Pending resume requests take precedence over us, but any - * other pending requests have to be canceled. - */ - if (dev->power.request == RPM_REQ_RESUME) { - retval = -EAGAIN; - goto out; - } - dev->power.request = RPM_REQ_NONE; - } - - if (dev->power.runtime_status == RPM_SUSPENDED) - retval = 1; - else if (atomic_read(&dev->power.usage_count) > 0 - || dev->power.disable_depth > 0) - retval = -EAGAIN; - else if (!pm_children_suspended(dev)) - retval = -EBUSY; + retval = rpm_check_suspend_allowed(dev); if (retval) goto out; + /* Other scheduled or pending requests need to be canceled. */ + pm_runtime_cancel_pending(dev); + dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); - if (!dev->power.timer_expires) - dev->power.timer_expires = 1; + dev->power.timer_expires += !dev->power.timer_expires; + dev->power.timer_autosuspends = 0; mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); out: @@ -721,103 +701,88 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) EXPORT_SYMBOL_GPL(pm_schedule_suspend); /** - * pm_request_resume - Submit a resume request for given device. - * @dev: Device to resume. + * __pm_runtime_idle - Entry point for run-time idle operations. + * @dev: Device to send idle notification for. + * @rpmflags: Flag bits. * - * This function must be called under dev->power.lock with interrupts disabled. + * If the RPM_GET_PUT flag is set, decrement the device's usage count and + * return immediately if it is larger than zero. Then carry out an idle + * notification, either synchronous or asynchronous. + * + * This routine may be called in atomic context if the RPM_ASYNC flag is set. */ -static int __pm_request_resume(struct device *dev) +int __pm_runtime_idle(struct device *dev, int rpmflags) { - int retval = 0; - - if (dev->power.runtime_error) - return -EINVAL; - - if (dev->power.runtime_status == RPM_ACTIVE) - retval = 1; - else if (dev->power.runtime_status == RPM_RESUMING) - retval = -EINPROGRESS; - else if (dev->power.disable_depth > 0) - retval = -EAGAIN; - if (retval < 0) - return retval; - - pm_runtime_deactivate_timer(dev); + unsigned long flags; + int retval; - if (dev->power.runtime_status == RPM_SUSPENDING) { - dev->power.deferred_resume = true; - return retval; + if (rpmflags & RPM_GET_PUT) { + if (!atomic_dec_and_test(&dev->power.usage_count)) + return 0; } - if (dev->power.request_pending) { - /* If non-resume request is pending, we can overtake it. */ - dev->power.request = retval ? 
RPM_REQ_NONE : RPM_REQ_RESUME; - return retval; - } - if (retval) - return retval; - dev->power.request = RPM_REQ_RESUME; - dev->power.request_pending = true; - queue_work(pm_wq, &dev->power.work); + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_idle(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } +EXPORT_SYMBOL_GPL(__pm_runtime_idle); /** - * pm_request_resume - Submit a resume request for given device. - * @dev: Device to resume. + * __pm_runtime_suspend - Entry point for run-time put/suspend operations. + * @dev: Device to suspend. + * @rpmflags: Flag bits. + * + * If the RPM_GET_PUT flag is set, decrement the device's usage count and + * return immediately if it is larger than zero. Then carry out a suspend, + * either synchronous or asynchronous. + * + * This routine may be called in atomic context if the RPM_ASYNC flag is set. */ -int pm_request_resume(struct device *dev) +int __pm_runtime_suspend(struct device *dev, int rpmflags) { unsigned long flags; int retval; + if (rpmflags & RPM_GET_PUT) { + if (!atomic_dec_and_test(&dev->power.usage_count)) + return 0; + } + spin_lock_irqsave(&dev->power.lock, flags); - retval = __pm_request_resume(dev); + retval = rpm_suspend(dev, rpmflags); spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } -EXPORT_SYMBOL_GPL(pm_request_resume); +EXPORT_SYMBOL_GPL(__pm_runtime_suspend); /** - * __pm_runtime_get - Reference count a device and wake it up, if necessary. - * @dev: Device to handle. - * @sync: If set and the device is suspended, resume it synchronously. + * __pm_runtime_resume - Entry point for run-time resume operations. + * @dev: Device to resume. + * @rpmflags: Flag bits. + * + * If the RPM_GET_PUT flag is set, increment the device's usage count. Then + * carry out a resume, either synchronous or asynchronous. * - * Increment the usage count of the device and resume it or submit a resume - * request for it, depending on the value of @sync. + * This routine may be called in atomic context if the RPM_ASYNC flag is set. */ -int __pm_runtime_get(struct device *dev, bool sync) +int __pm_runtime_resume(struct device *dev, int rpmflags) { + unsigned long flags; int retval; - atomic_inc(&dev->power.usage_count); - retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); + if (rpmflags & RPM_GET_PUT) + atomic_inc(&dev->power.usage_count); - return retval; -} -EXPORT_SYMBOL_GPL(__pm_runtime_get); - -/** - * __pm_runtime_put - Decrement the device's usage counter and notify its bus. - * @dev: Device to handle. - * @sync: If the device's bus type is to be notified, do that synchronously. - * - * Decrement the usage count of the device and if it reaches zero, carry out a - * synchronous idle notification or submit an idle notification request for it, - * depending on the value of @sync. - */ -int __pm_runtime_put(struct device *dev, bool sync) -{ - int retval = 0; - - if (atomic_dec_and_test(&dev->power.usage_count)) - retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev); + spin_lock_irqsave(&dev->power.lock, flags); + retval = rpm_resume(dev, rpmflags); + spin_unlock_irqrestore(&dev->power.lock, flags); return retval; } -EXPORT_SYMBOL_GPL(__pm_runtime_put); +EXPORT_SYMBOL_GPL(__pm_runtime_resume); /** * __pm_runtime_set_status - Set run-time PM status of a device. 
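The three exported entry points above fold several formerly exported functions into flag arguments. The convenience wrappers in include/linux/pm_runtime.h are not part of this hunk; as a sketch (not a copy of the header), they are expected to reduce to calls like these:

static inline int pm_runtime_suspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, 0);
}

static inline int pm_runtime_autosuspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, RPM_AUTO);
}

static inline int pm_request_resume(struct device *dev)
{
        return __pm_runtime_resume(dev, RPM_ASYNC);
}

static inline int pm_runtime_get_sync(struct device *dev)
{
        return __pm_runtime_resume(dev, RPM_GET_PUT);
}

static inline int pm_runtime_put(struct device *dev)
{
        return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

static inline int pm_runtime_put_autosuspend(struct device *dev)
{
        return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}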
@@ -968,7 +933,7 @@ int pm_runtime_barrier(struct device *dev) if (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME) { - __pm_runtime_resume(dev, false); + rpm_resume(dev, 0); retval = 1; } @@ -1017,7 +982,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) */ pm_runtime_get_noresume(dev); - __pm_runtime_resume(dev, false); + rpm_resume(dev, 0); pm_runtime_put_noidle(dev); } @@ -1065,7 +1030,7 @@ void pm_runtime_forbid(struct device *dev) dev->power.runtime_auto = false; atomic_inc(&dev->power.usage_count); - __pm_runtime_resume(dev, false); + rpm_resume(dev, 0); out: spin_unlock_irq(&dev->power.lock); @@ -1086,7 +1051,7 @@ void pm_runtime_allow(struct device *dev) dev->power.runtime_auto = true; if (atomic_dec_and_test(&dev->power.usage_count)) - __pm_runtime_idle(dev); + rpm_idle(dev, RPM_AUTO); out: spin_unlock_irq(&dev->power.lock); @@ -1094,13 +1059,110 @@ void pm_runtime_allow(struct device *dev) EXPORT_SYMBOL_GPL(pm_runtime_allow); /** + * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device. + * @dev: Device to handle. + * + * Set the power.no_callbacks flag, which tells the PM core that this + * device is power-managed through its parent and has no run-time PM + * callbacks of its own. The run-time sysfs attributes will be removed. + * + */ +void pm_runtime_no_callbacks(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + dev->power.no_callbacks = 1; + spin_unlock_irq(&dev->power.lock); + if (device_is_registered(dev)) + rpm_sysfs_remove(dev); +} +EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); + +/** + * update_autosuspend - Handle a change to a device's autosuspend settings. + * @dev: Device to handle. + * @old_delay: The former autosuspend_delay value. + * @old_use: The former use_autosuspend value. + * + * Prevent runtime suspend if the new delay is negative and use_autosuspend is + * set; otherwise allow it. Send an idle notification if suspends are allowed. + * + * This function must be called under dev->power.lock with interrupts disabled. + */ +static void update_autosuspend(struct device *dev, int old_delay, int old_use) +{ + int delay = dev->power.autosuspend_delay; + + /* Should runtime suspend be prevented now? */ + if (dev->power.use_autosuspend && delay < 0) { + + /* If it used to be allowed then prevent it. */ + if (!old_use || old_delay >= 0) { + atomic_inc(&dev->power.usage_count); + rpm_resume(dev, 0); + } + } + + /* Runtime suspend should be allowed now. */ + else { + + /* If it used to be prevented then allow it. */ + if (old_use && old_delay < 0) + atomic_dec(&dev->power.usage_count); + + /* Maybe we can autosuspend now. */ + rpm_idle(dev, RPM_AUTO); + } +} + +/** + * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. + * @dev: Device to handle. + * @delay: Value of the new delay in milliseconds. + * + * Set the device's power.autosuspend_delay value. If it changes to negative + * and the power.use_autosuspend flag is set, prevent run-time suspends. If it + * changes the other way, allow run-time suspends. + */ +void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) +{ + int old_delay, old_use; + + spin_lock_irq(&dev->power.lock); + old_delay = dev->power.autosuspend_delay; + old_use = dev->power.use_autosuspend; + dev->power.autosuspend_delay = delay; + update_autosuspend(dev, old_delay, old_use); + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); + +/** + * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. 
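Together with pm_runtime_set_autosuspend_delay() above, a driver opts in to autosuspend during probe. A minimal sketch (the probe function, its bus type and the 2-second delay are made up; pm_runtime_use_autosuspend() is the wrapper expected around __pm_runtime_use_autosuspend()):

static int foo_probe(struct device *dev)
{
        /* Let the device sit idle for 2 s before it is autosuspended. */
        pm_runtime_set_autosuspend_delay(dev, 2000);
        pm_runtime_use_autosuspend(dev);

        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
        return 0;
}

User space can later tune or disable the delay through the new power/autosuspend_delay_ms attribute; writing a negative value prevents runtime suspend altogether, matching the update_autosuspend() logic above.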
+ * @dev: Device to handle. + * @use: New value for use_autosuspend. + * + * Set the device's power.use_autosuspend flag, and allow or prevent run-time + * suspends as needed. + */ +void __pm_runtime_use_autosuspend(struct device *dev, bool use) +{ + int old_delay, old_use; + + spin_lock_irq(&dev->power.lock); + old_delay = dev->power.autosuspend_delay; + old_use = dev->power.use_autosuspend; + dev->power.use_autosuspend = use; + update_autosuspend(dev, old_delay, old_use); + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); + +/** * pm_runtime_init - Initialize run-time PM fields in given device object. * @dev: Device object to initialize. */ void pm_runtime_init(struct device *dev) { - spin_lock_init(&dev->power.lock); - dev->power.runtime_status = RPM_SUSPENDED; dev->power.idle_notification = false; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index e56b4388fe6..0b1e46bf3e5 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -75,12 +75,27 @@ * attribute is set to "enabled" by bus type code or device drivers and in * that cases it should be safe to leave the default value. * + * autosuspend_delay_ms - Report/change a device's autosuspend_delay value + * + * Some drivers don't want to carry out a runtime suspend as soon as a + * device becomes idle; they want it always to remain idle for some period + * of time before suspending it. This period is the autosuspend_delay + * value (expressed in milliseconds) and it can be controlled by the user. + * If the value is negative then the device will never be runtime + * suspended. + * + * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay + * value are used only if the driver calls pm_runtime_use_autosuspend(). + * * wakeup_count - Report the number of wakeup events related to the device */ static const char enabled[] = "enabled"; static const char disabled[] = "disabled"; +const char power_group_name[] = "power"; +EXPORT_SYMBOL_GPL(power_group_name); + #ifdef CONFIG_PM_RUNTIME static const char ctrl_auto[] = "auto"; static const char ctrl_on[] = "on"; @@ -170,6 +185,33 @@ static ssize_t rtpm_status_show(struct device *dev, } static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); + +static ssize_t autosuspend_delay_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + if (!dev->power.use_autosuspend) + return -EIO; + return sprintf(buf, "%d\n", dev->power.autosuspend_delay); +} + +static ssize_t autosuspend_delay_ms_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t n) +{ + long delay; + + if (!dev->power.use_autosuspend) + return -EIO; + + if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay) + return -EINVAL; + + pm_runtime_set_autosuspend_delay(dev, delay); + return n; +} + +static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, + autosuspend_delay_ms_store); + #endif static ssize_t @@ -210,11 +252,122 @@ static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%lu\n", dev->power.wakeup_count); + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->event_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? 
sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); } static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); -#endif + +static ssize_t wakeup_active_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->active_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); + +static ssize_t wakeup_hit_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned long count = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + count = dev->power.wakeup->hit_count; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL); + +static ssize_t wakeup_active_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + unsigned int active = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + active = dev->power.wakeup->active; + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL); + +static ssize_t wakeup_total_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->total_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL); + +static ssize_t wakeup_max_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->max_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL); + +static ssize_t wakeup_last_time_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + s64 msec = 0; + bool enabled = false; + + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + msec = ktime_to_ms(dev->power.wakeup->last_time); + enabled = true; + } + spin_unlock_irq(&dev->power.lock); + return enabled ? 
sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); +} + +static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); +#endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_ADVANCED_DEBUG #ifdef CONFIG_PM_RUNTIME @@ -279,19 +432,20 @@ static DEVICE_ATTR(async, 0644, async_show, async_store); #endif /* CONFIG_PM_ADVANCED_DEBUG */ static struct attribute * power_attrs[] = { -#ifdef CONFIG_PM_RUNTIME - &dev_attr_control.attr, - &dev_attr_runtime_status.attr, - &dev_attr_runtime_suspended_time.attr, - &dev_attr_runtime_active_time.attr, -#endif &dev_attr_wakeup.attr, #ifdef CONFIG_PM_SLEEP &dev_attr_wakeup_count.attr, + &dev_attr_wakeup_active_count.attr, + &dev_attr_wakeup_hit_count.attr, + &dev_attr_wakeup_active.attr, + &dev_attr_wakeup_total_time_ms.attr, + &dev_attr_wakeup_max_time_ms.attr, + &dev_attr_wakeup_last_time_ms.attr, #endif #ifdef CONFIG_PM_ADVANCED_DEBUG &dev_attr_async.attr, #ifdef CONFIG_PM_RUNTIME + &dev_attr_runtime_status.attr, &dev_attr_runtime_usage.attr, &dev_attr_runtime_active_kids.attr, &dev_attr_runtime_enabled.attr, @@ -300,10 +454,53 @@ static struct attribute * power_attrs[] = { NULL, }; static struct attribute_group pm_attr_group = { - .name = "power", + .name = power_group_name, .attrs = power_attrs, }; +#ifdef CONFIG_PM_RUNTIME + +static struct attribute *runtime_attrs[] = { +#ifndef CONFIG_PM_ADVANCED_DEBUG + &dev_attr_runtime_status.attr, +#endif + &dev_attr_control.attr, + &dev_attr_runtime_suspended_time.attr, + &dev_attr_runtime_active_time.attr, + &dev_attr_autosuspend_delay_ms.attr, + NULL, +}; +static struct attribute_group pm_runtime_attr_group = { + .name = power_group_name, + .attrs = runtime_attrs, +}; + +int dpm_sysfs_add(struct device *dev) +{ + int rc; + + rc = sysfs_create_group(&dev->kobj, &pm_attr_group); + if (rc == 0 && !dev->power.no_callbacks) { + rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); + if (rc) + sysfs_remove_group(&dev->kobj, &pm_attr_group); + } + return rc; +} + +void rpm_sysfs_remove(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); +} + +void dpm_sysfs_remove(struct device *dev) +{ + rpm_sysfs_remove(dev); + sysfs_remove_group(&dev->kobj, &pm_attr_group); +} + +#else /* CONFIG_PM_RUNTIME */ + int dpm_sysfs_add(struct device * dev) { return sysfs_create_group(&dev->kobj, &pm_attr_group); @@ -313,3 +510,5 @@ void dpm_sysfs_remove(struct device * dev) { sysfs_remove_group(&dev->kobj, &pm_attr_group); } + +#endif diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 0a1a2c4dbc6..9f4258df4cf 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c @@ -188,8 +188,10 @@ static int show_file_hash(unsigned int value) static int show_dev_hash(unsigned int value) { int match = 0; - struct list_head *entry = dpm_list.prev; + struct list_head *entry; + device_pm_lock(); + entry = dpm_list.prev; while (entry != &dpm_list) { struct device * dev = to_device(entry); unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); @@ -199,11 +201,43 @@ static int show_dev_hash(unsigned int value) } entry = entry->prev; } + device_pm_unlock(); return match; } static unsigned int hash_value_early_read; +int show_trace_dev_match(char *buf, size_t size) +{ + unsigned int value = hash_value_early_read / (USERHASH * FILEHASH); + int ret = 0; + struct list_head *entry; + + /* + * It's possible that multiple devices will match the hash and we can't + * tell which is the culprit, so it's best to output them all. 
+ */ + device_pm_lock(); + entry = dpm_list.prev; + while (size && entry != &dpm_list) { + struct device *dev = to_device(entry); + unsigned int hash = hash_string(DEVSEED, dev_name(dev), + DEVHASH); + if (hash == value) { + int len = snprintf(buf, size, "%s\n", + dev_driver_string(dev)); + if (len > size) + len = size; + buf += len; + ret += len; + size -= len; + } + entry = entry->prev; + } + device_pm_unlock(); + return ret; +} + static int early_resume_init(void) { hash_value_early_read = read_magic_time(); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index eb594facfc3..71c5528e1c3 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -11,7 +11,12 @@ #include <linux/sched.h> #include <linux/capability.h> #include <linux/suspend.h> -#include <linux/pm.h> +#include <linux/seq_file.h> +#include <linux/debugfs.h> + +#include "power.h" + +#define TIMEOUT 100 /* * If set, the suspend/hibernate code will abort transitions to a sleep state @@ -20,18 +25,244 @@ bool events_check_enabled; /* The counter of registered wakeup events. */ -static unsigned long event_count; +static atomic_t event_count = ATOMIC_INIT(0); /* A preserved old value of event_count. */ -static unsigned long saved_event_count; +static unsigned int saved_count; /* The counter of wakeup events being processed. */ -static unsigned long events_in_progress; +static atomic_t events_in_progress = ATOMIC_INIT(0); static DEFINE_SPINLOCK(events_lock); static void pm_wakeup_timer_fn(unsigned long data); -static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0); -static unsigned long events_timer_expires; +static LIST_HEAD(wakeup_sources); + +/** + * wakeup_source_create - Create a struct wakeup_source object. + * @name: Name of the new wakeup source. + */ +struct wakeup_source *wakeup_source_create(const char *name) +{ + struct wakeup_source *ws; + + ws = kzalloc(sizeof(*ws), GFP_KERNEL); + if (!ws) + return NULL; + + spin_lock_init(&ws->lock); + if (name) + ws->name = kstrdup(name, GFP_KERNEL); + + return ws; +} +EXPORT_SYMBOL_GPL(wakeup_source_create); + +/** + * wakeup_source_destroy - Destroy a struct wakeup_source object. + * @ws: Wakeup source to destroy. + */ +void wakeup_source_destroy(struct wakeup_source *ws) +{ + if (!ws) + return; + + spin_lock_irq(&ws->lock); + while (ws->active) { + spin_unlock_irq(&ws->lock); + + schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); + + spin_lock_irq(&ws->lock); + } + spin_unlock_irq(&ws->lock); + + kfree(ws->name); + kfree(ws); +} +EXPORT_SYMBOL_GPL(wakeup_source_destroy); + +/** + * wakeup_source_add - Add given object to the list of wakeup sources. + * @ws: Wakeup source object to add to the list. + */ +void wakeup_source_add(struct wakeup_source *ws) +{ + if (WARN_ON(!ws)) + return; + + setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); + ws->active = false; + + spin_lock_irq(&events_lock); + list_add_rcu(&ws->entry, &wakeup_sources); + spin_unlock_irq(&events_lock); + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(wakeup_source_add); + +/** + * wakeup_source_remove - Remove given object from the wakeup sources list. + * @ws: Wakeup source object to remove from the list. + */ +void wakeup_source_remove(struct wakeup_source *ws) +{ + if (WARN_ON(!ws)) + return; + + spin_lock_irq(&events_lock); + list_del_rcu(&ws->entry); + spin_unlock_irq(&events_lock); + synchronize_rcu(); +} +EXPORT_SYMBOL_GPL(wakeup_source_remove); + +/** + * wakeup_source_register - Create wakeup source and add it to the list. 
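Code that is not tied to a struct device can use the registration helpers directly and bracket its event processing with the __pm_stay_awake()/__pm_relax() pair introduced further down. A sketch with a hypothetical source name and processing function:

static struct wakeup_source *foo_ws;

static int __init foo_init(void)
{
        foo_ws = wakeup_source_register("foo_events");
        return foo_ws ? 0 : -ENOMEM;
}

static void foo_handle_event(void)
{
        __pm_stay_awake(foo_ws);        /* block system suspend while working */
        foo_process_event();            /* hypothetical */
        __pm_relax(foo_ws);             /* suspend may proceed again */
}

static void __exit foo_exit(void)
{
        wakeup_source_unregister(foo_ws);
}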
+ * @name: Name of the wakeup source to register. + */ +struct wakeup_source *wakeup_source_register(const char *name) +{ + struct wakeup_source *ws; + + ws = wakeup_source_create(name); + if (ws) + wakeup_source_add(ws); + + return ws; +} +EXPORT_SYMBOL_GPL(wakeup_source_register); + +/** + * wakeup_source_unregister - Remove wakeup source from the list and remove it. + * @ws: Wakeup source object to unregister. + */ +void wakeup_source_unregister(struct wakeup_source *ws) +{ + wakeup_source_remove(ws); + wakeup_source_destroy(ws); +} +EXPORT_SYMBOL_GPL(wakeup_source_unregister); + +/** + * device_wakeup_attach - Attach a wakeup source object to a device object. + * @dev: Device to handle. + * @ws: Wakeup source object to attach to @dev. + * + * This causes @dev to be treated as a wakeup device. + */ +static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) +{ + spin_lock_irq(&dev->power.lock); + if (dev->power.wakeup) { + spin_unlock_irq(&dev->power.lock); + return -EEXIST; + } + dev->power.wakeup = ws; + spin_unlock_irq(&dev->power.lock); + return 0; +} + +/** + * device_wakeup_enable - Enable given device to be a wakeup source. + * @dev: Device to handle. + * + * Create a wakeup source object, register it and attach it to @dev. + */ +int device_wakeup_enable(struct device *dev) +{ + struct wakeup_source *ws; + int ret; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + ws = wakeup_source_register(dev_name(dev)); + if (!ws) + return -ENOMEM; + + ret = device_wakeup_attach(dev, ws); + if (ret) + wakeup_source_unregister(ws); + + return ret; +} +EXPORT_SYMBOL_GPL(device_wakeup_enable); + +/** + * device_wakeup_detach - Detach a device's wakeup source object from it. + * @dev: Device to detach the wakeup source object from. + * + * After it returns, @dev will not be treated as a wakeup device any more. + */ +static struct wakeup_source *device_wakeup_detach(struct device *dev) +{ + struct wakeup_source *ws; + + spin_lock_irq(&dev->power.lock); + ws = dev->power.wakeup; + dev->power.wakeup = NULL; + spin_unlock_irq(&dev->power.lock); + return ws; +} + +/** + * device_wakeup_disable - Do not regard a device as a wakeup source any more. + * @dev: Device to handle. + * + * Detach the @dev's wakeup source object from it, unregister this wakeup source + * object and destroy it. + */ +int device_wakeup_disable(struct device *dev) +{ + struct wakeup_source *ws; + + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + ws = device_wakeup_detach(dev); + if (ws) + wakeup_source_unregister(ws); + + return 0; +} +EXPORT_SYMBOL_GPL(device_wakeup_disable); + +/** + * device_init_wakeup - Device wakeup initialization. + * @dev: Device to handle. + * @enable: Whether or not to enable @dev as a wakeup device. + * + * By default, most devices should leave wakeup disabled. The exceptions are + * devices that everyone expects to be wakeup sources: keyboards, power buttons, + * possibly network interfaces, etc. + */ +int device_init_wakeup(struct device *dev, bool enable) +{ + int ret = 0; + + if (enable) { + device_set_wakeup_capable(dev, true); + ret = device_wakeup_enable(dev); + } else { + device_set_wakeup_capable(dev, false); + } + + return ret; +} +EXPORT_SYMBOL_GPL(device_init_wakeup); + +/** + * device_set_wakeup_enable - Enable or disable a device to wake up the system. + * @dev: Device to handle. + */ +int device_set_wakeup_enable(struct device *dev, bool enable) +{ + if (!dev || !dev->power.can_wakeup) + return -EINVAL; + + return enable ? 
device_wakeup_enable(dev) : device_wakeup_disable(dev); +} +EXPORT_SYMBOL_GPL(device_set_wakeup_enable); /* * The functions below use the observation that each wakeup event starts a @@ -55,118 +286,259 @@ static unsigned long events_timer_expires; * knowledge, however, may not be available to it, so it can simply specify time * to wait before the system can be suspended and pass it as the second * argument of pm_wakeup_event(). + * + * It is valid to call pm_relax() after pm_wakeup_event(), in which case the + * "no suspend" period will be ended either by the pm_relax(), or by the timer + * function executed when the timer expires, whichever comes first. */ /** + * wakup_source_activate - Mark given wakeup source as active. + * @ws: Wakeup source to handle. + * + * Update the @ws' statistics and, if @ws has just been activated, notify the PM + * core of the event by incrementing the counter of of wakeup events being + * processed. + */ +static void wakeup_source_activate(struct wakeup_source *ws) +{ + ws->active = true; + ws->active_count++; + ws->timer_expires = jiffies; + ws->last_time = ktime_get(); + + atomic_inc(&events_in_progress); +} + +/** + * __pm_stay_awake - Notify the PM core of a wakeup event. + * @ws: Wakeup source object associated with the source of the event. + * + * It is safe to call this function from interrupt context. + */ +void __pm_stay_awake(struct wakeup_source *ws) +{ + unsigned long flags; + + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + ws->event_count++; + if (!ws->active) + wakeup_source_activate(ws); + spin_unlock_irqrestore(&ws->lock, flags); +} +EXPORT_SYMBOL_GPL(__pm_stay_awake); + +/** * pm_stay_awake - Notify the PM core that a wakeup event is being processed. * @dev: Device the wakeup event is related to. * - * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the - * counter of wakeup events being processed. If @dev is not NULL, the counter - * of wakeup events related to @dev is incremented too. + * Notify the PM core of a wakeup event (signaled by @dev) by calling + * __pm_stay_awake for the @dev's wakeup source object. * * Call this function after detecting of a wakeup event if pm_relax() is going * to be called directly after processing the event (and possibly passing it to * user space for further processing). - * - * It is safe to call this function from interrupt context. */ void pm_stay_awake(struct device *dev) { unsigned long flags; - spin_lock_irqsave(&events_lock, flags); - if (dev) - dev->power.wakeup_count++; + if (!dev) + return; - events_in_progress++; - spin_unlock_irqrestore(&events_lock, flags); + spin_lock_irqsave(&dev->power.lock, flags); + __pm_stay_awake(dev->power.wakeup); + spin_unlock_irqrestore(&dev->power.lock, flags); } +EXPORT_SYMBOL_GPL(pm_stay_awake); /** - * pm_relax - Notify the PM core that processing of a wakeup event has ended. + * wakup_source_deactivate - Mark given wakeup source as inactive. + * @ws: Wakeup source to handle. * - * Notify the PM core that a wakeup event has been processed by decrementing - * the counter of wakeup events being processed and incrementing the counter - * of registered wakeup events. + * Update the @ws' statistics and notify the PM core that the wakeup source has + * become inactive by decrementing the counter of wakeup events being processed + * and incrementing the counter of registered wakeup events. 
+ */ +static void wakeup_source_deactivate(struct wakeup_source *ws) +{ + ktime_t duration; + ktime_t now; + + ws->relax_count++; + /* + * __pm_relax() may be called directly or from a timer function. + * If it is called directly right after the timer function has been + * started, but before the timer function calls __pm_relax(), it is + * possible that __pm_stay_awake() will be called in the meantime and + * will set ws->active. Then, ws->active may be cleared immediately + * by the __pm_relax() called from the timer function, but in such a + * case ws->relax_count will be different from ws->active_count. + */ + if (ws->relax_count != ws->active_count) { + ws->relax_count--; + return; + } + + ws->active = false; + + now = ktime_get(); + duration = ktime_sub(now, ws->last_time); + ws->total_time = ktime_add(ws->total_time, duration); + if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) + ws->max_time = duration; + + del_timer(&ws->timer); + + /* + * event_count has to be incremented before events_in_progress is + * modified, so that the callers of pm_check_wakeup_events() and + * pm_save_wakeup_count() don't see the old value of event_count and + * events_in_progress equal to zero at the same time. + */ + atomic_inc(&event_count); + smp_mb__before_atomic_dec(); + atomic_dec(&events_in_progress); +} + +/** + * __pm_relax - Notify the PM core that processing of a wakeup event has ended. + * @ws: Wakeup source object associated with the source of the event. * * Call this function for wakeup events whose processing started with calling - * pm_stay_awake(). + * __pm_stay_awake(). * * It is safe to call it from interrupt context. */ -void pm_relax(void) +void __pm_relax(struct wakeup_source *ws) { unsigned long flags; - spin_lock_irqsave(&events_lock, flags); - if (events_in_progress) { - events_in_progress--; - event_count++; - } - spin_unlock_irqrestore(&events_lock, flags); + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + if (ws->active) + wakeup_source_deactivate(ws); + spin_unlock_irqrestore(&ws->lock, flags); +} +EXPORT_SYMBOL_GPL(__pm_relax); + +/** + * pm_relax - Notify the PM core that processing of a wakeup event has ended. + * @dev: Device that signaled the event. + * + * Execute __pm_relax() for the @dev's wakeup source object. + */ +void pm_relax(struct device *dev) +{ + unsigned long flags; + + if (!dev) + return; + + spin_lock_irqsave(&dev->power.lock, flags); + __pm_relax(dev->power.wakeup); + spin_unlock_irqrestore(&dev->power.lock, flags); } +EXPORT_SYMBOL_GPL(pm_relax); /** * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. + * @data: Address of the wakeup source object associated with the event source. * - * Decrease the counter of wakeup events being processed after it was increased - * by pm_wakeup_event(). + * Call __pm_relax() for the wakeup source whose address is stored in @data. */ static void pm_wakeup_timer_fn(unsigned long data) { + __pm_relax((struct wakeup_source *)data); +} + +/** + * __pm_wakeup_event - Notify the PM core of a wakeup event. + * @ws: Wakeup source object associated with the event source. + * @msec: Anticipated event processing time (in milliseconds). + * + * Notify the PM core of a wakeup event whose source is @ws that will take + * approximately @msec milliseconds to be processed by the kernel. If @ws is + * not active, activate it. If @msec is nonzero, set up the @ws' timer to + * execute pm_wakeup_timer_fn() in future. + * + * It is safe to call this function from interrupt context. 
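The timed variant is what interrupt handlers typically rely on when the event is handed off to another context and there is no convenient point at which to call pm_relax(). A sketch using the pm_wakeup_event() device wrapper defined just below (the driver structure and the 100 ms budget are hypothetical):

static irqreturn_t foo_irq(int irq, void *data)
{
        struct foo_dev *foo = data;

        /*
         * Keep the system awake for roughly 100 ms, long enough for the
         * work item to pick the event up and hand it to user space.
         */
        pm_wakeup_event(foo->dev, 100);

        schedule_work(&foo->event_work);        /* hypothetical */
        return IRQ_HANDLED;
}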
+ */ +void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) +{ unsigned long flags; + unsigned long expires; - spin_lock_irqsave(&events_lock, flags); - if (events_timer_expires - && time_before_eq(events_timer_expires, jiffies)) { - events_in_progress--; - events_timer_expires = 0; + if (!ws) + return; + + spin_lock_irqsave(&ws->lock, flags); + + ws->event_count++; + if (!ws->active) + wakeup_source_activate(ws); + + if (!msec) { + wakeup_source_deactivate(ws); + goto unlock; } - spin_unlock_irqrestore(&events_lock, flags); + + expires = jiffies + msecs_to_jiffies(msec); + if (!expires) + expires = 1; + + if (time_after(expires, ws->timer_expires)) { + mod_timer(&ws->timer, expires); + ws->timer_expires = expires; + } + + unlock: + spin_unlock_irqrestore(&ws->lock, flags); } +EXPORT_SYMBOL_GPL(__pm_wakeup_event); + /** * pm_wakeup_event - Notify the PM core of a wakeup event. * @dev: Device the wakeup event is related to. * @msec: Anticipated event processing time (in milliseconds). * - * Notify the PM core of a wakeup event (signaled by @dev) that will take - * approximately @msec milliseconds to be processed by the kernel. Increment - * the counter of registered wakeup events and (if @msec is nonzero) set up - * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the - * timer has not been set up already, increment the counter of wakeup events - * being processed). If @dev is not NULL, the counter of wakeup events related - * to @dev is incremented too. - * - * It is safe to call this function from interrupt context. + * Call __pm_wakeup_event() for the @dev's wakeup source object. */ void pm_wakeup_event(struct device *dev, unsigned int msec) { unsigned long flags; - spin_lock_irqsave(&events_lock, flags); - event_count++; - if (dev) - dev->power.wakeup_count++; - - if (msec) { - unsigned long expires; + if (!dev) + return; - expires = jiffies + msecs_to_jiffies(msec); - if (!expires) - expires = 1; + spin_lock_irqsave(&dev->power.lock, flags); + __pm_wakeup_event(dev->power.wakeup, msec); + spin_unlock_irqrestore(&dev->power.lock, flags); +} +EXPORT_SYMBOL_GPL(pm_wakeup_event); - if (!events_timer_expires - || time_after(expires, events_timer_expires)) { - if (!events_timer_expires) - events_in_progress++; +/** + * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources. + */ +static void pm_wakeup_update_hit_counts(void) +{ + unsigned long flags; + struct wakeup_source *ws; - mod_timer(&events_timer, expires); - events_timer_expires = expires; - } + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + spin_lock_irqsave(&ws->lock, flags); + if (ws->active) + ws->hit_count++; + spin_unlock_irqrestore(&ws->lock, flags); } - spin_unlock_irqrestore(&events_lock, flags); + rcu_read_unlock(); } /** @@ -184,10 +556,13 @@ bool pm_check_wakeup_events(void) spin_lock_irqsave(&events_lock, flags); if (events_check_enabled) { - ret = (event_count == saved_event_count) && !events_in_progress; + ret = ((unsigned int)atomic_read(&event_count) == saved_count) + && !atomic_read(&events_in_progress); events_check_enabled = ret; } spin_unlock_irqrestore(&events_lock, flags); + if (!ret) + pm_wakeup_update_hit_counts(); return ret; } @@ -202,24 +577,20 @@ bool pm_check_wakeup_events(void) * drop down to zero has been interrupted by a signal (and the current number * of wakeup events being processed is still nonzero). Otherwise return true. 
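These two functions back the /sys/power/wakeup_count attribute (implemented in kernel/power, outside this hunk). A user-space suspend helper is expected to follow the read-then-write-back handshake roughly as sketched here in plain C (error handling trimmed):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int suspend_if_no_wakeup_events(void)
{
        char buf[32];
        ssize_t len;
        int fd;

        /* May block until no wakeup events are in progress. */
        fd = open("/sys/power/wakeup_count", O_RDONLY);
        if (fd < 0)
                return -1;
        len = read(fd, buf, sizeof(buf) - 1);
        close(fd);
        if (len <= 0)
                return -1;
        buf[len] = '\0';

        /* Writing the count back fails if new events arrived meanwhile. */
        fd = open("/sys/power/wakeup_count", O_WRONLY);
        if (fd < 0 || write(fd, buf, strlen(buf)) < 0) {
                if (fd >= 0)
                        close(fd);
                return -1;
        }
        close(fd);

        fd = open("/sys/power/state", O_WRONLY);
        if (fd < 0 || write(fd, "mem", 3) < 0) {
                if (fd >= 0)
                        close(fd);
                return -1;
        }
        close(fd);
        return 0;
}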
*/ -bool pm_get_wakeup_count(unsigned long *count) +bool pm_get_wakeup_count(unsigned int *count) { bool ret; - spin_lock_irq(&events_lock); if (capable(CAP_SYS_ADMIN)) events_check_enabled = false; - while (events_in_progress && !signal_pending(current)) { - spin_unlock_irq(&events_lock); - - schedule_timeout_interruptible(msecs_to_jiffies(100)); - - spin_lock_irq(&events_lock); + while (atomic_read(&events_in_progress) && !signal_pending(current)) { + pm_wakeup_update_hit_counts(); + schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); } - *count = event_count; - ret = !events_in_progress; - spin_unlock_irq(&events_lock); + + ret = !atomic_read(&events_in_progress); + *count = atomic_read(&event_count); return ret; } @@ -232,16 +603,102 @@ bool pm_get_wakeup_count(unsigned long *count) * old number of registered wakeup events to be used by pm_check_wakeup_events() * and return true. Otherwise return false. */ -bool pm_save_wakeup_count(unsigned long count) +bool pm_save_wakeup_count(unsigned int count) { bool ret = false; spin_lock_irq(&events_lock); - if (count == event_count && !events_in_progress) { - saved_event_count = count; + if (count == (unsigned int)atomic_read(&event_count) + && !atomic_read(&events_in_progress)) { + saved_count = count; events_check_enabled = true; ret = true; } spin_unlock_irq(&events_lock); + if (!ret) + pm_wakeup_update_hit_counts(); + return ret; +} + +static struct dentry *wakeup_sources_stats_dentry; + +/** + * print_wakeup_source_stats - Print wakeup source statistics information. + * @m: seq_file to print the statistics into. + * @ws: Wakeup source object to print the statistics for. + */ +static int print_wakeup_source_stats(struct seq_file *m, + struct wakeup_source *ws) +{ + unsigned long flags; + ktime_t total_time; + ktime_t max_time; + unsigned long active_count; + ktime_t active_time; + int ret; + + spin_lock_irqsave(&ws->lock, flags); + + total_time = ws->total_time; + max_time = ws->max_time; + active_count = ws->active_count; + if (ws->active) { + active_time = ktime_sub(ktime_get(), ws->last_time); + total_time = ktime_add(total_time, active_time); + if (active_time.tv64 > max_time.tv64) + max_time = active_time; + } else { + active_time = ktime_set(0, 0); + } + + ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t" + "%lld\t\t%lld\t\t%lld\t\t%lld\n", + ws->name, active_count, ws->event_count, ws->hit_count, + ktime_to_ms(active_time), ktime_to_ms(total_time), + ktime_to_ms(max_time), ktime_to_ms(ws->last_time)); + + spin_unlock_irqrestore(&ws->lock, flags); + return ret; } + +/** + * wakeup_sources_stats_show - Print wakeup sources statistics information. + * @m: seq_file to print the statistics into. 
+ */ +static int wakeup_sources_stats_show(struct seq_file *m, void *unused) +{ + struct wakeup_source *ws; + + seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t" + "active_since\ttotal_time\tmax_time\tlast_change\n"); + + rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) + print_wakeup_source_stats(m, ws); + rcu_read_unlock(); + + return 0; +} + +static int wakeup_sources_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, wakeup_sources_stats_show, NULL); +} + +static const struct file_operations wakeup_sources_stats_fops = { + .owner = THIS_MODULE, + .open = wakeup_sources_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init wakeup_sources_debugfs_init(void) +{ + wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources", + S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops); + return 0; +} + +postcore_initcall(wakeup_sources_debugfs_init); diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 9fc630ce1dd..f6f37a05a0c 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -45,7 +45,8 @@ static ssize_t show_##name(struct sys_device *dev, \ return sprintf(buf, "%d\n", topology_##name(cpu)); \ } -#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) +#if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \ + defined(topology_book_cpumask) static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf) { ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; @@ -114,6 +115,14 @@ define_siblings_show_func(core_cpumask); define_one_ro_named(core_siblings, show_core_cpumask); define_one_ro_named(core_siblings_list, show_core_cpumask_list); +#ifdef CONFIG_SCHED_BOOK +define_id_show_func(book_id); +define_one_ro(book_id); +define_siblings_show_func(book_cpumask); +define_one_ro_named(book_siblings, show_book_cpumask); +define_one_ro_named(book_siblings_list, show_book_cpumask_list); +#endif + static struct attribute *default_attrs[] = { &attr_physical_package_id.attr, &attr_core_id.attr, @@ -121,6 +130,11 @@ static struct attribute *default_attrs[] = { &attr_thread_siblings_list.attr, &attr_core_siblings.attr, &attr_core_siblings_list.attr, +#ifdef CONFIG_SCHED_BOOK + &attr_book_id.attr, + &attr_book_siblings.attr, + &attr_book_siblings_list.attr, +#endif NULL }; diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 4e2c367fec1..1f286ab461d 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -36,7 +36,7 @@ #include <linux/ioport.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/reboot.h> @@ -54,6 +54,7 @@ #define DAC960_GAM_MINOR 252 +static DEFINE_MUTEX(DAC960_mutex); static DAC960_Controller_T *DAC960_Controllers[DAC960_MaxControllers]; static int DAC960_ControllerCount; static struct proc_dir_entry *DAC960_ProcDirectoryEntry; @@ -81,7 +82,7 @@ static int DAC960_open(struct block_device *bdev, fmode_t mode) int drive_nr = (long)disk->private_data; int ret = -ENXIO; - lock_kernel(); + mutex_lock(&DAC960_mutex); if (p->FirmwareType == DAC960_V1_Controller) { if (p->V1.LogicalDriveInformation[drive_nr]. 
LogicalDriveState == DAC960_V1_LogicalDrive_Offline) @@ -99,7 +100,7 @@ static int DAC960_open(struct block_device *bdev, fmode_t mode) goto out; ret = 0; out: - unlock_kernel(); + mutex_unlock(&DAC960_mutex); return ret; } @@ -6625,7 +6626,7 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request, long ErrorCode = 0; if (!capable(CAP_SYS_ADMIN)) return -EACCES; - lock_kernel(); + mutex_lock(&DAC960_mutex); switch (Request) { case DAC960_IOCTL_GET_CONTROLLER_COUNT: @@ -7056,13 +7057,14 @@ static long DAC960_gam_ioctl(struct file *file, unsigned int Request, default: ErrorCode = -ENOTTY; } - unlock_kernel(); + mutex_unlock(&DAC960_mutex); return ErrorCode; } static const struct file_operations DAC960_gam_fops = { .owner = THIS_MODULE, - .unlocked_ioctl = DAC960_gam_ioctl + .unlocked_ioctl = DAC960_gam_ioctl, + .llseek = noop_llseek, }; static struct miscdevice DAC960_gam_dev = { diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index de277689da6..4b9359a6f6c 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -488,4 +488,21 @@ config BLK_DEV_HD If unsure, say N. +config BLK_DEV_RBD + tristate "Rados block device (RBD)" + depends on INET && EXPERIMENTAL && BLOCK + select CEPH_LIB + select LIBCRC32C + select CRYPTO_AES + select CRYPTO + default n + help + Say Y here if you want include the Rados block device, which stripes + a block device over objects stored in the Ceph distributed object + store. + + More information at http://ceph.newdream.net/. + + If unsure, say N. + endif # BLK_DEV diff --git a/drivers/block/Makefile b/drivers/block/Makefile index aff5ac925c3..d7f463d6312 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -37,5 +37,6 @@ obj-$(CONFIG_BLK_DEV_HD) += hd.o obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ +obj-$(CONFIG_BLK_DEV_RBD) += rbd.o swim_mod-objs := swim.o swim_asm.o diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 76f114f0bba..a1725e6488d 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -60,7 +60,7 @@ #include <linux/hdreg.h> #include <linux/delay.h> #include <linux/init.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/amifdreg.h> #include <linux/amifd.h> #include <linux/buffer_head.h> @@ -109,13 +109,12 @@ #define FD_HD_3 0x55555555 /* high-density 3.5" (1760K) drive */ #define FD_DD_5 0xaaaaaaaa /* double-density 5.25" (440K) drive */ +static DEFINE_MUTEX(amiflop_mutex); static unsigned long int fd_def_df0 = FD_DD_3; /* default for df0 if it doesn't identify */ module_param(fd_def_df0, ulong, 0); MODULE_LICENSE("GPL"); -static struct request_queue *floppy_queue; - /* * Macros */ @@ -164,6 +163,7 @@ static volatile int selected = -1; /* currently selected drive */ static int writepending; static int writefromint; static char *raw_buf; +static int fdc_queue; static DEFINE_SPINLOCK(amiflop_lock); @@ -1334,6 +1334,42 @@ static int get_track(int drive, int track) return -1; } +/* + * Round-robin between our available drives, doing one request from each + */ +static struct request *set_next_request(void) +{ + struct request_queue *q; + int cnt = FD_MAX_UNITS; + struct request *rq; + + /* Find next queue we can dispatch from */ + fdc_queue = fdc_queue + 1; + if (fdc_queue == FD_MAX_UNITS) + fdc_queue = 0; + + for(cnt = FD_MAX_UNITS; cnt > 0; cnt--) { + + if (unit[fdc_queue].type->code == FD_NODRIVE) { + if (++fdc_queue == FD_MAX_UNITS) + fdc_queue = 0; + continue; + } + + q = unit[fdc_queue].gendisk->queue; + if 
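The DAC960, aoe, amiflop and following block-driver hunks all apply the same big-kernel-lock removal recipe: a file-local DEFINE_MUTEX() replaces lock_kernel()/unlock_kernel() around open/release/ioctl, and fops that previously leaned on BKL-era defaults gain an explicit .llseek = noop_llseek. A condensed sketch of the pattern on a hypothetical driver (mydrv_do_open() and mydrv_ioctl() stand in for driver-specific code):

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(mydrv_mutex);	/* file-local, replaces the BKL */

static int mydrv_do_open(struct block_device *bdev, fmode_t mode);	/* driver specific */
static long mydrv_ioctl(struct file *f, unsigned int cmd, unsigned long arg);

static int mydrv_open(struct block_device *bdev, fmode_t mode)
{
	int ret;

	mutex_lock(&mydrv_mutex);	/* was: lock_kernel() */
	ret = mydrv_do_open(bdev, mode);
	mutex_unlock(&mydrv_mutex);	/* was: unlock_kernel() */
	return ret;
}

static const struct file_operations mydrv_ctl_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= mydrv_ioctl,
	.llseek		= noop_llseek,	/* seeking is meaningless here; say so
					 * explicitly now that the BKL is gone */
};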
(q) { + rq = blk_fetch_request(q); + if (rq) + break; + } + + if (++fdc_queue == FD_MAX_UNITS) + fdc_queue = 0; + } + + return rq; +} + static void redo_fd_request(void) { struct request *rq; @@ -1345,7 +1381,7 @@ static void redo_fd_request(void) int err; next_req: - rq = blk_fetch_request(floppy_queue); + rq = set_next_request(); if (!rq) { /* Nothing left to do */ return; @@ -1506,9 +1542,9 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&amiflop_mutex); ret = fd_locked_ioctl(bdev, mode, cmd, param); - unlock_kernel(); + mutex_unlock(&amiflop_mutex); return ret; } @@ -1555,11 +1591,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) int old_dev; unsigned long flags; - lock_kernel(); + mutex_lock(&amiflop_mutex); old_dev = fd_device[drive]; if (fd_ref[drive] && old_dev != system) { - unlock_kernel(); + mutex_unlock(&amiflop_mutex); return -EBUSY; } @@ -1575,7 +1611,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) rel_fdc(); if (wrprot) { - unlock_kernel(); + mutex_unlock(&amiflop_mutex); return -EROFS; } } @@ -1594,7 +1630,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive, unit[drive].type->name, data_types[system].name); - unlock_kernel(); + mutex_unlock(&amiflop_mutex); return 0; } @@ -1603,7 +1639,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) struct amiga_floppy_struct *p = disk->private_data; int drive = p - unit; - lock_kernel(); + mutex_lock(&amiflop_mutex); if (unit[drive].dirty == 1) { del_timer (flush_track_timer + drive); non_int_flush_track (drive); @@ -1617,7 +1653,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) /* the mod_use counter is handled this way */ floppy_off (drive | 0x40000000); #endif - unlock_kernel(); + mutex_unlock(&amiflop_mutex); return 0; } @@ -1682,6 +1718,13 @@ static int __init fd_probe_drives(void) continue; } unit[drive].gendisk = disk; + + disk->queue = blk_init_queue(do_fd_request, &amiflop_lock); + if (!disk->queue) { + unit[drive].type->code = FD_NODRIVE; + continue; + } + drives++; if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) { printk("no mem for "); @@ -1695,7 +1738,6 @@ static int __init fd_probe_drives(void) disk->fops = &floppy_fops; sprintf(disk->disk_name, "fd%d", drive); disk->private_data = &unit[drive]; - disk->queue = floppy_queue; set_capacity(disk, 880*2); add_disk(disk); } @@ -1743,11 +1785,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev) goto out_irq2; } - ret = -ENOMEM; - floppy_queue = blk_init_queue(do_fd_request, &amiflop_lock); - if (!floppy_queue) - goto out_queue; - ret = -ENODEV; if (fd_probe_drives() < 1) /* No usable drives */ goto out_probe; @@ -1791,8 +1828,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev) return 0; out_probe: - blk_cleanup_queue(floppy_queue); -out_queue: free_irq(IRQ_AMIGA_CIAA_TB, NULL); out_irq2: free_irq(IRQ_AMIGA_DSKBLK, NULL); @@ -1810,9 +1845,12 @@ static int __exit amiga_floppy_remove(struct platform_device *pdev) for( i = 0; i < FD_MAX_UNITS; i++) { if (unit[i].type->code != FD_NODRIVE) { + struct request_queue *q = unit[i].gendisk->queue; del_gendisk(unit[i].gendisk); put_disk(unit[i].gendisk); kfree(unit[i].trackbuf); + if (q) + blk_cleanup_queue(q); } } blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); @@ -1820,7 +1858,6 @@ static int __exit amiga_floppy_remove(struct platform_device *pdev) 
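Both the Amiga and (further down) the Atari floppy drivers switch from one shared request queue to one queue per drive, and scan the queues round-robin so that no unit can starve the others. A standalone sketch of that scan, with hypothetical unit/queue arrays (the drivers themselves use FD_MAX_UNITS and blk_fetch_request() as shown above):

#include <linux/blkdev.h>
#include <linux/kernel.h>

/* Hypothetical per-unit state; the real drivers keep this in unit[]. */
struct my_unit {
	struct request_queue *queue;	/* NULL if no drive is present */
};

static struct my_unit units[4];
static int last_unit;			/* round-robin cursor */

static struct request *fetch_next_request(void)
{
	struct request *rq = NULL;
	int i;

	/* Start after the unit served last time so every drive gets a turn. */
	for (i = 0; i < ARRAY_SIZE(units); i++) {
		int idx = (last_unit + 1 + i) % ARRAY_SIZE(units);
		struct request_queue *q = units[idx].queue;

		if (!q)
			continue;
		rq = blk_fetch_request(q);
		if (rq) {
			last_unit = idx;
			break;
		}
	}
	return rq;
}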
free_irq(IRQ_AMIGA_DSKBLK, NULL); custom.dmacon = DMAF_DISK; /* disable DMA */ amiga_chip_free(raw_buf); - blk_cleanup_queue(floppy_queue); unregister_blkdev(FLOPPY_MAJOR, "fd"); } #endif diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index a946929735a..f21c237a9e5 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -12,9 +12,10 @@ #include <linux/slab.h> #include <linux/genhd.h> #include <linux/netdevice.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include "aoe.h" +static DEFINE_MUTEX(aoeblk_mutex); static struct kmem_cache *buf_pool_cache; static ssize_t aoedisk_show_state(struct device *dev, @@ -125,16 +126,16 @@ aoeblk_open(struct block_device *bdev, fmode_t mode) struct aoedev *d = bdev->bd_disk->private_data; ulong flags; - lock_kernel(); + mutex_lock(&aoeblk_mutex); spin_lock_irqsave(&d->lock, flags); if (d->flags & DEVFL_UP) { d->nopen++; spin_unlock_irqrestore(&d->lock, flags); - unlock_kernel(); + mutex_unlock(&aoeblk_mutex); return 0; } spin_unlock_irqrestore(&d->lock, flags); - unlock_kernel(); + mutex_unlock(&aoeblk_mutex); return -ENODEV; } diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c index 4a1b9e7464a..146296ca496 100644 --- a/drivers/block/aoe/aoechr.c +++ b/drivers/block/aoe/aoechr.c @@ -9,7 +9,7 @@ #include <linux/completion.h> #include <linux/delay.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/skbuff.h> #include "aoe.h" @@ -37,6 +37,7 @@ struct ErrMsg { char *msg; }; +static DEFINE_MUTEX(aoechr_mutex); static struct ErrMsg emsgs[NMSG]; static int emsgs_head_idx, emsgs_tail_idx; static struct completion emsgs_comp; @@ -183,16 +184,16 @@ aoechr_open(struct inode *inode, struct file *filp) { int n, i; - lock_kernel(); + mutex_lock(&aoechr_mutex); n = iminor(inode); filp->private_data = (void *) (unsigned long) n; for (i = 0; i < ARRAY_SIZE(chardevs); ++i) if (chardevs[i].minor == n) { - unlock_kernel(); + mutex_unlock(&aoechr_mutex); return 0; } - unlock_kernel(); + mutex_unlock(&aoechr_mutex); return -EINVAL; } @@ -265,6 +266,7 @@ static const struct file_operations aoe_fops = { .open = aoechr_open, .release = aoechr_rel, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static char *aoe_devnode(struct device *dev, mode_t *mode) diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index aceb9647652..4e4cc6c828c 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -67,7 +67,7 @@ #include <linux/delay.h> #include <linux/init.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/atafd.h> #include <asm/atafdreg.h> @@ -79,8 +79,9 @@ #undef DEBUG -static struct request_queue *floppy_queue; +static DEFINE_MUTEX(ataflop_mutex); static struct request *fd_request; +static int fdc_queue; /* Disk types: DD, HD, ED */ static struct atari_disk_type { @@ -1391,6 +1392,29 @@ static void setup_req_params( int drive ) ReqTrack, ReqSector, (unsigned long)ReqData )); } +/* + * Round-robin between our available drives, doing one request from each + */ +static struct request *set_next_request(void) +{ + struct request_queue *q; + int old_pos = fdc_queue; + struct request *rq; + + do { + q = unit[fdc_queue].disk->queue; + if (++fdc_queue == FD_MAX_UNITS) + fdc_queue = 0; + if (q) { + rq = blk_fetch_request(q); + if (rq) + break; + } + } while (fdc_queue != old_pos); + + return rq; +} + static void redo_fd_request(void) { @@ -1405,7 +1429,7 @@ static void redo_fd_request(void) repeat: if 
(!fd_request) { - fd_request = blk_fetch_request(floppy_queue); + fd_request = set_next_request(); if (!fd_request) goto the_end; } @@ -1671,9 +1695,9 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&ataflop_mutex); ret = fd_locked_ioctl(bdev, mode, cmd, arg); - unlock_kernel(); + mutex_unlock(&ataflop_mutex); return ret; } @@ -1854,9 +1878,9 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&ataflop_mutex); ret = floppy_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&ataflop_mutex); return ret; } @@ -1864,14 +1888,14 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) static int floppy_release(struct gendisk *disk, fmode_t mode) { struct atari_floppy_struct *p = disk->private_data; - lock_kernel(); + mutex_lock(&ataflop_mutex); if (p->ref < 0) p->ref = 0; else if (!p->ref--) { printk(KERN_ERR "floppy_release with fd_ref == 0"); p->ref = 0; } - unlock_kernel(); + mutex_unlock(&ataflop_mutex); return 0; } @@ -1932,10 +1956,6 @@ static int __init atari_floppy_init (void) PhysTrackBuffer = virt_to_phys(TrackBuffer); BufferDrive = BufferSide = BufferTrack = -1; - floppy_queue = blk_init_queue(do_fd_request, &ataflop_lock); - if (!floppy_queue) - goto Enomem; - for (i = 0; i < FD_MAX_UNITS; i++) { unit[i].track = -1; unit[i].flags = 0; @@ -1944,7 +1964,10 @@ static int __init atari_floppy_init (void) sprintf(unit[i].disk->disk_name, "fd%d", i); unit[i].disk->fops = &floppy_fops; unit[i].disk->private_data = &unit[i]; - unit[i].disk->queue = floppy_queue; + unit[i].disk->queue = blk_init_queue(do_fd_request, + &ataflop_lock); + if (!unit[i].disk->queue) + goto Enomem; set_capacity(unit[i].disk, MAX_DISK_SIZE * 2); add_disk(unit[i].disk); } @@ -1959,10 +1982,14 @@ static int __init atari_floppy_init (void) return 0; Enomem: - while (i--) + while (i--) { + struct request_queue *q = unit[i].disk->queue; + put_disk(unit[i].disk); - if (floppy_queue) - blk_cleanup_queue(floppy_queue); + if (q) + blk_cleanup_queue(q); + } + unregister_blkdev(FLOPPY_MAJOR, "fd"); return -ENOMEM; } @@ -2011,12 +2038,14 @@ static void __exit atari_floppy_exit(void) int i; blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); for (i = 0; i < FD_MAX_UNITS; i++) { + struct request_queue *q = unit[i].disk->queue; + del_gendisk(unit[i].disk); put_disk(unit[i].disk); + blk_cleanup_queue(q); } unregister_blkdev(FLOPPY_MAJOR, "fd"); - blk_cleanup_queue(floppy_queue); del_timer_sync(&fd_timer); atari_stram_free( DMABuffer ); } diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 1c7f63792ff..b7f51e4594f 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -15,7 +15,7 @@ #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/highmem.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/radix-tree.h> #include <linux/buffer_head.h> /* invalidate_bh_lrus() */ #include <linux/slab.h> @@ -55,6 +55,7 @@ struct brd_device { /* * Look up and return a brd's page for a given sector. */ +static DEFINE_MUTEX(brd_mutex); static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) { pgoff_t idx; @@ -402,7 +403,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode, * ram device BLKFLSBUF has special semantics, we want to actually * release and destroy the ramdisk data. 
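With a queue per gendisk, a failure in the middle of probing has to unwind only what was actually set up, and module exit has to release each disk's queue individually instead of one shared queue. A sketch of that init/unwind shape with hypothetical names (the floppy drivers above do the same with alloc_disk(), blk_init_queue() and blk_cleanup_queue()):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/init.h>
#include <linux/spinlock.h>

#define MY_MAX_UNITS 4

static struct gendisk *my_disks[MY_MAX_UNITS];
static DEFINE_SPINLOCK(my_queue_lock);
static void my_request_fn(struct request_queue *q);	/* driver specific */

static int __init my_units_init(void)
{
	int i;

	for (i = 0; i < MY_MAX_UNITS; i++) {
		my_disks[i] = alloc_disk(1);
		if (!my_disks[i])
			goto unwind;
		my_disks[i]->queue = blk_init_queue(my_request_fn, &my_queue_lock);
		if (!my_disks[i]->queue) {
			put_disk(my_disks[i]);
			goto unwind;
		}
		/* ... set fops, disk_name, capacity ... */
		add_disk(my_disks[i]);
	}
	return 0;

unwind:
	while (i--) {				/* undo only fully set-up units */
		struct request_queue *q = my_disks[i]->queue;

		del_gendisk(my_disks[i]);
		put_disk(my_disks[i]);
		blk_cleanup_queue(q);
	}
	return -ENOMEM;
}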
*/ - lock_kernel(); + mutex_lock(&brd_mutex); mutex_lock(&bdev->bd_mutex); error = -EBUSY; if (bdev->bd_openers <= 1) { @@ -419,7 +420,7 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode, error = 0; } mutex_unlock(&bdev->bd_mutex); - unlock_kernel(); + mutex_unlock(&brd_mutex); return error; } @@ -482,7 +483,6 @@ static struct brd_device *brd_alloc(int i) if (!brd->brd_queue) goto out_free_dev; blk_queue_make_request(brd->brd_queue, brd_make_request); - blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG); blk_queue_max_hw_sectors(brd->brd_queue, 1024); blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 6124c2fd2d3..f09e6df15aa 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -26,7 +26,6 @@ #include <linux/pci.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/delay.h> #include <linux/major.h> #include <linux/fs.h> @@ -66,6 +65,7 @@ MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION("3.6.26"); MODULE_LICENSE("GPL"); +static DEFINE_MUTEX(cciss_mutex); static int cciss_allow_hpsa; module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(cciss_allow_hpsa, @@ -105,11 +105,12 @@ static const struct pci_device_id cciss_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, {0,} }; @@ -149,11 +150,12 @@ static struct board_type products[] = { {0x3249103C, "Smart Array P812", &SA5_access}, {0x324A103C, "Smart Array P712m", &SA5_access}, {0x324B103C, "Smart Array P711m", &SA5_access}, - {0x3250103C, "Smart Array", &SA5_access}, - {0x3251103C, "Smart Array", &SA5_access}, - {0x3252103C, "Smart Array", &SA5_access}, - {0x3253103C, "Smart Array", &SA5_access}, - {0x3254103C, "Smart Array", &SA5_access}, + {0x3350103C, "Smart Array", &SA5_access}, + {0x3351103C, "Smart Array", &SA5_access}, + {0x3352103C, "Smart Array", &SA5_access}, + {0x3353103C, "Smart Array", &SA5_access}, + {0x3354103C, "Smart Array", &SA5_access}, + {0x3355103C, "Smart Array", &SA5_access}, }; /* How long to wait (in milliseconds) for board to go into simple mode */ @@ -1059,9 +1061,9 @@ static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&cciss_mutex); ret = cciss_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&cciss_mutex); return ret; } @@ -1074,13 +1076,13 @@ static int cciss_release(struct gendisk *disk, fmode_t mode) ctlr_info_t *h; drive_info_struct *drv; - lock_kernel(); + mutex_lock(&cciss_mutex); h = get_host(disk); drv = get_drv(disk); dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name); drv->usage_count--; h->usage_count--; - unlock_kernel(); + 
mutex_unlock(&cciss_mutex); return 0; } @@ -1088,9 +1090,9 @@ static int do_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&cciss_mutex); ret = cciss_ioctl(bdev, mode, cmd, arg); - unlock_kernel(); + mutex_unlock(&cciss_mutex); return ret; } @@ -1232,470 +1234,452 @@ static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c) c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) (void)check_for_unit_attention(h, c); } -/* - * ioctl - */ -static int cciss_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) + +static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp) { - struct gendisk *disk = bdev->bd_disk; - ctlr_info_t *h = get_host(disk); - drive_info_struct *drv = get_drv(disk); - void __user *argp = (void __user *)arg; + cciss_pci_info_struct pciinfo; - dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n", - cmd, arg); - switch (cmd) { - case CCISS_GETPCIINFO: - { - cciss_pci_info_struct pciinfo; - - if (!arg) - return -EINVAL; - pciinfo.domain = pci_domain_nr(h->pdev->bus); - pciinfo.bus = h->pdev->bus->number; - pciinfo.dev_fn = h->pdev->devfn; - pciinfo.board_id = h->board_id; - if (copy_to_user - (argp, &pciinfo, sizeof(cciss_pci_info_struct))) - return -EFAULT; - return 0; - } - case CCISS_GETINTINFO: - { - cciss_coalint_struct intinfo; - if (!arg) - return -EINVAL; - intinfo.delay = - readl(&h->cfgtable->HostWrite.CoalIntDelay); - intinfo.count = - readl(&h->cfgtable->HostWrite.CoalIntCount); - if (copy_to_user - (argp, &intinfo, sizeof(cciss_coalint_struct))) - return -EFAULT; - return 0; - } - case CCISS_SETINTINFO: - { - cciss_coalint_struct intinfo; - unsigned long flags; - int i; - - if (!arg) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; - if (copy_from_user - (&intinfo, argp, sizeof(cciss_coalint_struct))) - return -EFAULT; - if ((intinfo.delay == 0) && (intinfo.count == 0)) - return -EINVAL; - spin_lock_irqsave(&h->lock, flags); - /* Update the field, and then ring the doorbell */ - writel(intinfo.delay, - &(h->cfgtable->HostWrite.CoalIntDelay)); - writel(intinfo.count, - &(h->cfgtable->HostWrite.CoalIntCount)); - writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - - for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { - if (!(readl(h->vaddr + SA5_DOORBELL) - & CFGTBL_ChangeReq)) - break; - /* delay and try again */ - udelay(1000); - } - spin_unlock_irqrestore(&h->lock, flags); - if (i >= MAX_IOCTL_CONFIG_WAIT) - return -EAGAIN; - return 0; - } - case CCISS_GETNODENAME: - { - NodeName_type NodeName; - int i; - - if (!arg) - return -EINVAL; - for (i = 0; i < 16; i++) - NodeName[i] = - readb(&h->cfgtable->ServerName[i]); - if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) - return -EFAULT; - return 0; - } - case CCISS_SETNODENAME: - { - NodeName_type NodeName; - unsigned long flags; - int i; + if (!argp) + return -EINVAL; + pciinfo.domain = pci_domain_nr(h->pdev->bus); + pciinfo.bus = h->pdev->bus->number; + pciinfo.dev_fn = h->pdev->devfn; + pciinfo.board_id = h->board_id; + if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct))) + return -EFAULT; + return 0; +} - if (!arg) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; +static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) +{ + cciss_coalint_struct intinfo; - if (copy_from_user - (NodeName, argp, sizeof(NodeName_type))) - return -EFAULT; + if (!argp) + return -EINVAL; + intinfo.delay = 
readl(&h->cfgtable->HostWrite.CoalIntDelay); + intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); + if (copy_to_user + (argp, &intinfo, sizeof(cciss_coalint_struct))) + return -EFAULT; + return 0; +} - spin_lock_irqsave(&h->lock, flags); +static int cciss_setintinfo(ctlr_info_t *h, void __user *argp) +{ + cciss_coalint_struct intinfo; + unsigned long flags; + int i; - /* Update the field, and then ring the doorbell */ - for (i = 0; i < 16; i++) - writeb(NodeName[i], - &h->cfgtable->ServerName[i]); + if (!argp) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(&intinfo, argp, sizeof(intinfo))) + return -EFAULT; + if ((intinfo.delay == 0) && (intinfo.count == 0)) + return -EINVAL; + spin_lock_irqsave(&h->lock, flags); + /* Update the field, and then ring the doorbell */ + writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay)); + writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount)); + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); - writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { + if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) + break; + udelay(1000); /* delay and try again */ + } + spin_unlock_irqrestore(&h->lock, flags); + if (i >= MAX_IOCTL_CONFIG_WAIT) + return -EAGAIN; + return 0; +} - for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { - if (!(readl(h->vaddr + SA5_DOORBELL) - & CFGTBL_ChangeReq)) - break; - /* delay and try again */ - udelay(1000); - } - spin_unlock_irqrestore(&h->lock, flags); - if (i >= MAX_IOCTL_CONFIG_WAIT) - return -EAGAIN; - return 0; - } +static int cciss_getnodename(ctlr_info_t *h, void __user *argp) +{ + NodeName_type NodeName; + int i; - case CCISS_GETHEARTBEAT: - { - Heartbeat_type heartbeat; - - if (!arg) - return -EINVAL; - heartbeat = readl(&h->cfgtable->HeartBeat); - if (copy_to_user - (argp, &heartbeat, sizeof(Heartbeat_type))) - return -EFAULT; - return 0; - } - case CCISS_GETBUSTYPES: - { - BusTypes_type BusTypes; - - if (!arg) - return -EINVAL; - BusTypes = readl(&h->cfgtable->BusTypes); - if (copy_to_user - (argp, &BusTypes, sizeof(BusTypes_type))) - return -EFAULT; - return 0; - } - case CCISS_GETFIRMVER: - { - FirmwareVer_type firmware; + if (!argp) + return -EINVAL; + for (i = 0; i < 16; i++) + NodeName[i] = readb(&h->cfgtable->ServerName[i]); + if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) + return -EFAULT; + return 0; +} - if (!arg) - return -EINVAL; - memcpy(firmware, h->firm_ver, 4); +static int cciss_setnodename(ctlr_info_t *h, void __user *argp) +{ + NodeName_type NodeName; + unsigned long flags; + int i; - if (copy_to_user - (argp, firmware, sizeof(FirmwareVer_type))) - return -EFAULT; - return 0; - } - case CCISS_GETDRIVVER: - { - DriverVer_type DriverVer = DRIVER_VERSION; + if (!argp) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (copy_from_user(NodeName, argp, sizeof(NodeName_type))) + return -EFAULT; + spin_lock_irqsave(&h->lock, flags); + /* Update the field, and then ring the doorbell */ + for (i = 0; i < 16; i++) + writeb(NodeName[i], &h->cfgtable->ServerName[i]); + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); + for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { + if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) + break; + udelay(1000); /* delay and try again */ + } + spin_unlock_irqrestore(&h->lock, flags); + if (i >= MAX_IOCTL_CONFIG_WAIT) + return -EAGAIN; + return 0; +} - if (!arg) - return -EINVAL; +static int cciss_getheartbeat(ctlr_info_t *h, void __user 
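cciss_setintinfo() and cciss_setnodename() both use the controller's change-request handshake: write the new values into the config table, ring the doorbell, then poll until the controller clears the change-request bit or the wait times out. A generic sketch of that handshake with hypothetical register handles (the driver itself writes h->cfgtable fields and polls h->vaddr + SA5_DOORBELL for CFGTBL_ChangeReq, as in the hunks above; the 1000-iteration bound is illustrative):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int apply_config_change(void __iomem *cfg_reg, u32 value,
			       void __iomem *doorbell, u32 change_req)
{
	int i;

	writel(value, cfg_reg);			/* stage the new setting */
	writel(change_req, doorbell);		/* ask firmware to apply it */

	for (i = 0; i < 1000; i++) {		/* bounded wait, roughly one second */
		if (!(readl(doorbell) & change_req))
			return 0;		/* firmware acknowledged */
		udelay(1000);
	}
	return -EAGAIN;				/* firmware never picked it up */
}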
*argp) +{ + Heartbeat_type heartbeat; - if (copy_to_user - (argp, &DriverVer, sizeof(DriverVer_type))) - return -EFAULT; - return 0; - } + if (!argp) + return -EINVAL; + heartbeat = readl(&h->cfgtable->HeartBeat); + if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) + return -EFAULT; + return 0; +} - case CCISS_DEREGDISK: - case CCISS_REGNEWD: - case CCISS_REVALIDVOLS: - return rebuild_lun_table(h, 0, 1); +static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) +{ + BusTypes_type BusTypes; + + if (!argp) + return -EINVAL; + BusTypes = readl(&h->cfgtable->BusTypes); + if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) + return -EFAULT; + return 0; +} - case CCISS_GETLUNINFO:{ - LogvolInfo_struct luninfo; +static int cciss_getfirmver(ctlr_info_t *h, void __user *argp) +{ + FirmwareVer_type firmware; - memcpy(&luninfo.LunID, drv->LunID, - sizeof(luninfo.LunID)); - luninfo.num_opens = drv->usage_count; - luninfo.num_parts = 0; - if (copy_to_user(argp, &luninfo, - sizeof(LogvolInfo_struct))) - return -EFAULT; - return 0; + if (!argp) + return -EINVAL; + memcpy(firmware, h->firm_ver, 4); + + if (copy_to_user + (argp, firmware, sizeof(FirmwareVer_type))) + return -EFAULT; + return 0; +} + +static int cciss_getdrivver(ctlr_info_t *h, void __user *argp) +{ + DriverVer_type DriverVer = DRIVER_VERSION; + + if (!argp) + return -EINVAL; + if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) + return -EFAULT; + return 0; +} + +static int cciss_getluninfo(ctlr_info_t *h, + struct gendisk *disk, void __user *argp) +{ + LogvolInfo_struct luninfo; + drive_info_struct *drv = get_drv(disk); + + if (!argp) + return -EINVAL; + memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID)); + luninfo.num_opens = drv->usage_count; + luninfo.num_parts = 0; + if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct))) + return -EFAULT; + return 0; +} + +static int cciss_passthru(ctlr_info_t *h, void __user *argp) +{ + IOCTL_Command_struct iocommand; + CommandList_struct *c; + char *buff = NULL; + u64bit temp64; + DECLARE_COMPLETION_ONSTACK(wait); + + if (!argp) + return -EINVAL; + + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + if (copy_from_user + (&iocommand, argp, sizeof(IOCTL_Command_struct))) + return -EFAULT; + if ((iocommand.buf_size < 1) && + (iocommand.Request.Type.Direction != XFER_NONE)) { + return -EINVAL; + } + if (iocommand.buf_size > 0) { + buff = kmalloc(iocommand.buf_size, GFP_KERNEL); + if (buff == NULL) + return -EFAULT; + } + if (iocommand.Request.Type.Direction == XFER_WRITE) { + /* Copy the data into the buffer we created */ + if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { + kfree(buff); + return -EFAULT; } - case CCISS_PASSTHRU: - { - IOCTL_Command_struct iocommand; - CommandList_struct *c; - char *buff = NULL; - u64bit temp64; - DECLARE_COMPLETION_ONSTACK(wait); - - if (!arg) - return -EINVAL; - - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - - if (copy_from_user - (&iocommand, argp, sizeof(IOCTL_Command_struct))) - return -EFAULT; - if ((iocommand.buf_size < 1) && - (iocommand.Request.Type.Direction != XFER_NONE)) { - return -EINVAL; - } -#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */ - /* Check kmalloc limits */ - if (iocommand.buf_size > 128000) - return -EINVAL; -#endif - if (iocommand.buf_size > 0) { - buff = kmalloc(iocommand.buf_size, GFP_KERNEL); - if (buff == NULL) - return -EFAULT; - } - if (iocommand.Request.Type.Direction == XFER_WRITE) { - /* Copy the data into the buffer we created */ - if 
(copy_from_user - (buff, iocommand.buf, iocommand.buf_size)) { - kfree(buff); - return -EFAULT; - } - } else { - memset(buff, 0, iocommand.buf_size); - } - c = cmd_special_alloc(h); - if (!c) { - kfree(buff); - return -ENOMEM; - } - /* Fill in the command type */ - c->cmd_type = CMD_IOCTL_PEND; - /* Fill in Command Header */ - c->Header.ReplyQueue = 0; /* unused in simple mode */ - if (iocommand.buf_size > 0) /* buffer to fill */ - { - c->Header.SGList = 1; - c->Header.SGTotal = 1; - } else /* no buffers to fill */ - { - c->Header.SGList = 0; - c->Header.SGTotal = 0; - } - c->Header.LUN = iocommand.LUN_info; - /* use the kernel address the cmd block for tag */ - c->Header.Tag.lower = c->busaddr; - - /* Fill in Request block */ - c->Request = iocommand.Request; - - /* Fill in the scatter gather information */ - if (iocommand.buf_size > 0) { - temp64.val = pci_map_single(h->pdev, buff, - iocommand.buf_size, - PCI_DMA_BIDIRECTIONAL); - c->SG[0].Addr.lower = temp64.val32.lower; - c->SG[0].Addr.upper = temp64.val32.upper; - c->SG[0].Len = iocommand.buf_size; - c->SG[0].Ext = 0; /* we are not chaining */ - } - c->waiting = &wait; + } else { + memset(buff, 0, iocommand.buf_size); + } + c = cmd_special_alloc(h); + if (!c) { + kfree(buff); + return -ENOMEM; + } + /* Fill in the command type */ + c->cmd_type = CMD_IOCTL_PEND; + /* Fill in Command Header */ + c->Header.ReplyQueue = 0; /* unused in simple mode */ + if (iocommand.buf_size > 0) { /* buffer to fill */ + c->Header.SGList = 1; + c->Header.SGTotal = 1; + } else { /* no buffers to fill */ + c->Header.SGList = 0; + c->Header.SGTotal = 0; + } + c->Header.LUN = iocommand.LUN_info; + /* use the kernel address the cmd block for tag */ + c->Header.Tag.lower = c->busaddr; - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); + /* Fill in Request block */ + c->Request = iocommand.Request; - /* unlock the buffers from DMA */ - temp64.val32.lower = c->SG[0].Addr.lower; - temp64.val32.upper = c->SG[0].Addr.upper; - pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, - iocommand.buf_size, - PCI_DMA_BIDIRECTIONAL); + /* Fill in the scatter gather information */ + if (iocommand.buf_size > 0) { + temp64.val = pci_map_single(h->pdev, buff, + iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); + c->SG[0].Addr.lower = temp64.val32.lower; + c->SG[0].Addr.upper = temp64.val32.upper; + c->SG[0].Len = iocommand.buf_size; + c->SG[0].Ext = 0; /* we are not chaining */ + } + c->waiting = &wait; - check_ioctl_unit_attention(h, c); + enqueue_cmd_and_start_io(h, c); + wait_for_completion(&wait); - /* Copy the error information out */ - iocommand.error_info = *(c->err_info); - if (copy_to_user - (argp, &iocommand, sizeof(IOCTL_Command_struct))) { - kfree(buff); - cmd_special_free(h, c); - return -EFAULT; - } + /* unlock the buffers from DMA */ + temp64.val32.lower = c->SG[0].Addr.lower; + temp64.val32.upper = c->SG[0].Addr.upper; + pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size, + PCI_DMA_BIDIRECTIONAL); + check_ioctl_unit_attention(h, c); + + /* Copy the error information out */ + iocommand.error_info = *(c->err_info); + if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) { + kfree(buff); + cmd_special_free(h, c); + return -EFAULT; + } - if (iocommand.Request.Type.Direction == XFER_READ) { - /* Copy the data out of the buffer we created */ - if (copy_to_user - (iocommand.buf, buff, iocommand.buf_size)) { - kfree(buff); - cmd_special_free(h, c); - return -EFAULT; - } - } + if (iocommand.Request.Type.Direction == XFER_READ) { + /* 
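cciss_passthru() follows the standard bounce-buffer passthrough shape: copy the user's command and data in, DMA-map the kernel buffer, submit and wait for completion, unmap, then copy the results back out. A compressed sketch of just that round trip; submit_command() and wait_for_command() are hypothetical stand-ins for the driver's command setup, enqueue_cmd_and_start_io() and completion wait shown above:

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static void submit_command(struct pci_dev *pdev, dma_addr_t dma, size_t len);
static void wait_for_command(struct pci_dev *pdev);

static int passthru_one_buffer(struct pci_dev *pdev, void __user *ubuf,
			       size_t len, int is_write)
{
	void *kbuf;
	dma_addr_t dma;
	int ret = 0;

	kbuf = kzalloc(len, GFP_KERNEL);	/* zeroed, so reads never leak stale data */
	if (!kbuf)
		return -ENOMEM;
	if (is_write && copy_from_user(kbuf, ubuf, len)) {
		ret = -EFAULT;
		goto out_free;
	}

	dma = pci_map_single(pdev, kbuf, len, PCI_DMA_BIDIRECTIONAL);
	submit_command(pdev, dma, len);
	wait_for_command(pdev);
	pci_unmap_single(pdev, dma, len, PCI_DMA_BIDIRECTIONAL);

	if (!is_write && copy_to_user(ubuf, kbuf, len))
		ret = -EFAULT;
out_free:
	kfree(kbuf);
	return ret;
}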
Copy the data out of the buffer we created */ + if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { kfree(buff); cmd_special_free(h, c); - return 0; + return -EFAULT; } - case CCISS_BIG_PASSTHRU:{ - BIG_IOCTL_Command_struct *ioc; - CommandList_struct *c; - unsigned char **buff = NULL; - int *buff_size = NULL; - u64bit temp64; - BYTE sg_used = 0; - int status = 0; - int i; - DECLARE_COMPLETION_ONSTACK(wait); - __u32 left; - __u32 sz; - BYTE __user *data_ptr; - - if (!arg) - return -EINVAL; - if (!capable(CAP_SYS_RAWIO)) - return -EPERM; - ioc = (BIG_IOCTL_Command_struct *) - kmalloc(sizeof(*ioc), GFP_KERNEL); - if (!ioc) { - status = -ENOMEM; - goto cleanup1; - } - if (copy_from_user(ioc, argp, sizeof(*ioc))) { + } + kfree(buff); + cmd_special_free(h, c); + return 0; +} + +static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp) +{ + BIG_IOCTL_Command_struct *ioc; + CommandList_struct *c; + unsigned char **buff = NULL; + int *buff_size = NULL; + u64bit temp64; + BYTE sg_used = 0; + int status = 0; + int i; + DECLARE_COMPLETION_ONSTACK(wait); + __u32 left; + __u32 sz; + BYTE __user *data_ptr; + + if (!argp) + return -EINVAL; + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + ioc = (BIG_IOCTL_Command_struct *) + kmalloc(sizeof(*ioc), GFP_KERNEL); + if (!ioc) { + status = -ENOMEM; + goto cleanup1; + } + if (copy_from_user(ioc, argp, sizeof(*ioc))) { + status = -EFAULT; + goto cleanup1; + } + if ((ioc->buf_size < 1) && + (ioc->Request.Type.Direction != XFER_NONE)) { + status = -EINVAL; + goto cleanup1; + } + /* Check kmalloc limits using all SGs */ + if (ioc->malloc_size > MAX_KMALLOC_SIZE) { + status = -EINVAL; + goto cleanup1; + } + if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { + status = -EINVAL; + goto cleanup1; + } + buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); + if (!buff) { + status = -ENOMEM; + goto cleanup1; + } + buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); + if (!buff_size) { + status = -ENOMEM; + goto cleanup1; + } + left = ioc->buf_size; + data_ptr = ioc->buf; + while (left) { + sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; + buff_size[sg_used] = sz; + buff[sg_used] = kmalloc(sz, GFP_KERNEL); + if (buff[sg_used] == NULL) { + status = -ENOMEM; + goto cleanup1; + } + if (ioc->Request.Type.Direction == XFER_WRITE) { + if (copy_from_user(buff[sg_used], data_ptr, sz)) { status = -EFAULT; goto cleanup1; } - if ((ioc->buf_size < 1) && - (ioc->Request.Type.Direction != XFER_NONE)) { - status = -EINVAL; - goto cleanup1; - } - /* Check kmalloc limits using all SGs */ - if (ioc->malloc_size > MAX_KMALLOC_SIZE) { - status = -EINVAL; - goto cleanup1; - } - if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { - status = -EINVAL; - goto cleanup1; - } - buff = - kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); - if (!buff) { - status = -ENOMEM; - goto cleanup1; - } - buff_size = kmalloc(MAXSGENTRIES * sizeof(int), - GFP_KERNEL); - if (!buff_size) { - status = -ENOMEM; - goto cleanup1; - } - left = ioc->buf_size; - data_ptr = ioc->buf; - while (left) { - sz = (left > - ioc->malloc_size) ? 
ioc-> - malloc_size : left; - buff_size[sg_used] = sz; - buff[sg_used] = kmalloc(sz, GFP_KERNEL); - if (buff[sg_used] == NULL) { - status = -ENOMEM; - goto cleanup1; - } - if (ioc->Request.Type.Direction == XFER_WRITE) { - if (copy_from_user - (buff[sg_used], data_ptr, sz)) { - status = -EFAULT; - goto cleanup1; - } - } else { - memset(buff[sg_used], 0, sz); - } - left -= sz; - data_ptr += sz; - sg_used++; - } - c = cmd_special_alloc(h); - if (!c) { - status = -ENOMEM; - goto cleanup1; - } - c->cmd_type = CMD_IOCTL_PEND; - c->Header.ReplyQueue = 0; + } else { + memset(buff[sg_used], 0, sz); + } + left -= sz; + data_ptr += sz; + sg_used++; + } + c = cmd_special_alloc(h); + if (!c) { + status = -ENOMEM; + goto cleanup1; + } + c->cmd_type = CMD_IOCTL_PEND; + c->Header.ReplyQueue = 0; + c->Header.SGList = sg_used; + c->Header.SGTotal = sg_used; + c->Header.LUN = ioc->LUN_info; + c->Header.Tag.lower = c->busaddr; - if (ioc->buf_size > 0) { - c->Header.SGList = sg_used; - c->Header.SGTotal = sg_used; - } else { - c->Header.SGList = 0; - c->Header.SGTotal = 0; - } - c->Header.LUN = ioc->LUN_info; - c->Header.Tag.lower = c->busaddr; - - c->Request = ioc->Request; - if (ioc->buf_size > 0) { - for (i = 0; i < sg_used; i++) { - temp64.val = - pci_map_single(h->pdev, buff[i], - buff_size[i], - PCI_DMA_BIDIRECTIONAL); - c->SG[i].Addr.lower = - temp64.val32.lower; - c->SG[i].Addr.upper = - temp64.val32.upper; - c->SG[i].Len = buff_size[i]; - c->SG[i].Ext = 0; /* we are not chaining */ - } - } - c->waiting = &wait; - enqueue_cmd_and_start_io(h, c); - wait_for_completion(&wait); - /* unlock the buffers from DMA */ - for (i = 0; i < sg_used; i++) { - temp64.val32.lower = c->SG[i].Addr.lower; - temp64.val32.upper = c->SG[i].Addr.upper; - pci_unmap_single(h->pdev, - (dma_addr_t) temp64.val, buff_size[i], - PCI_DMA_BIDIRECTIONAL); - } - check_ioctl_unit_attention(h, c); - /* Copy the error information out */ - ioc->error_info = *(c->err_info); - if (copy_to_user(argp, ioc, sizeof(*ioc))) { + c->Request = ioc->Request; + for (i = 0; i < sg_used; i++) { + temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i], + PCI_DMA_BIDIRECTIONAL); + c->SG[i].Addr.lower = temp64.val32.lower; + c->SG[i].Addr.upper = temp64.val32.upper; + c->SG[i].Len = buff_size[i]; + c->SG[i].Ext = 0; /* we are not chaining */ + } + c->waiting = &wait; + enqueue_cmd_and_start_io(h, c); + wait_for_completion(&wait); + /* unlock the buffers from DMA */ + for (i = 0; i < sg_used; i++) { + temp64.val32.lower = c->SG[i].Addr.lower; + temp64.val32.upper = c->SG[i].Addr.upper; + pci_unmap_single(h->pdev, + (dma_addr_t) temp64.val, buff_size[i], + PCI_DMA_BIDIRECTIONAL); + } + check_ioctl_unit_attention(h, c); + /* Copy the error information out */ + ioc->error_info = *(c->err_info); + if (copy_to_user(argp, ioc, sizeof(*ioc))) { + cmd_special_free(h, c); + status = -EFAULT; + goto cleanup1; + } + if (ioc->Request.Type.Direction == XFER_READ) { + /* Copy the data out of the buffer we created */ + BYTE __user *ptr = ioc->buf; + for (i = 0; i < sg_used; i++) { + if (copy_to_user(ptr, buff[i], buff_size[i])) { cmd_special_free(h, c); status = -EFAULT; goto cleanup1; } - if (ioc->Request.Type.Direction == XFER_READ) { - /* Copy the data out of the buffer we created */ - BYTE __user *ptr = ioc->buf; - for (i = 0; i < sg_used; i++) { - if (copy_to_user - (ptr, buff[i], buff_size[i])) { - cmd_special_free(h, c); - status = -EFAULT; - goto cleanup1; - } - ptr += buff_size[i]; - } - } - cmd_special_free(h, c); - status = 0; - cleanup1: - if (buff) { - 
for (i = 0; i < sg_used; i++) - kfree(buff[i]); - kfree(buff); - } - kfree(buff_size); - kfree(ioc); - return status; + ptr += buff_size[i]; } + } + cmd_special_free(h, c); + status = 0; +cleanup1: + if (buff) { + for (i = 0; i < sg_used; i++) + kfree(buff[i]); + kfree(buff); + } + kfree(buff_size); + kfree(ioc); + return status; +} + +static int cciss_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct gendisk *disk = bdev->bd_disk; + ctlr_info_t *h = get_host(disk); + void __user *argp = (void __user *)arg; + + dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n", + cmd, arg); + switch (cmd) { + case CCISS_GETPCIINFO: + return cciss_getpciinfo(h, argp); + case CCISS_GETINTINFO: + return cciss_getintinfo(h, argp); + case CCISS_SETINTINFO: + return cciss_setintinfo(h, argp); + case CCISS_GETNODENAME: + return cciss_getnodename(h, argp); + case CCISS_SETNODENAME: + return cciss_setnodename(h, argp); + case CCISS_GETHEARTBEAT: + return cciss_getheartbeat(h, argp); + case CCISS_GETBUSTYPES: + return cciss_getbustypes(h, argp); + case CCISS_GETFIRMVER: + return cciss_getfirmver(h, argp); + case CCISS_GETDRIVVER: + return cciss_getdrivver(h, argp); + case CCISS_DEREGDISK: + case CCISS_REGNEWD: + case CCISS_REVALIDVOLS: + return rebuild_lun_table(h, 0, 1); + case CCISS_GETLUNINFO: + return cciss_getluninfo(h, disk, argp); + case CCISS_PASSTHRU: + return cciss_passthru(h, argp); + case CCISS_BIG_PASSTHRU: + return cciss_bigpassthru(h, argp); /* scsi_cmd_ioctl handles these, below, though some are not */ /* very meaningful for cciss. SG_IO is the main one people want. */ @@ -4792,7 +4776,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, clean4: kfree(h->cmd_pool_bits); /* Free up sg elements */ - for (k = 0; k < h->nr_cmds; k++) + for (k-- ; k >= 0; k--) kfree(h->scatter_list[k]); kfree(h->scatter_list); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index d53b0291c44..946dad4caef 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -35,7 +35,7 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/hdreg.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/genhd.h> @@ -68,6 +68,7 @@ MODULE_LICENSE("GPL"); #define CPQARRAY_DMA_MASK 0xFFFFFFFF /* 32 bit DMA */ +static DEFINE_MUTEX(cpqarray_mutex); static int nr_ctlr; static ctlr_info_t *hba[MAX_CTLR]; @@ -845,9 +846,9 @@ static int ida_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&cpqarray_mutex); ret = ida_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&cpqarray_mutex); return ret; } @@ -859,10 +860,10 @@ static int ida_release(struct gendisk *disk, fmode_t mode) { ctlr_info_t *host; - lock_kernel(); + mutex_lock(&cpqarray_mutex); host = get_host(disk); host->usage_count--; - unlock_kernel(); + mutex_unlock(&cpqarray_mutex); return 0; } @@ -1217,9 +1218,9 @@ static int ida_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&cpqarray_mutex); ret = ida_locked_ioctl(bdev, mode, cmd, param); - unlock_kernel(); + mutex_unlock(&cpqarray_mutex); return ret; } diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 9400845d602..ac04ef97eac 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -965,29 +965,30 @@ void __drbd_set_in_sync(struct drbd_conf 
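One small but easy-to-miss fix above: the cciss probe error path used to walk all h->nr_cmds scatter-list slots even when the allocation loop had only reached index k, so it could kfree() slots that were never filled in. The corrected loop frees only what was actually allocated. The same unwind idiom in isolation (ITEM_SIZE is illustrative):

#include <linux/slab.h>

#define ITEM_SIZE 64

static int alloc_items(void **item, int n)
{
	int k;

	for (k = 0; k < n; k++) {
		item[k] = kmalloc(ITEM_SIZE, GFP_KERNEL);
		if (!item[k])
			goto clean;
	}
	return 0;

clean:
	for (k--; k >= 0; k--)	/* skip the entry that failed; free k-1 .. 0 */
		kfree(item[k]);
	return -ENOMEM;
}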
*mdev, sector_t sector, int size, * ok, (capacity & 7) != 0 sometimes, but who cares... * we count rs_{total,left} in bits, not sectors. */ - spin_lock_irqsave(&mdev->al_lock, flags); count = drbd_bm_clear_bits(mdev, sbnr, ebnr); - if (count) { - /* we need the lock for drbd_try_clear_on_disk_bm */ - if (jiffies - mdev->rs_mark_time > HZ*10) { - /* should be rolling marks, - * but we estimate only anyways. */ - if (mdev->rs_mark_left != drbd_bm_total_weight(mdev) && + if (count && get_ldev(mdev)) { + unsigned long now = jiffies; + unsigned long last = mdev->rs_mark_time[mdev->rs_last_mark]; + int next = (mdev->rs_last_mark + 1) % DRBD_SYNC_MARKS; + if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) { + unsigned long tw = drbd_bm_total_weight(mdev); + if (mdev->rs_mark_left[mdev->rs_last_mark] != tw && mdev->state.conn != C_PAUSED_SYNC_T && mdev->state.conn != C_PAUSED_SYNC_S) { - mdev->rs_mark_time = jiffies; - mdev->rs_mark_left = drbd_bm_total_weight(mdev); + mdev->rs_mark_time[next] = now; + mdev->rs_mark_left[next] = tw; + mdev->rs_last_mark = next; } } - if (get_ldev(mdev)) { - drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE); - put_ldev(mdev); - } + spin_lock_irqsave(&mdev->al_lock, flags); + drbd_try_clear_on_disk_bm(mdev, sector, count, TRUE); + spin_unlock_irqrestore(&mdev->al_lock, flags); + /* just wake_up unconditional now, various lc_chaged(), * lc_put() in drbd_try_clear_on_disk_bm(). */ wake_up = 1; + put_ldev(mdev); } - spin_unlock_irqrestore(&mdev->al_lock, flags); if (wake_up) wake_up(&mdev->al_wait); } @@ -1118,7 +1119,7 @@ static int _is_in_al(struct drbd_conf *mdev, unsigned int enr) * @mdev: DRBD device. * @sector: The sector number. * - * This functions sleeps on al_wait. Returns 1 on success, 0 if interrupted. + * This functions sleeps on al_wait. Returns 0 on success, -EINTR if interrupted. */ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) { @@ -1129,10 +1130,10 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) sig = wait_event_interruptible(mdev->al_wait, (bm_ext = _bme_get(mdev, enr))); if (sig) - return 0; + return -EINTR; if (test_bit(BME_LOCKED, &bm_ext->flags)) - return 1; + return 0; for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { sig = wait_event_interruptible(mdev->al_wait, @@ -1145,13 +1146,11 @@ int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector) wake_up(&mdev->al_wait); } spin_unlock_irq(&mdev->al_lock); - return 0; + return -EINTR; } } - set_bit(BME_LOCKED, &bm_ext->flags); - - return 1; + return 0; } /** diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index e3f88d6e141..fd42832f785 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -569,7 +569,7 @@ int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits) * * maybe bm_set should be atomic_t ? */ -static unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev) +unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev) { struct drbd_bitmap *b = mdev->bitmap; unsigned long s; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 352441b0f92..9bdcf4393c0 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -337,13 +337,25 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c) * NOTE that the payload starts at a long aligned offset, * regardless of 32 or 64 bit arch! 
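The __drbd_set_in_sync() hunk replaces the single rs_mark_left/rs_mark_time pair with a small ring of DRBD_SYNC_MARKS samples, taken at most every DRBD_SYNC_MARK_STEP jiffies, so progress and speed can be computed over a rolling window instead of since the last reset. A reduced sketch of that bookkeeping; the field names mirror the patch, but the rate computation is illustrative and not taken from the driver:

#include <linux/jiffies.h>

#define SYNC_MARKS	8
#define SYNC_MARK_STEP	(3 * HZ)

struct sync_marks {
	unsigned long left[SYNC_MARKS];	/* out-of-sync bits at mark time */
	unsigned long time[SYNC_MARKS];	/* jiffies when the mark was taken */
	int last;			/* index of the newest mark */
};

/* Record a new mark only if the previous one is old enough. */
static void sync_mark_update(struct sync_marks *m, unsigned long bits_left)
{
	unsigned long now = jiffies;
	int next = (m->last + 1) % SYNC_MARKS;

	if (time_after_eq(now, m->time[m->last] + SYNC_MARK_STEP)) {
		m->time[next] = now;
		m->left[next] = bits_left;
		m->last = next;
	}
}

/* Bits cleared per second over the window the ring currently covers. */
static unsigned long sync_mark_rate(const struct sync_marks *m)
{
	int oldest = (m->last + 1) % SYNC_MARKS;
	unsigned long dt = (jiffies - m->time[oldest]) / HZ ?: 1;

	return (m->left[oldest] - m->left[m->last]) / dt;
}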
*/ -struct p_header { +struct p_header80 { u32 magic; u16 command; u16 length; /* bytes of data after this header */ u8 payload[0]; } __packed; -/* 8 bytes. packet FIXED for the next century! */ + +/* Header for big packets, Used for data packets exceeding 64kB */ +struct p_header95 { + u16 magic; /* use DRBD_MAGIC_BIG here */ + u16 command; + u32 length; /* Use only 24 bits of that. Ignore the highest 8 bit. */ + u8 payload[0]; +} __packed; + +union p_header { + struct p_header80 h80; + struct p_header95 h95; +}; /* * short commands, packets without payload, plain p_header: @@ -362,12 +374,16 @@ struct p_header { */ /* these defines must not be changed without changing the protocol version */ -#define DP_HARDBARRIER 1 -#define DP_RW_SYNC 2 +#define DP_HARDBARRIER 1 /* depricated */ +#define DP_RW_SYNC 2 /* equals REQ_SYNC */ #define DP_MAY_SET_IN_SYNC 4 +#define DP_UNPLUG 8 /* equals REQ_UNPLUG */ +#define DP_FUA 16 /* equals REQ_FUA */ +#define DP_FLUSH 32 /* equals REQ_FLUSH */ +#define DP_DISCARD 64 /* equals REQ_DISCARD */ struct p_data { - struct p_header head; + union p_header head; u64 sector; /* 64 bits sector number */ u64 block_id; /* to identify the request in protocol B&C */ u32 seq_num; @@ -383,7 +399,7 @@ struct p_data { * P_DATA_REQUEST, P_RS_DATA_REQUEST */ struct p_block_ack { - struct p_header head; + struct p_header80 head; u64 sector; u64 block_id; u32 blksize; @@ -392,7 +408,7 @@ struct p_block_ack { struct p_block_req { - struct p_header head; + struct p_header80 head; u64 sector; u64 block_id; u32 blksize; @@ -409,7 +425,7 @@ struct p_block_req { */ struct p_handshake { - struct p_header head; /* 8 bytes */ + struct p_header80 head; /* 8 bytes */ u32 protocol_min; u32 feature_flags; u32 protocol_max; @@ -424,19 +440,19 @@ struct p_handshake { /* 80 bytes, FIXED for the next century */ struct p_barrier { - struct p_header head; + struct p_header80 head; u32 barrier; /* barrier number _handle_ only */ u32 pad; /* to multiple of 8 Byte */ } __packed; struct p_barrier_ack { - struct p_header head; + struct p_header80 head; u32 barrier; u32 set_size; } __packed; struct p_rs_param { - struct p_header head; + struct p_header80 head; u32 rate; /* Since protocol version 88 and higher. 
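Splitting struct p_header into p_header80 and p_header95 lets DRBD keep the classic 8-byte header for ordinary packets while data packets larger than 64kB use a header with a 16-bit magic and 24 usable bits of length. A sketch of how a sender might pick between the two layouts; DRBD_MAGIC and DRBD_MAGIC_BIG are the driver's constants, the big-endian conversion mirrors the on-wire format, and this helper is illustrative rather than the driver's actual send-path code:

#include <asm/byteorder.h>

static void prepare_header(union p_header *h, u16 cmd, u32 size)
{
	if (size <= 0xffff) {
		h->h80.magic   = cpu_to_be32(DRBD_MAGIC);
		h->h80.command = cpu_to_be16(cmd);
		h->h80.length  = cpu_to_be16(size);
	} else {
		h->h95.magic   = cpu_to_be16(DRBD_MAGIC_BIG);
		h->h95.command = cpu_to_be16(cmd);
		h->h95.length  = cpu_to_be32(size & 0x00ffffff);	/* only 24 bits used */
	}
}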
*/ @@ -444,20 +460,31 @@ struct p_rs_param { } __packed; struct p_rs_param_89 { - struct p_header head; + struct p_header80 head; u32 rate; /* protocol version 89: */ char verify_alg[SHARED_SECRET_MAX]; char csums_alg[SHARED_SECRET_MAX]; } __packed; +struct p_rs_param_95 { + struct p_header80 head; + u32 rate; + char verify_alg[SHARED_SECRET_MAX]; + char csums_alg[SHARED_SECRET_MAX]; + u32 c_plan_ahead; + u32 c_delay_target; + u32 c_fill_target; + u32 c_max_rate; +} __packed; + enum drbd_conn_flags { CF_WANT_LOSE = 1, CF_DRY_RUN = 2, }; struct p_protocol { - struct p_header head; + struct p_header80 head; u32 protocol; u32 after_sb_0p; u32 after_sb_1p; @@ -471,17 +498,17 @@ struct p_protocol { } __packed; struct p_uuids { - struct p_header head; + struct p_header80 head; u64 uuid[UI_EXTENDED_SIZE]; } __packed; struct p_rs_uuid { - struct p_header head; + struct p_header80 head; u64 uuid; } __packed; struct p_sizes { - struct p_header head; + struct p_header80 head; u64 d_size; /* size of disk */ u64 u_size; /* user requested size */ u64 c_size; /* current exported size */ @@ -491,18 +518,18 @@ struct p_sizes { } __packed; struct p_state { - struct p_header head; + struct p_header80 head; u32 state; } __packed; struct p_req_state { - struct p_header head; + struct p_header80 head; u32 mask; u32 val; } __packed; struct p_req_state_reply { - struct p_header head; + struct p_header80 head; u32 retcode; } __packed; @@ -517,7 +544,7 @@ struct p_drbd06_param { } __packed; struct p_discard { - struct p_header head; + struct p_header80 head; u64 block_id; u32 seq_num; u32 pad; @@ -533,7 +560,7 @@ enum drbd_bitmap_code { }; struct p_compressed_bm { - struct p_header head; + struct p_header80 head; /* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code * (encoding & 0x80): polarity (set/unset) of first runlength * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits @@ -544,10 +571,10 @@ struct p_compressed_bm { u8 code[0]; } __packed; -struct p_delay_probe { - struct p_header head; - u32 seq_num; /* sequence number to match the two probe packets */ - u32 offset; /* usecs the probe got sent after the reference time point */ +struct p_delay_probe93 { + struct p_header80 head; + u32 seq_num; /* sequence number to match the two probe packets */ + u32 offset; /* usecs the probe got sent after the reference time point */ } __packed; /* DCBP: Drbd Compressed Bitmap Packet ... */ @@ -594,7 +621,7 @@ DCBP_set_pad_bits(struct p_compressed_bm *p, int n) * so we need to use the fixed size 4KiB page size * most architechtures have used for a long time. 
*/ -#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header)) +#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header80)) #define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long)) #define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm)) #if (PAGE_SIZE < 4096) @@ -603,13 +630,14 @@ DCBP_set_pad_bits(struct p_compressed_bm *p, int n) #endif union p_polymorph { - struct p_header header; + union p_header header; struct p_handshake handshake; struct p_data data; struct p_block_ack block_ack; struct p_barrier barrier; struct p_barrier_ack barrier_ack; struct p_rs_param_89 rs_param_89; + struct p_rs_param_95 rs_param_95; struct p_protocol protocol; struct p_sizes sizes; struct p_uuids uuids; @@ -617,6 +645,8 @@ union p_polymorph { struct p_req_state req_state; struct p_req_state_reply req_state_reply; struct p_block_req block_req; + struct p_delay_probe93 delay_probe93; + struct p_rs_uuid rs_uuid; } __packed; /**********************************************************************/ @@ -697,7 +727,7 @@ struct drbd_tl_epoch { struct list_head requests; /* requests before */ struct drbd_tl_epoch *next; /* pointer to the next barrier */ unsigned int br_number; /* the barriers identifier. */ - int n_req; /* number of requests attached before this barrier */ + int n_writes; /* number of requests attached before this barrier */ }; struct drbd_request; @@ -747,7 +777,7 @@ struct digest_info { struct drbd_epoch_entry { struct drbd_work w; struct hlist_node colision; - struct drbd_epoch *epoch; + struct drbd_epoch *epoch; /* for writes */ struct drbd_conf *mdev; struct page *pages; atomic_t pending_bios; @@ -755,7 +785,10 @@ struct drbd_epoch_entry { /* see comments on ee flag bits below */ unsigned long flags; sector_t sector; - u64 block_id; + union { + u64 block_id; + struct digest_info *digest; + }; }; /* ee flag bits. @@ -781,12 +814,16 @@ enum { * if any of those fail, we set this flag atomically * from the endio callback */ __EE_WAS_ERROR, + + /* This ee has a pointer to a digest instead of a block id */ + __EE_HAS_DIGEST, }; #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) #define EE_IS_BARRIER (1<<__EE_IS_BARRIER) #define EE_RESUBMITTED (1<<__EE_RESUBMITTED) #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) +#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) /* global flag bits */ enum { @@ -794,7 +831,6 @@ enum { SIGNAL_ASENDER, /* whether asender wants to be interrupted */ SEND_PING, /* whether asender should send a ping asap */ - STOP_SYNC_TIMER, /* tell timer to cancel itself */ UNPLUG_QUEUED, /* only relevant with kernel 2.4 */ UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */ MD_DIRTY, /* current uuids and flags not yet on disk */ @@ -816,6 +852,7 @@ enum { BITMAP_IO, /* suspend application io; once no more io in flight, start bitmap io */ BITMAP_IO_QUEUED, /* Started bitmap IO */ + GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */ RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ NET_CONGESTED, /* The data socket is congested */ @@ -829,6 +866,8 @@ enum { * the peer, if it changed there as well. */ CONN_DRY_RUN, /* Expect disconnect after resync handshake. */ GOT_PING_ACK, /* set when we receive a ping_ack packet, misc wait gets woken */ + NEW_CUR_UUID, /* Create new current UUID when thawing IO */ + AL_SUSPENDED, /* Activity logging is currently suspended. 
*/ }; struct drbd_bitmap; /* opaque for drbd_conf */ @@ -838,10 +877,6 @@ struct drbd_bitmap; /* opaque for drbd_conf */ /* THINK maybe we actually want to use the default "event/%s" worker threads * or similar in linux 2.6, which uses per cpu data and threads. - * - * To be general, this might need a spin_lock member. - * For now, please use the mdev->req_lock to protect list_head, - * see drbd_queue_work below. */ struct drbd_work_queue { struct list_head q; @@ -915,6 +950,12 @@ enum write_ordering_e { WO_bio_barrier }; +struct fifo_buffer { + int *values; + unsigned int head_index; + unsigned int size; +}; + struct drbd_conf { /* things that are stored as / read from meta data on disk */ unsigned long flags; @@ -936,9 +977,16 @@ struct drbd_conf { unsigned int ko_count; struct drbd_work resync_work, unplug_work, + go_diskless, md_sync_work; struct timer_list resync_timer; struct timer_list md_sync_timer; +#ifdef DRBD_DEBUG_MD_SYNC + struct { + unsigned int line; + const char* func; + } last_md_mark_dirty; +#endif /* Used after attach while negotiating new disk state. */ union drbd_state new_state_tmp; @@ -946,6 +994,7 @@ struct drbd_conf { union drbd_state state; wait_queue_head_t misc_wait; wait_queue_head_t state_wait; /* upon each state change. */ + wait_queue_head_t net_cnt_wait; unsigned int send_cnt; unsigned int recv_cnt; unsigned int read_cnt; @@ -974,12 +1023,16 @@ struct drbd_conf { unsigned long rs_start; /* cumulated time in PausedSyncX state [unit jiffies] */ unsigned long rs_paused; + /* skipped because csum was equal [unit BM_BLOCK_SIZE] */ + unsigned long rs_same_csum; +#define DRBD_SYNC_MARKS 8 +#define DRBD_SYNC_MARK_STEP (3*HZ) /* block not up-to-date at mark [unit BM_BLOCK_SIZE] */ - unsigned long rs_mark_left; + unsigned long rs_mark_left[DRBD_SYNC_MARKS]; /* marks's time [unit jiffies] */ - unsigned long rs_mark_time; - /* skipped because csum was equeal [unit BM_BLOCK_SIZE] */ - unsigned long rs_same_csum; + unsigned long rs_mark_time[DRBD_SYNC_MARKS]; + /* current index into rs_mark_{left,time} */ + int rs_last_mark; /* where does the admin want us to start? (sector) */ sector_t ov_start_sector; @@ -1012,10 +1065,10 @@ struct drbd_conf { spinlock_t epoch_lock; unsigned int epochs; enum write_ordering_e write_ordering; - struct list_head active_ee; /* IO in progress */ - struct list_head sync_ee; /* IO in progress */ + struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */ + struct list_head sync_ee; /* IO in progress (P_RS_DATA_REPLY gets written to disk) */ struct list_head done_ee; /* send ack */ - struct list_head read_ee; /* IO in progress */ + struct list_head read_ee; /* IO in progress (any read) */ struct list_head net_ee; /* zero-copy network send in progress */ struct hlist_head *ee_hash; /* is proteced by req_lock! */ unsigned int ee_hash_s; @@ -1026,7 +1079,8 @@ struct drbd_conf { int next_barrier_nr; struct hlist_head *app_reads_hash; /* is proteced by req_lock */ struct list_head resync_reads; - atomic_t pp_in_use; + atomic_t pp_in_use; /* allocated from page pool */ + atomic_t pp_in_use_by_net; /* sendpage()d, still referenced by tcp */ wait_queue_head_t ee_wait; struct page *md_io_page; /* one page buffer for md_io */ struct page *md_io_tmpp; /* for logical_block_size != 512 */ @@ -1054,6 +1108,15 @@ struct drbd_conf { u64 ed_uuid; /* UUID of the exposed data */ struct mutex state_mutex; char congestion_reason; /* Why we where congested... 
*/ + atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */ + atomic_t rs_sect_ev; /* for submitted resync data rate, both */ + int rs_last_sect_ev; /* counter to compare with */ + int rs_last_events; /* counter of read or write "events" (unit sectors) + * on the lower level device when we last looked. */ + int c_sync_rate; /* current resync rate after syncer throttle magic */ + struct fifo_buffer rs_plan_s; /* correction values of resync planer */ + int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ + int rs_planed; /* resync sectors already planed */ }; static inline struct drbd_conf *minor_to_mdev(unsigned int minor) @@ -1138,6 +1201,8 @@ extern void drbd_free_resources(struct drbd_conf *mdev); extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr, unsigned int set_size); extern void tl_clear(struct drbd_conf *mdev); +enum drbd_req_event; +extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what); extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *); extern void drbd_free_sock(struct drbd_conf *mdev); extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, @@ -1150,12 +1215,12 @@ extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_f extern int _drbd_send_state(struct drbd_conf *mdev); extern int drbd_send_state(struct drbd_conf *mdev); extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, - enum drbd_packets cmd, struct p_header *h, + enum drbd_packets cmd, struct p_header80 *h, size_t size, unsigned msg_flags); #define USE_DATA_SOCKET 1 #define USE_META_SOCKET 0 extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket, - enum drbd_packets cmd, struct p_header *h, + enum drbd_packets cmd, struct p_header80 *h, size_t size); extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data, size_t size); @@ -1167,7 +1232,7 @@ extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd, struct p_block_req *rp); extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, - struct p_data *dp); + struct p_data *dp, int data_size); extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd, sector_t sector, int blksize, u64 block_id); extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, @@ -1201,7 +1266,13 @@ extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local); extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local); extern int drbd_md_test_flag(struct drbd_backing_dev *, int); +#ifndef DRBD_DEBUG_MD_SYNC extern void drbd_md_mark_dirty(struct drbd_conf *mdev); +#else +#define drbd_md_mark_dirty(m) drbd_md_mark_dirty_(m, __LINE__ , __func__ ) +extern void drbd_md_mark_dirty_(struct drbd_conf *mdev, + unsigned int line, const char *func); +#endif extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), void (*done)(struct drbd_conf *, int), @@ -1209,6 +1280,7 @@ extern void drbd_queue_bitmap_io(struct drbd_conf *mdev, extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); +extern void drbd_go_diskless(struct drbd_conf *mdev); /* Meta data layout @@ 
-1264,6 +1336,8 @@ struct bm_extent { * Bit 1 ==> local node thinks this block needs to be synced. */ +#define SLEEP_TIME (HZ/10) + #define BM_BLOCK_SHIFT 12 /* 4k per bit */ #define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT) /* (9+3) : 512 bytes @ 8 bits; representing 16M storage @@ -1335,11 +1409,13 @@ struct bm_extent { #endif /* Sector shift value for the "hash" functions of tl_hash and ee_hash tables. - * With a value of 6 all IO in one 32K block make it to the same slot of the + * With a value of 8 all IO in one 128K block make it to the same slot of the * hash table. */ -#define HT_SHIFT 6 +#define HT_SHIFT 8 #define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT)) +#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ + /* Number of elements in the app_reads_hash */ #define APP_R_HSIZE 15 @@ -1369,6 +1445,7 @@ extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_ /* bm_find_next variants for use while you hold drbd_bm_lock() */ extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo); extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo); +extern unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev); extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev); extern int drbd_bm_rs_done(struct drbd_conf *mdev); /* for receive_bitmap */ @@ -1421,7 +1498,8 @@ extern void resync_after_online_grow(struct drbd_conf *); extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force); -enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); +extern enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev); +extern void drbd_try_outdate_peer_async(struct drbd_conf *mdev); extern int drbd_khelper(struct drbd_conf *mdev, char *cmd); /* drbd_worker.c */ @@ -1467,10 +1545,12 @@ extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int); extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int); extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int); extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int); +extern int w_restart_disk_io(struct drbd_conf *, struct drbd_work *, int); extern void resync_timer_fn(unsigned long data); /* drbd_receiver.c */ +extern int drbd_rs_should_slow_down(struct drbd_conf *mdev); extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, const unsigned rw, const int fault_type); extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list); @@ -1479,7 +1559,10 @@ extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, sector_t sector, unsigned int data_size, gfp_t gfp_mask) __must_hold(local); -extern void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e); +extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, + int is_net); +#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0) +#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1) extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head); extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, @@ -1487,6 +1570,7 @@ extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled); extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed); extern void drbd_flush_workqueue(struct 
drbd_conf *mdev); +extern void drbd_free_tl_hash(struct drbd_conf *mdev); /* yes, there is kernel_setsockopt, but only since 2.6.18. we don't need to * mess with get_fs/set_fs, we know we are KERNEL_DS always. */ @@ -1600,6 +1684,8 @@ void drbd_bcast_ee(struct drbd_conf *mdev, #define susp_MASK 1 #define user_isp_MASK 1 #define aftr_isp_MASK 1 +#define susp_nod_MASK 1 +#define susp_fen_MASK 1 #define NS(T, S) \ ({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \ @@ -1856,13 +1942,6 @@ static inline sector_t drbd_md_ss__(struct drbd_conf *mdev, } static inline void -_drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) -{ - list_add_tail(&w->list, &q->q); - up(&q->s); -} - -static inline void drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w) { unsigned long flags; @@ -1899,19 +1978,19 @@ static inline void request_ping(struct drbd_conf *mdev) static inline int drbd_send_short_cmd(struct drbd_conf *mdev, enum drbd_packets cmd) { - struct p_header h; + struct p_header80 h; return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h)); } static inline int drbd_send_ping(struct drbd_conf *mdev) { - struct p_header h; + struct p_header80 h; return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h)); } static inline int drbd_send_ping_ack(struct drbd_conf *mdev) { - struct p_header h; + struct p_header80 h; return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h)); } @@ -2013,7 +2092,7 @@ static inline void inc_unacked(struct drbd_conf *mdev) static inline void put_net_conf(struct drbd_conf *mdev) { if (atomic_dec_and_test(&mdev->net_cnt)) - wake_up(&mdev->misc_wait); + wake_up(&mdev->net_cnt_wait); } /** @@ -2044,10 +2123,14 @@ static inline int get_net_conf(struct drbd_conf *mdev) static inline void put_ldev(struct drbd_conf *mdev) { + int i = atomic_dec_return(&mdev->local_cnt); __release(local); - if (atomic_dec_and_test(&mdev->local_cnt)) + D_ASSERT(i >= 0); + if (i == 0) { + if (mdev->state.disk == D_FAILED) + drbd_go_diskless(mdev); wake_up(&mdev->misc_wait); - D_ASSERT(atomic_read(&mdev->local_cnt) >= 0); + } } #ifndef __CHECKER__ @@ -2179,11 +2262,16 @@ static inline int drbd_state_is_stable(union drbd_state s) return 1; } +static inline int is_susp(union drbd_state s) +{ + return s.susp || s.susp_nod || s.susp_fen; +} + static inline int __inc_ap_bio_cond(struct drbd_conf *mdev) { int mxb = drbd_get_max_buffers(mdev); - if (mdev->state.susp) + if (is_susp(mdev->state)) return 0; if (test_bit(SUSPEND_IO, &mdev->flags)) return 0; @@ -2321,8 +2409,7 @@ static inline void drbd_md_flush(struct drbd_conf *mdev) if (test_bit(MD_NO_BARRIER, &mdev->flags)) return; - r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL, - BLKDEV_IFL_WAIT); + r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); if (r) { set_bit(MD_NO_BARRIER, &mdev->flags); dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index fa650dd85b9..c5dfe6486cf 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -32,7 +32,7 @@ #include <asm/types.h> #include <net/sock.h> #include <linux/ctype.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/proc_fs.h> @@ -64,6 +64,7 @@ struct after_state_chg_work { struct completion *done; }; +static DEFINE_MUTEX(drbd_main_mutex); int drbdd_init(struct drbd_thread *); int drbd_worker(struct drbd_thread *); int 
drbd_asender(struct drbd_thread *); @@ -77,6 +78,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused); static void md_sync_timer_fn(unsigned long data); static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); +static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused); MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " "Lars Ellenberg <lars@linbit.com>"); @@ -199,7 +201,7 @@ static int tl_init(struct drbd_conf *mdev) INIT_LIST_HEAD(&b->w.list); b->next = NULL; b->br_number = 4711; - b->n_req = 0; + b->n_writes = 0; b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */ mdev->oldest_tle = b; @@ -240,7 +242,7 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new) INIT_LIST_HEAD(&new->w.list); new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */ new->next = NULL; - new->n_req = 0; + new->n_writes = 0; newest_before = mdev->newest_tle; /* never send a barrier number == 0, because that is special-cased @@ -284,9 +286,9 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr, barrier_nr, b->br_number); goto bail; } - if (b->n_req != set_size) { - dev_err(DEV, "BAD! BarrierAck #%u received with n_req=%u, expected n_req=%u!\n", - barrier_nr, set_size, b->n_req); + if (b->n_writes != set_size) { + dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n", + barrier_nr, set_size, b->n_writes); goto bail; } @@ -333,6 +335,82 @@ bail: drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); } +/** + * _tl_restart() - Walks the transfer log, and applies an action to all requests + * @mdev: DRBD device. + * @what: The action/event to perform with all request objects + * + * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io, + * restart_frozen_disk_io. + */ +static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) +{ + struct drbd_tl_epoch *b, *tmp, **pn; + struct list_head *le, *tle, carry_reads; + struct drbd_request *req; + int rv, n_writes, n_reads; + + b = mdev->oldest_tle; + pn = &mdev->oldest_tle; + while (b) { + n_writes = 0; + n_reads = 0; + INIT_LIST_HEAD(&carry_reads); + list_for_each_safe(le, tle, &b->requests) { + req = list_entry(le, struct drbd_request, tl_requests); + rv = _req_mod(req, what); + + n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT; + n_reads += (rv & MR_READ) >> MR_READ_SHIFT; + } + tmp = b->next; + + if (n_writes) { + if (what == resend) { + b->n_writes = n_writes; + if (b->w.cb == NULL) { + b->w.cb = w_send_barrier; + inc_ap_pending(mdev); + set_bit(CREATE_BARRIER, &mdev->flags); + } + + drbd_queue_work(&mdev->data.work, &b->w); + } + pn = &b->next; + } else { + if (n_reads) + list_add(&carry_reads, &b->requests); + /* there could still be requests on that ring list, + * in case local io is still pending */ + list_del(&b->requests); + + /* dec_ap_pending corresponding to queue_barrier. + * the newest barrier may not have been queued yet, + * in which case w.cb is still NULL. */ + if (b->w.cb != NULL) + dec_ap_pending(mdev); + + if (b == mdev->newest_tle) { + /* recycle, but reinit! 
*/ + D_ASSERT(tmp == NULL); + INIT_LIST_HEAD(&b->requests); + list_splice(&carry_reads, &b->requests); + INIT_LIST_HEAD(&b->w.list); + b->w.cb = NULL; + b->br_number = net_random(); + b->n_writes = 0; + + *pn = b; + break; + } + *pn = tmp; + kfree(b); + } + b = tmp; + list_splice(&carry_reads, &b->requests); + } +} + /** * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL @@ -344,48 +422,12 @@ bail: */ void tl_clear(struct drbd_conf *mdev) { - struct drbd_tl_epoch *b, *tmp; struct list_head *le, *tle; struct drbd_request *r; - int new_initial_bnr = net_random(); spin_lock_irq(&mdev->req_lock); - b = mdev->oldest_tle; - while (b) { - list_for_each_safe(le, tle, &b->requests) { - r = list_entry(le, struct drbd_request, tl_requests); - /* It would be nice to complete outside of spinlock. - * But this is easier for now. */ - _req_mod(r, connection_lost_while_pending); - } - tmp = b->next; - - /* there could still be requests on that ring list, - * in case local io is still pending */ - list_del(&b->requests); - - /* dec_ap_pending corresponding to queue_barrier. - * the newest barrier may not have been queued yet, - * in which case w.cb is still NULL. */ - if (b->w.cb != NULL) - dec_ap_pending(mdev); - - if (b == mdev->newest_tle) { - /* recycle, but reinit! */ - D_ASSERT(tmp == NULL); - INIT_LIST_HEAD(&b->requests); - INIT_LIST_HEAD(&b->w.list); - b->w.cb = NULL; - b->br_number = new_initial_bnr; - b->n_req = 0; - - mdev->oldest_tle = b; - break; - } - kfree(b); - b = tmp; - } + _tl_restart(mdev, connection_lost_while_pending); /* we expect this list to be empty. */ D_ASSERT(list_empty(&mdev->out_of_sequence_requests)); @@ -401,6 +443,15 @@ void tl_clear(struct drbd_conf *mdev) /* ensure bit indicating barrier is required is clear */ clear_bit(CREATE_BARRIER, &mdev->flags); + memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); + + spin_unlock_irq(&mdev->req_lock); +} + +void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) +{ + spin_lock_irq(&mdev->req_lock); + _tl_restart(mdev, what); spin_unlock_irq(&mdev->req_lock); } @@ -455,7 +506,7 @@ static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns); static int is_valid_state_transition(struct drbd_conf *, union drbd_state, union drbd_state); static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, - union drbd_state ns, int *warn_sync_abort); + union drbd_state ns, const char **warn_sync_abort); int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state); @@ -605,7 +656,7 @@ static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns) drbd_role_str(ns.peer), drbd_disk_str(ns.disk), drbd_disk_str(ns.pdsk), - ns.susp ? 's' : 'r', + is_susp(ns) ? 's' : 'r', ns.aftr_isp ? 'a' : '-', ns.peer_isp ? 'p' : '-', ns.user_isp ? 'u' : '-' @@ -763,7 +814,7 @@ static int is_valid_state_transition(struct drbd_conf *mdev, * to D_UNKNOWN. This rule and many more along those lines are in this function. 
*/ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os, - union drbd_state ns, int *warn_sync_abort) + union drbd_state ns, const char **warn_sync_abort) { enum drbd_fencing_p fp; @@ -778,9 +829,10 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os.conn <= C_DISCONNECTING) ns.conn = os.conn; - /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */ + /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow. + * If you try to go into some Sync* state, that shall fail (elsewhere). */ if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN && - ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING) + ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN) ns.conn = os.conn; /* After C_DISCONNECTING only C_STANDALONE may follow */ @@ -798,14 +850,13 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY) ns.aftr_isp = 0; - if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS) - ns.pdsk = D_UNKNOWN; - /* Abort resync if a disk fails/detaches */ if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) { if (warn_sync_abort) - *warn_sync_abort = 1; + *warn_sync_abort = + os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ? + "Online-verify" : "Resync"; ns.conn = C_CONNECTED; } @@ -876,7 +927,12 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state if (fp == FP_STONITH && (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) - ns.susp = 1; + ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */ + + if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO && + (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) && + !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE)) + ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */ if (ns.aftr_isp || ns.peer_isp || ns.user_isp) { if (ns.conn == C_SYNC_SOURCE) @@ -912,6 +968,12 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs) } } +static void drbd_resume_al(struct drbd_conf *mdev) +{ + if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags)) + dev_info(DEV, "Resumed AL updates\n"); +} + /** * __drbd_set_state() - Set a new DRBD state * @mdev: DRBD device. @@ -927,7 +989,7 @@ int __drbd_set_state(struct drbd_conf *mdev, { union drbd_state os; int rv = SS_SUCCESS; - int warn_sync_abort = 0; + const char *warn_sync_abort = NULL; struct after_state_chg_work *ascw; os = mdev->state; @@ -946,14 +1008,8 @@ int __drbd_set_state(struct drbd_conf *mdev, /* If the old state was illegal as well, then let this happen...*/ - if (is_valid_state(mdev, os) == rv) { - dev_err(DEV, "Considering state change from bad state. 
" - "Error would be: '%s'\n", - drbd_set_st_err_str(rv)); - print_st(mdev, "old", os); - print_st(mdev, "new", ns); + if (is_valid_state(mdev, os) == rv) rv = is_valid_state_transition(mdev, ns, os); - } } else rv = is_valid_state_transition(mdev, ns, os); } @@ -965,7 +1021,7 @@ int __drbd_set_state(struct drbd_conf *mdev, } if (warn_sync_abort) - dev_warn(DEV, "Resync aborted.\n"); + dev_warn(DEV, "%s aborted.\n", warn_sync_abort); { char *pbp, pb[300]; @@ -976,7 +1032,10 @@ int __drbd_set_state(struct drbd_conf *mdev, PSC(conn); PSC(disk); PSC(pdsk); - PSC(susp); + if (is_susp(ns) != is_susp(os)) + pbp += sprintf(pbp, "susp( %s -> %s ) ", + drbd_susp_str(is_susp(os)), + drbd_susp_str(is_susp(ns))); PSC(aftr_isp); PSC(peer_isp); PSC(user_isp); @@ -1001,12 +1060,6 @@ int __drbd_set_state(struct drbd_conf *mdev, wake_up(&mdev->misc_wait); wake_up(&mdev->state_wait); - /* post-state-change actions */ - if (os.conn >= C_SYNC_SOURCE && ns.conn <= C_CONNECTED) { - set_bit(STOP_SYNC_TIMER, &mdev->flags); - mod_timer(&mdev->resync_timer, jiffies); - } - /* aborted verify run. log the last position */ if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) && ns.conn < C_CONNECTED) { @@ -1019,41 +1072,42 @@ int __drbd_set_state(struct drbd_conf *mdev, if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) && (ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)) { dev_info(DEV, "Syncer continues.\n"); - mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time; - if (ns.conn == C_SYNC_TARGET) { - if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags)) - mod_timer(&mdev->resync_timer, jiffies); - /* This if (!test_bit) is only needed for the case - that a device that has ceased to used its timer, - i.e. it is already in drbd_resync_finished() gets - paused and resumed. 
*/ - } + mdev->rs_paused += (long)jiffies + -(long)mdev->rs_mark_time[mdev->rs_last_mark]; + if (ns.conn == C_SYNC_TARGET) + mod_timer(&mdev->resync_timer, jiffies); } if ((os.conn == C_SYNC_TARGET || os.conn == C_SYNC_SOURCE) && (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) { dev_info(DEV, "Resync suspended\n"); - mdev->rs_mark_time = jiffies; - if (ns.conn == C_PAUSED_SYNC_T) - set_bit(STOP_SYNC_TIMER, &mdev->flags); + mdev->rs_mark_time[mdev->rs_last_mark] = jiffies; } if (os.conn == C_CONNECTED && (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) { + unsigned long now = jiffies; + int i; + mdev->ov_position = 0; - mdev->rs_total = - mdev->rs_mark_left = drbd_bm_bits(mdev); + mdev->rs_total = drbd_bm_bits(mdev); if (mdev->agreed_pro_version >= 90) set_ov_position(mdev, ns.conn); else mdev->ov_start_sector = 0; mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(mdev->ov_position); - mdev->rs_start = - mdev->rs_mark_time = jiffies; + mdev->rs_start = now; + mdev->rs_last_events = 0; + mdev->rs_last_sect_ev = 0; mdev->ov_last_oos_size = 0; mdev->ov_last_oos_start = 0; + for (i = 0; i < DRBD_SYNC_MARKS; i++) { + mdev->rs_mark_left[i] = mdev->rs_total; + mdev->rs_mark_time[i] = now; + } + if (ns.conn == C_VERIFY_S) { dev_info(DEV, "Starting Online Verify from sector %llu\n", (unsigned long long)mdev->ov_position); @@ -1106,6 +1160,10 @@ int __drbd_set_state(struct drbd_conf *mdev, ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) drbd_thread_restart_nowait(&mdev->receiver); + /* Resume AL writing if we get a connection */ + if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) + drbd_resume_al(mdev); + ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC); if (ascw) { ascw->os = os; @@ -1164,6 +1222,8 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, union drbd_state ns, enum chg_state_flags flags) { enum drbd_fencing_p fp; + enum drbd_req_event what = nothing; + union drbd_state nsm = (union drbd_state){ .i = -1 }; if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) { clear_bit(CRASHED_PRIMARY, &mdev->flags); @@ -1187,17 +1247,49 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, /* Here we have the actions that are performed after a state change. 
This function might sleep */ - if (fp == FP_STONITH && ns.susp) { - /* case1: The outdate peer handler is successful: - * case2: The connection was established again: */ - if ((os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) || - (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) { + nsm.i = -1; + if (ns.susp_nod) { + if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { + if (ns.conn == C_CONNECTED) + what = resend, nsm.susp_nod = 0; + else /* ns.conn > C_CONNECTED */ + dev_err(DEV, "Unexpected Resynd going on!\n"); + } + + if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) + what = restart_frozen_disk_io, nsm.susp_nod = 0; + + } + + if (ns.susp_fen) { + /* case1: The outdate peer handler is successful: */ + if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) { tl_clear(mdev); + if (test_bit(NEW_CUR_UUID, &mdev->flags)) { + drbd_uuid_new_current(mdev); + clear_bit(NEW_CUR_UUID, &mdev->flags); + drbd_md_sync(mdev); + } spin_lock_irq(&mdev->req_lock); - _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL); + _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); spin_unlock_irq(&mdev->req_lock); } + /* case2: The connection was established again: */ + if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { + clear_bit(NEW_CUR_UUID, &mdev->flags); + what = resend; + nsm.susp_fen = 0; + } + } + + if (what != nothing) { + spin_lock_irq(&mdev->req_lock); + _tl_restart(mdev, what); + nsm.i &= mdev->state.i; + _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL); + spin_unlock_irq(&mdev->req_lock); } + /* Do not change the order of the if above and the two below... */ if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */ drbd_send_uuids(mdev); @@ -1216,16 +1308,22 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (get_ldev(mdev)) { if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) && mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) { - drbd_uuid_new_current(mdev); - drbd_send_uuids(mdev); + if (is_susp(mdev->state)) { + set_bit(NEW_CUR_UUID, &mdev->flags); + } else { + drbd_uuid_new_current(mdev); + drbd_send_uuids(mdev); + } } put_ldev(mdev); } } if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) { - if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) + if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) { drbd_uuid_new_current(mdev); + drbd_send_uuids(mdev); + } /* D_DISKLESS Peer becomes secondary */ if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY) @@ -1267,42 +1365,51 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); + /* first half of local IO error */ if (os.disk > D_FAILED && ns.disk == D_FAILED) { - enum drbd_io_error_p eh; + enum drbd_io_error_p eh = EP_PASS_ON; + + if (drbd_send_state(mdev)) + dev_warn(DEV, "Notified peer that my disk is broken.\n"); + else + dev_err(DEV, "Sending state for drbd_io_error() failed\n"); + + drbd_rs_cancel_all(mdev); - eh = EP_PASS_ON; if (get_ldev_if_state(mdev, D_FAILED)) { eh = mdev->ldev->dc.on_io_error; put_ldev(mdev); } + if (eh == EP_CALL_HELPER) + drbd_khelper(mdev, "local-io-error"); + } - drbd_rs_cancel_all(mdev); - /* since get_ldev() only works as long as disk>=D_INCONSISTENT, - and it is D_DISKLESS here, local_cnt can only go down, it can - not increase... 
It will reach zero */ - wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + + /* second half of local IO error handling, + * after local_cnt references have reached zero: */ + if (os.disk == D_FAILED && ns.disk == D_DISKLESS) { mdev->rs_total = 0; mdev->rs_failed = 0; atomic_set(&mdev->rs_pending_cnt, 0); - - spin_lock_irq(&mdev->req_lock); - _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL); - spin_unlock_irq(&mdev->req_lock); - - if (eh == EP_CALL_HELPER) - drbd_khelper(mdev, "local-io-error"); } if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) { + /* We must still be diskless, + * re-attach has to be serialized with this! */ + if (mdev->state.disk != D_DISKLESS) + dev_err(DEV, + "ASSERT FAILED: disk is %s while going diskless\n", + drbd_disk_str(mdev->state.disk)); + + /* we cannot assert local_cnt == 0 here, as get_ldev_if_state + * will inc/dec it frequently. Since we became D_DISKLESS, no + * one has touched the protected members anymore, though, so we + * are safe to free them here. */ + if (drbd_send_state(mdev)) + dev_warn(DEV, "Notified peer that I detached my disk.\n"); + else + dev_err(DEV, "Sending state for detach failed\n"); - if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS*/ { - if (drbd_send_state(mdev)) - dev_warn(DEV, "Notified peer that my disk is broken.\n"); - else - dev_err(DEV, "Sending state in drbd_io_error() failed\n"); - } - - wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); lc_destroy(mdev->resync); mdev->resync = NULL; lc_destroy(mdev->act_log); @@ -1311,8 +1418,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, drbd_free_bc(mdev->ldev); mdev->ldev = NULL;); - if (mdev->md_io_tmpp) + if (mdev->md_io_tmpp) { __free_page(mdev->md_io_tmpp); + mdev->md_io_tmpp = NULL; + } } /* Disks got bigger while they were detached */ @@ -1328,6 +1437,15 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, (os.user_isp && !ns.user_isp)) resume_next_sg(mdev); + /* sync target done with resync. Explicitly notify peer, even though + * it should (at least for non-empty resyncs) already know itself. */ + if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED) + drbd_send_state(mdev); + + /* free tl_hash if we Got thawed and are C_STANDALONE */ + if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash) + drbd_free_tl_hash(mdev); + /* Upon network connection, we need to start the receiver */ if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED) drbd_thread_start(&mdev->receiver); @@ -1554,7 +1672,7 @@ void drbd_thread_current_set_cpu(struct drbd_conf *mdev) /* the appropriate socket mutex must be held already */ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, - enum drbd_packets cmd, struct p_header *h, + enum drbd_packets cmd, struct p_header80 *h, size_t size, unsigned msg_flags) { int sent, ok; @@ -1564,7 +1682,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, h->magic = BE_DRBD_MAGIC; h->command = cpu_to_be16(cmd); - h->length = cpu_to_be16(size-sizeof(struct p_header)); + h->length = cpu_to_be16(size-sizeof(struct p_header80)); sent = drbd_send(mdev, sock, h, size, msg_flags); @@ -1579,7 +1697,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock, * when we hold the appropriate socket mutex. 
*/ int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket, - enum drbd_packets cmd, struct p_header *h, size_t size) + enum drbd_packets cmd, struct p_header80 *h, size_t size) { int ok = 0; struct socket *sock; @@ -1607,7 +1725,7 @@ int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket, int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data, size_t size) { - struct p_header h; + struct p_header80 h; int ok; h.magic = BE_DRBD_MAGIC; @@ -1629,7 +1747,7 @@ int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data, int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc) { - struct p_rs_param_89 *p; + struct p_rs_param_95 *p; struct socket *sock; int size, rv; const int apv = mdev->agreed_pro_version; @@ -1637,7 +1755,8 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc) size = apv <= 87 ? sizeof(struct p_rs_param) : apv == 88 ? sizeof(struct p_rs_param) + strlen(mdev->sync_conf.verify_alg) + 1 - : /* 89 */ sizeof(struct p_rs_param_89); + : apv <= 94 ? sizeof(struct p_rs_param_89) + : /* apv >= 95 */ sizeof(struct p_rs_param_95); /* used from admin command context and receiver/worker context. * to avoid kmalloc, grab the socket right here, @@ -1648,12 +1767,16 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc) if (likely(sock != NULL)) { enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM; - p = &mdev->data.sbuf.rs_param_89; + p = &mdev->data.sbuf.rs_param_95; /* initialize verify_alg and csums_alg */ memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); p->rate = cpu_to_be32(sc->rate); + p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead); + p->c_delay_target = cpu_to_be32(sc->c_delay_target); + p->c_fill_target = cpu_to_be32(sc->c_fill_target); + p->c_max_rate = cpu_to_be32(sc->c_max_rate); if (apv >= 88) strcpy(p->verify_alg, mdev->sync_conf.verify_alg); @@ -1709,7 +1832,7 @@ int drbd_send_protocol(struct drbd_conf *mdev) strcpy(p->integrity_alg, mdev->net_conf->integrity_alg); rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL, - (struct p_header *)p, size); + (struct p_header80 *)p, size); kfree(p); return rv; } @@ -1735,7 +1858,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags) put_ldev(mdev); return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); } int drbd_send_uuids(struct drbd_conf *mdev) @@ -1756,7 +1879,7 @@ int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val) p.uuid = cpu_to_be64(val); return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); } int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags) @@ -1786,7 +1909,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl p.dds_flags = cpu_to_be16(flags); ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); return ok; } @@ -1811,7 +1934,7 @@ int drbd_send_state(struct drbd_conf *mdev) if (likely(sock != NULL)) { ok = _drbd_send_cmd(mdev, sock, P_STATE, - (struct p_header *)&p, sizeof(p), 0); + (struct p_header80 *)&p, sizeof(p), 0); } mutex_unlock(&mdev->data.mutex); @@ -1829,7 +1952,7 @@ int drbd_send_state_req(struct drbd_conf *mdev, p.val = cpu_to_be32(val.i); return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); } int 
drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) @@ -1839,7 +1962,7 @@ int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode) p.retcode = cpu_to_be32(retcode); return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); } int fill_bitmap_rle_bits(struct drbd_conf *mdev, @@ -1938,7 +2061,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev, enum { OK, FAILED, DONE } send_bitmap_rle_or_plain(struct drbd_conf *mdev, - struct p_header *h, struct bm_xfer_ctx *c) + struct p_header80 *h, struct bm_xfer_ctx *c) { struct p_compressed_bm *p = (void*)h; unsigned long num_words; @@ -1968,12 +2091,12 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, if (len) drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload); ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP, - h, sizeof(struct p_header) + len, 0); + h, sizeof(struct p_header80) + len, 0); c->word_offset += num_words; c->bit_offset = c->word_offset * BITS_PER_LONG; c->packets[1]++; - c->bytes[1] += sizeof(struct p_header) + len; + c->bytes[1] += sizeof(struct p_header80) + len; if (c->bit_offset > c->bm_bits) c->bit_offset = c->bm_bits; @@ -1989,14 +2112,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev, int _drbd_send_bitmap(struct drbd_conf *mdev) { struct bm_xfer_ctx c; - struct p_header *p; + struct p_header80 *p; int ret; ERR_IF(!mdev->bitmap) return FALSE; /* maybe we should use some per thread scratch page, * and allocate that during initial device creation? */ - p = (struct p_header *) __get_free_page(GFP_NOIO); + p = (struct p_header80 *) __get_free_page(GFP_NOIO); if (!p) { dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); return FALSE; @@ -2054,7 +2177,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size) if (mdev->state.conn < C_CONNECTED) return FALSE; ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); return ok; } @@ -2082,17 +2205,18 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd, if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED) return FALSE; ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); return ok; } +/* dp->sector and dp->block_id already/still in network byte order, + * data_size is payload size according to dp->head, + * and may need to be corrected for digest size. */ int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd, - struct p_data *dp) + struct p_data *dp, int data_size) { - const int header_size = sizeof(struct p_data) - - sizeof(struct p_header); - int data_size = ((struct p_header *)dp)->length - header_size; - + data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? 
+ crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size), dp->block_id); } @@ -2140,7 +2264,7 @@ int drbd_send_drequest(struct drbd_conf *mdev, int cmd, p.blksize = cpu_to_be32(size); ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); return ok; } @@ -2158,7 +2282,7 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev, p.head.magic = BE_DRBD_MAGIC; p.head.command = cpu_to_be16(cmd); - p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size); + p.head.length = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size); mutex_lock(&mdev->data.mutex); @@ -2180,7 +2304,7 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size) p.blksize = cpu_to_be32(size); ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST, - (struct p_header *)&p, sizeof(p)); + (struct p_header80 *)&p, sizeof(p)); return ok; } @@ -2332,6 +2456,18 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) return 1; } +static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw) +{ + if (mdev->agreed_pro_version >= 95) + return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | + (bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) | + (bi_rw & REQ_FUA ? DP_FUA : 0) | + (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | + (bi_rw & REQ_DISCARD ? DP_DISCARD : 0); + else + return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0; +} + /* Used to send write requests * R_PRIMARY -> Peer (P_DATA) */ @@ -2349,30 +2485,25 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ? crypto_hash_digestsize(mdev->integrity_w_tfm) : 0; - p.head.magic = BE_DRBD_MAGIC; - p.head.command = cpu_to_be16(P_DATA); - p.head.length = - cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size); + if (req->size <= DRBD_MAX_SIZE_H80_PACKET) { + p.head.h80.magic = BE_DRBD_MAGIC; + p.head.h80.command = cpu_to_be16(P_DATA); + p.head.h80.length = + cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size); + } else { + p.head.h95.magic = BE_DRBD_MAGIC_BIG; + p.head.h95.command = cpu_to_be16(P_DATA); + p.head.h95.length = + cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size); + } p.sector = cpu_to_be64(req->sector); p.block_id = (unsigned long)req; p.seq_num = cpu_to_be32(req->seq_num = atomic_add_return(1, &mdev->packet_seq)); - dp_flags = 0; - /* NOTE: no need to check if barriers supported here as we would - * not pass the test in make_request_common in that case - */ - if (req->master_bio->bi_rw & REQ_HARDBARRIER) { - dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n"); - /* dp_flags |= DP_HARDBARRIER; */ - } - if (req->master_bio->bi_rw & REQ_SYNC) - dp_flags |= DP_RW_SYNC; - /* for now handle SYNCIO and UNPLUG - * as if they still were one and the same flag */ - if (req->master_bio->bi_rw & REQ_UNPLUG) - dp_flags |= DP_RW_SYNC; + dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw); + if (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn <= C_PAUSED_SYNC_T) dp_flags |= DP_MAY_SET_IN_SYNC; @@ -2413,10 +2544,17 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ? 
crypto_hash_digestsize(mdev->integrity_w_tfm) : 0; - p.head.magic = BE_DRBD_MAGIC; - p.head.command = cpu_to_be16(cmd); - p.head.length = - cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size); + if (e->size <= DRBD_MAX_SIZE_H80_PACKET) { + p.head.h80.magic = BE_DRBD_MAGIC; + p.head.h80.command = cpu_to_be16(cmd); + p.head.h80.length = + cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size); + } else { + p.head.h95.magic = BE_DRBD_MAGIC_BIG; + p.head.h95.command = cpu_to_be16(cmd); + p.head.h95.length = + cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size); + } p.sector = cpu_to_be64(e->sector); p.block_id = e->block_id; @@ -2429,8 +2567,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd, if (!drbd_get_data_sock(mdev)) return 0; - ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, - sizeof(p), dgs ? MSG_MORE : 0); + ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0); if (ok && dgs) { dgb = mdev->int_dig_out; drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb); @@ -2536,7 +2673,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) unsigned long flags; int rv = 0; - lock_kernel(); + mutex_lock(&drbd_main_mutex); spin_lock_irqsave(&mdev->req_lock, flags); /* to have a stable mdev->state.role * and no race with updating open_cnt */ @@ -2551,7 +2688,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) if (!rv) mdev->open_cnt++; spin_unlock_irqrestore(&mdev->req_lock, flags); - unlock_kernel(); + mutex_unlock(&drbd_main_mutex); return rv; } @@ -2559,9 +2696,9 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) static int drbd_release(struct gendisk *gd, fmode_t mode) { struct drbd_conf *mdev = gd->private_data; - lock_kernel(); + mutex_lock(&drbd_main_mutex); mdev->open_cnt--; - unlock_kernel(); + mutex_unlock(&drbd_main_mutex); return 0; } @@ -2605,7 +2742,13 @@ static void drbd_set_defaults(struct drbd_conf *mdev) /* .verify_alg = */ {}, 0, /* .cpu_mask = */ {}, 0, /* .csums_alg = */ {}, 0, - /* .use_rle = */ 0 + /* .use_rle = */ 0, + /* .on_no_data = */ DRBD_ON_NO_DATA_DEF, + /* .c_plan_ahead = */ DRBD_C_PLAN_AHEAD_DEF, + /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF, + /* .c_fill_target = */ DRBD_C_FILL_TARGET_DEF, + /* .c_max_rate = */ DRBD_C_MAX_RATE_DEF, + /* .c_min_rate = */ DRBD_C_MIN_RATE_DEF }; /* Have to use that way, because the layout differs between @@ -2616,7 +2759,9 @@ static void drbd_set_defaults(struct drbd_conf *mdev) .conn = C_STANDALONE, .disk = D_DISKLESS, .pdsk = D_UNKNOWN, - .susp = 0 + .susp = 0, + .susp_nod = 0, + .susp_fen = 0 } }; } @@ -2640,6 +2785,9 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) atomic_set(&mdev->net_cnt, 0); atomic_set(&mdev->packet_seq, 0); atomic_set(&mdev->pp_in_use, 0); + atomic_set(&mdev->pp_in_use_by_net, 0); + atomic_set(&mdev->rs_sect_in, 0); + atomic_set(&mdev->rs_sect_ev, 0); mutex_init(&mdev->md_io_mutex); mutex_init(&mdev->data.mutex); @@ -2666,11 +2814,13 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) INIT_LIST_HEAD(&mdev->meta.work.q); INIT_LIST_HEAD(&mdev->resync_work.list); INIT_LIST_HEAD(&mdev->unplug_work.list); + INIT_LIST_HEAD(&mdev->go_diskless.list); INIT_LIST_HEAD(&mdev->md_sync_work.list); INIT_LIST_HEAD(&mdev->bm_io_work.w.list); mdev->resync_work.cb = w_resync_inactive; mdev->unplug_work.cb = w_send_write_hint; + mdev->go_diskless.cb = w_go_diskless; mdev->md_sync_work.cb = w_md_sync; mdev->bm_io_work.w.cb = w_bitmap_io; init_timer(&mdev->resync_timer); @@ -2682,6 
+2832,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) init_waitqueue_head(&mdev->misc_wait); init_waitqueue_head(&mdev->state_wait); + init_waitqueue_head(&mdev->net_cnt_wait); init_waitqueue_head(&mdev->ee_wait); init_waitqueue_head(&mdev->al_wait); init_waitqueue_head(&mdev->seq_wait); @@ -2697,6 +2848,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) void drbd_mdev_cleanup(struct drbd_conf *mdev) { + int i; if (mdev->receiver.t_state != None) dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", mdev->receiver.t_state); @@ -2713,9 +2865,13 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) mdev->p_size = mdev->rs_start = mdev->rs_total = - mdev->rs_failed = - mdev->rs_mark_left = - mdev->rs_mark_time = 0; + mdev->rs_failed = 0; + mdev->rs_last_events = 0; + mdev->rs_last_sect_ev = 0; + for (i = 0; i < DRBD_SYNC_MARKS; i++) { + mdev->rs_mark_left[i] = 0; + mdev->rs_mark_time[i] = 0; + } D_ASSERT(mdev->net_conf == NULL); drbd_set_my_capacity(mdev, 0); @@ -2726,6 +2882,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) } drbd_free_resources(mdev); + clear_bit(AL_SUSPENDED, &mdev->flags); /* * currently we drbd_init_ee only on module load, so @@ -2741,6 +2898,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) D_ASSERT(list_empty(&mdev->meta.work.q)); D_ASSERT(list_empty(&mdev->resync_work.list)); D_ASSERT(list_empty(&mdev->unplug_work.list)); + D_ASSERT(list_empty(&mdev->go_diskless.list)); } @@ -3280,9 +3438,10 @@ void drbd_md_sync(struct drbd_conf *mdev) sector_t sector; int i; + del_timer(&mdev->md_sync_timer); + /* timer may be rearmed by drbd_md_mark_dirty() now. */ if (!test_and_clear_bit(MD_DIRTY, &mdev->flags)) return; - del_timer(&mdev->md_sync_timer); /* We use here D_FAILED and not D_ATTACHING because we try to write * metadata even if we detach due to a disk failure! */ @@ -3310,12 +3469,9 @@ void drbd_md_sync(struct drbd_conf *mdev) D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); sector = mdev->ldev->md.md_offset; - if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { - clear_bit(MD_DIRTY, &mdev->flags); - } else { + if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) { /* this was a try anyways ... */ dev_err(DEV, "meta data update failed!\n"); - drbd_chk_io_error(mdev, 1, TRUE); } @@ -3402,6 +3558,28 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) return rv; } +static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index) +{ + static char *uuid_str[UI_EXTENDED_SIZE] = { + [UI_CURRENT] = "CURRENT", + [UI_BITMAP] = "BITMAP", + [UI_HISTORY_START] = "HISTORY_START", + [UI_HISTORY_END] = "HISTORY_END", + [UI_SIZE] = "SIZE", + [UI_FLAGS] = "FLAGS", + }; + + if (index >= UI_EXTENDED_SIZE) { + dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n"); + return; + } + + dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n", + uuid_str[index], + (unsigned long long)mdev->ldev->md.uuid[index]); +} + + /** * drbd_md_mark_dirty() - Mark meta data super block as dirty * @mdev: DRBD device. @@ -3410,19 +3588,31 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) * the meta-data super block. This function sets MD_DIRTY, and starts a * timer that ensures that within five seconds you have to call drbd_md_sync(). 
*/ +#ifdef DEBUG +void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func) +{ + if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) { + mod_timer(&mdev->md_sync_timer, jiffies + HZ); + mdev->last_md_mark_dirty.line = line; + mdev->last_md_mark_dirty.func = func; + } +} +#else void drbd_md_mark_dirty(struct drbd_conf *mdev) { - set_bit(MD_DIRTY, &mdev->flags); - mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); + if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) + mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ); } - +#endif static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local) { int i; - for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) + for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) { mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i]; + debug_drbd_uuid(mdev, i+1); + } } void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) @@ -3437,6 +3627,7 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) } mdev->ldev->md.uuid[idx] = val; + debug_drbd_uuid(mdev, idx); drbd_md_mark_dirty(mdev); } @@ -3446,6 +3637,7 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local) if (mdev->ldev->md.uuid[idx]) { drbd_uuid_move_history(mdev); mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx]; + debug_drbd_uuid(mdev, UI_HISTORY_START); } _drbd_uuid_set(mdev, idx, val); } @@ -3464,6 +3656,7 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) dev_info(DEV, "Creating new current UUID\n"); D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0); mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT]; + debug_drbd_uuid(mdev, UI_BITMAP); get_random_bytes(&val, sizeof(u64)); _drbd_uuid_set(mdev, UI_CURRENT, val); @@ -3478,6 +3671,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) drbd_uuid_move_history(mdev); mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; mdev->ldev->md.uuid[UI_BITMAP] = 0; + debug_drbd_uuid(mdev, UI_HISTORY_START); + debug_drbd_uuid(mdev, UI_BITMAP); } else { if (mdev->ldev->md.uuid[UI_BITMAP]) dev_warn(DEV, "bm UUID already set"); @@ -3485,6 +3680,7 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) mdev->ldev->md.uuid[UI_BITMAP] = val; mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1); + debug_drbd_uuid(mdev, UI_BITMAP); } drbd_md_mark_dirty(mdev); } @@ -3527,6 +3723,7 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev) { int rv = -EIO; + drbd_resume_al(mdev); if (get_ldev_if_state(mdev, D_ATTACHING)) { drbd_bm_clear_all(mdev); rv = drbd_bm_write(mdev); @@ -3559,6 +3756,32 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) return 1; } +static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused) +{ + D_ASSERT(mdev->state.disk == D_FAILED); + /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will + * inc/dec it frequently. Once we are D_DISKLESS, no one will touch + * the protected members anymore, though, so in the after_state_ch work + * it will be safe to free them. */ + drbd_force_state(mdev, NS(disk, D_DISKLESS)); + /* We need to wait for return of references checked out while we still + * have been D_FAILED, though (drbd_md_sync, bitmap io). 
*/ + wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + + clear_bit(GO_DISKLESS, &mdev->flags); + return 1; +} + +void drbd_go_diskless(struct drbd_conf *mdev) +{ + D_ASSERT(mdev->state.disk == D_FAILED); + if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) + drbd_queue_work(&mdev->data.work, &mdev->go_diskless); + /* don't drbd_queue_work_front, + * we need to serialize with the after_state_ch work + * of the -> D_FAILED transition. */ +} + /** * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap * @mdev: DRBD device. @@ -3655,8 +3878,11 @@ static void md_sync_timer_fn(unsigned long data) static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused) { dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n"); +#ifdef DEBUG + dev_warn(DEV, "last md_mark_dirty: %s:%u\n", + mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line); +#endif drbd_md_sync(mdev); - return 1; } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 73131c5ae33..87925e97e61 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -33,10 +33,13 @@ #include <linux/blkpg.h> #include <linux/cpumask.h> #include "drbd_int.h" +#include "drbd_req.h" #include "drbd_wrappers.h" #include <asm/unaligned.h> #include <linux/drbd_tag_magic.h> #include <linux/drbd_limits.h> +#include <linux/compiler.h> +#include <linux/kthread.h> static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int); static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *); @@ -169,6 +172,10 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd) put_net_conf(mdev); } + /* The helper may take some time. + * write out any unsynced meta data changes now */ + drbd_md_sync(mdev); + dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb); drbd_bcast_ev_helper(mdev, cmd); @@ -202,12 +209,10 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) put_ldev(mdev); } else { dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n"); - return mdev->state.pdsk; + nps = mdev->state.pdsk; + goto out; } - if (fp == FP_STONITH) - _drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE); - r = drbd_khelper(mdev, "fence-peer"); switch ((r>>8) & 0xff) { @@ -252,9 +257,36 @@ enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev) dev_info(DEV, "fence-peer helper returned %d (%s)\n", (r>>8) & 0xff, ex_to_string); + +out: + if (mdev->state.susp_fen && nps >= D_UNKNOWN) { + /* The handler was not successful... unfreeze here, the + state engine can not unfreeze... 
*/ + _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE); + } + return nps; } +static int _try_outdate_peer_async(void *data) +{ + struct drbd_conf *mdev = (struct drbd_conf *)data; + enum drbd_disk_state nps; + + nps = drbd_try_outdate_peer(mdev); + drbd_request_state(mdev, NS(pdsk, nps)); + + return 0; +} + +void drbd_try_outdate_peer_async(struct drbd_conf *mdev) +{ + struct task_struct *opa; + + opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev)); + if (IS_ERR(opa)) + dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n"); +} int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) { @@ -394,6 +426,39 @@ int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force) return r; } +static struct drbd_conf *ensure_mdev(int minor, int create) +{ + struct drbd_conf *mdev; + + if (minor >= minor_count) + return NULL; + + mdev = minor_to_mdev(minor); + + if (!mdev && create) { + struct gendisk *disk = NULL; + mdev = drbd_new_device(minor); + + spin_lock_irq(&drbd_pp_lock); + if (minor_table[minor] == NULL) { + minor_table[minor] = mdev; + disk = mdev->vdisk; + mdev = NULL; + } /* else: we lost the race */ + spin_unlock_irq(&drbd_pp_lock); + + if (disk) /* we won the race above */ + /* in case we ever add a drbd_delete_device(), + * don't forget the del_gendisk! */ + add_disk(disk); + else /* we lost the race above */ + drbd_free_mdev(mdev); + + mdev = minor_to_mdev(minor); + } + + return mdev; +} static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) @@ -494,6 +559,8 @@ char *ppsize(char *buf, unsigned long long size) void drbd_suspend_io(struct drbd_conf *mdev) { set_bit(SUSPEND_IO, &mdev->flags); + if (is_susp(mdev->state)) + return; wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); } @@ -713,9 +780,6 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu blk_queue_segment_boundary(q, PAGE_SIZE-1); blk_stack_limits(&q->limits, &b->limits, 0); - if (b->merge_bvec_fn) - dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n", - b->merge_bvec_fn); dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q)); if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { @@ -729,14 +793,16 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu /* serialize deconfig (worker exiting, doing cleanup) * and reconfig (drbdsetup disk, drbdsetup net) * - * wait for a potentially exiting worker, then restart it, - * or start a new one. + * Wait for a potentially exiting worker, then restart it, + * or start a new one. Flush any pending work, there may still be an + * after_state_change queued. */ static void drbd_reconfig_start(struct drbd_conf *mdev) { wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)); wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags)); drbd_thread_start(&mdev->worker); + drbd_flush_workqueue(mdev); } /* if still unconfigured, stops worker again. @@ -756,6 +822,29 @@ static void drbd_reconfig_done(struct drbd_conf *mdev) wake_up(&mdev->state_wait); } +/* Make sure IO is suspended before calling this function(). 
*/ +static void drbd_suspend_al(struct drbd_conf *mdev) +{ + int s = 0; + + if (lc_try_lock(mdev->act_log)) { + drbd_al_shrink(mdev); + lc_unlock(mdev->act_log); + } else { + dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n"); + return; + } + + spin_lock_irq(&mdev->req_lock); + if (mdev->state.conn < C_CONNECTED) + s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags); + + spin_unlock_irq(&mdev->req_lock); + + if (s) + dev_info(DEV, "Suspended AL updates\n"); +} + /* does always return 0; * interesting return code is in reply->ret_code */ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, @@ -769,6 +858,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp struct inode *inode, *inode2; struct lru_cache *resync_lru = NULL; union drbd_state ns, os; + unsigned int max_seg_s; int rv; int cp_discovered = 0; int logical_block_size; @@ -803,6 +893,15 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp goto fail; } + if (get_net_conf(mdev)) { + int prot = mdev->net_conf->wire_protocol; + put_net_conf(mdev); + if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) { + retcode = ERR_STONITH_AND_PROT_A; + goto fail; + } + } + nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0); if (IS_ERR(nbc->lo_file)) { dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev, @@ -924,7 +1023,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp drbd_suspend_io(mdev); /* also wait for the last barrier ack. */ - wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt)); + wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state)); /* and for any other previously queued work */ drbd_flush_workqueue(mdev); @@ -1021,7 +1120,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp else clear_bit(CRASHED_PRIMARY, &mdev->flags); - if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) { + if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) && + !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) { set_bit(CRASHED_PRIMARY, &mdev->flags); cp_discovered = 1; } @@ -1031,7 +1131,20 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp mdev->read_cnt = 0; mdev->writ_cnt = 0; - drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE); + max_seg_s = DRBD_MAX_SEGMENT_SIZE; + if (mdev->state.conn == C_CONNECTED) { + /* We are Primary, Connected, and now attach a new local + * backing store. We must not increase the user visible maximum + * bio size on this device to something the peer may not be + * able to handle. */ + if (mdev->agreed_pro_version < 94) + max_seg_s = queue_max_segment_size(mdev->rq_queue); + else if (mdev->agreed_pro_version == 94) + max_seg_s = DRBD_MAX_SIZE_H80_PACKET; + /* else: drbd 8.3.9 and later, stay with default */ + } + + drbd_setup_queue_param(mdev, max_seg_s); /* If I am currently not R_PRIMARY, * but meta data primary indicator is set, @@ -1079,6 +1192,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp drbd_al_to_on_disk_bm(mdev); } + if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev)) + drbd_suspend_al(mdev); /* IO is still suspended here... 
*/ + spin_lock_irq(&mdev->req_lock); os = mdev->state; ns.i = os.i; @@ -1235,7 +1351,16 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, && (new_conf->wire_protocol != DRBD_PROT_C)) { retcode = ERR_NOT_PROTO_C; goto fail; - }; + } + + if (get_ldev(mdev)) { + enum drbd_fencing_p fp = mdev->ldev->dc.fencing; + put_ldev(mdev); + if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) { + retcode = ERR_STONITH_AND_PROT_A; + goto fail; + } + } if (mdev->state.role == R_PRIMARY && new_conf->want_lose) { retcode = ERR_DISCARD; @@ -1350,6 +1475,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, } } + drbd_flush_workqueue(mdev); spin_lock_irq(&mdev->req_lock); if (mdev->net_conf != NULL) { retcode = ERR_NET_CONFIGURED; @@ -1388,10 +1514,9 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, mdev->int_dig_out=int_dig_out; mdev->int_dig_in=int_dig_in; mdev->int_dig_vv=int_dig_vv; + retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL); spin_unlock_irq(&mdev->req_lock); - retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE); - kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); reply->ret_code = retcode; drbd_reconfig_done(mdev); @@ -1546,6 +1671,8 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n struct crypto_hash *csums_tfm = NULL; struct syncer_conf sc; cpumask_var_t new_cpu_mask; + int *rs_plan_s = NULL; + int fifo_size; if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) { retcode = ERR_NOMEM; @@ -1557,6 +1684,12 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n sc.rate = DRBD_RATE_DEF; sc.after = DRBD_AFTER_DEF; sc.al_extents = DRBD_AL_EXTENTS_DEF; + sc.on_no_data = DRBD_ON_NO_DATA_DEF; + sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF; + sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF; + sc.c_fill_target = DRBD_C_FILL_TARGET_DEF; + sc.c_max_rate = DRBD_C_MAX_RATE_DEF; + sc.c_min_rate = DRBD_C_MIN_RATE_DEF; } else memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); @@ -1634,6 +1767,12 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n } #undef AL_MAX + /* to avoid spurious errors when configuring minors before configuring + * the minors they depend on: if necessary, first create the minor we + * depend on */ + if (sc.after >= 0) + ensure_mdev(sc.after, 1); + /* most sanity checks done, try to assign the new sync-after * dependency. need to hold the global lock in there, * to avoid a race in the dependency loop check. */ @@ -1641,6 +1780,16 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n if (retcode != NO_ERROR) goto fail; + fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ; + if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) { + rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL); + if (!rs_plan_s) { + dev_err(DEV, "kmalloc of fifo_buffer failed"); + retcode = ERR_NOMEM; + goto fail; + } + } + /* ok, assign the rest of it as well. 
* lock against receive_SyncParam() */ spin_lock(&mdev->peer_seq_lock); @@ -1657,6 +1806,15 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n mdev->verify_tfm = verify_tfm; verify_tfm = NULL; } + + if (fifo_size != mdev->rs_plan_s.size) { + kfree(mdev->rs_plan_s.values); + mdev->rs_plan_s.values = rs_plan_s; + mdev->rs_plan_s.size = fifo_size; + mdev->rs_planed = 0; + rs_plan_s = NULL; + } + spin_unlock(&mdev->peer_seq_lock); if (get_ldev(mdev)) { @@ -1688,6 +1846,7 @@ static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *n kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE); fail: + kfree(rs_plan_s); free_cpumask_var(new_cpu_mask); crypto_free_hash(csums_tfm); crypto_free_hash(verify_tfm); @@ -1721,12 +1880,38 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl return 0; } +static int drbd_bmio_set_susp_al(struct drbd_conf *mdev) +{ + int rv; + + rv = drbd_bmio_set_n_write(mdev); + drbd_suspend_al(mdev); + return rv; +} + static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) { + int retcode; - reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); + retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED); + + if (retcode < SS_SUCCESS) { + if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) { + /* The peer will get a resync upon connect anyways. Just make that + into a full resync. */ + retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT)); + if (retcode >= SS_SUCCESS) { + /* open coded drbd_bitmap_io() */ + if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al, + "set_n_write from invalidate_peer")) + retcode = ERR_IO_MD_DISK; + } + } else + retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S)); + } + reply->ret_code = retcode; return 0; } @@ -1765,7 +1950,21 @@ static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) { - reply->ret_code = drbd_request_state(mdev, NS(susp, 0)); + if (test_bit(NEW_CUR_UUID, &mdev->flags)) { + drbd_uuid_new_current(mdev); + clear_bit(NEW_CUR_UUID, &mdev->flags); + drbd_md_sync(mdev); + } + drbd_suspend_io(mdev); + reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); + if (reply->ret_code == SS_SUCCESS) { + if (mdev->state.conn < C_CONNECTED) + tl_clear(mdev); + if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED) + tl_restart(mdev, fail_frozen_disk_io); + } + drbd_resume_io(mdev); + return 0; } @@ -1941,40 +2140,6 @@ out: return 0; } -static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp) -{ - struct drbd_conf *mdev; - - if (nlp->drbd_minor >= minor_count) - return NULL; - - mdev = minor_to_mdev(nlp->drbd_minor); - - if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) { - struct gendisk *disk = NULL; - mdev = drbd_new_device(nlp->drbd_minor); - - spin_lock_irq(&drbd_pp_lock); - if (minor_table[nlp->drbd_minor] == NULL) { - minor_table[nlp->drbd_minor] = mdev; - disk = mdev->vdisk; - mdev = NULL; - } /* else: we lost the race */ - spin_unlock_irq(&drbd_pp_lock); - - if (disk) /* we won the race above */ - /* in case we ever add a drbd_delete_device(), - * don't forget the del_gendisk! 
*/ - add_disk(disk); - else /* we lost the race above */ - drbd_free_mdev(mdev); - - mdev = minor_to_mdev(nlp->drbd_minor); - } - - return mdev; -} - struct cn_handler_struct { int (*function)(struct drbd_conf *, struct drbd_nl_cfg_req *, @@ -2035,7 +2200,8 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms goto fail; } - mdev = ensure_mdev(nlp); + mdev = ensure_mdev(nlp->drbd_minor, + (nlp->flags & DRBD_NL_CREATE_DEVICE)); if (!mdev) { retcode = ERR_MINOR_INVALID; goto fail; diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index be3374b6846..ad325c5d0ce 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -57,6 +57,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) unsigned long db, dt, dbdt, rt, rs_left; unsigned int res; int i, x, y; + int stalled = 0; drbd_get_syncer_progress(mdev, &rs_left, &res); @@ -90,18 +91,17 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) * db: blocks written from mark until now * rt: remaining time */ - dt = (jiffies - mdev->rs_mark_time) / HZ; - - if (dt > 20) { - /* if we made no update to rs_mark_time for too long, - * we are stalled. show that. */ - seq_printf(seq, "stalled\n"); - return; - } + /* Rolling marks. last_mark+1 may just now be modified. last_mark+2 is + * at least (DRBD_SYNC_MARKS-2)*DRBD_SYNC_MARK_STEP old, and has at + * least DRBD_SYNC_MARK_STEP time before it will be modified. */ + i = (mdev->rs_last_mark + 2) % DRBD_SYNC_MARKS; + dt = (jiffies - mdev->rs_mark_time[i]) / HZ; + if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) + stalled = 1; if (!dt) dt++; - db = mdev->rs_mark_left - rs_left; + db = mdev->rs_mark_left[i] - rs_left; rt = (dt * (rs_left / (db/100+1)))/100; /* seconds */ seq_printf(seq, "finish: %lu:%02lu:%02lu", @@ -118,7 +118,7 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) /* mean speed since syncer started * we do account for PausedSync periods */ dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ; - if (dt <= 0) + if (dt == 0) dt = 1; db = mdev->rs_total - rs_left; dbdt = Bit2KB(db/dt); @@ -128,7 +128,14 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq) else seq_printf(seq, " (%ld)", dbdt); - seq_printf(seq, " K/sec\n"); + if (mdev->state.conn == C_SYNC_TARGET) { + if (mdev->c_sync_rate > 1000) + seq_printf(seq, " want: %d,%03d", + mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000); + else + seq_printf(seq, " want: %d", mdev->c_sync_rate); + } + seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : ""); } static void resync_dump_detail(struct seq_file *seq, struct lc_element *e) @@ -196,7 +203,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { seq_printf(seq, - "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c\n" + "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" " ns:%u nr:%u dw:%u dr:%u al:%u bm:%u " "lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c", i, sn, @@ -206,11 +213,12 @@ static int drbd_seq_show(struct seq_file *seq, void *v) drbd_disk_str(mdev->state.pdsk), (mdev->net_conf == NULL ? ' ' : (mdev->net_conf->wire_protocol - DRBD_PROT_A+'A')), - mdev->state.susp ? 's' : 'r', + is_susp(mdev->state) ? 's' : 'r', mdev->state.aftr_isp ? 'a' : '-', mdev->state.peer_isp ? 'p' : '-', mdev->state.user_isp ? 'u' : '-', mdev->congestion_reason ?: '-', + test_bit(AL_SUSPENDED, &mdev->flags) ? 
's' : '-', mdev->send_cnt/2, mdev->recv_cnt/2, mdev->writ_cnt/2, diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 081522d3c74..efd6169acf2 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -241,7 +241,7 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev) spin_unlock_irq(&mdev->req_lock); list_for_each_entry_safe(e, t, &reclaimed, w.list) - drbd_free_ee(mdev, e); + drbd_free_net_ee(mdev, e); } /** @@ -298,9 +298,11 @@ static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool * Is also used from inside an other spin_lock_irq(&mdev->req_lock); * Either links the page chain back to the global pool, * or returns all pages to the system. */ -static void drbd_pp_free(struct drbd_conf *mdev, struct page *page) +static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net) { + atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; int i; + if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count) i = page_chain_free(page); else { @@ -311,10 +313,10 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page) drbd_pp_vacant += i; spin_unlock(&drbd_pp_lock); } - atomic_sub(i, &mdev->pp_in_use); - i = atomic_read(&mdev->pp_in_use); + i = atomic_sub_return(i, a); if (i < 0) - dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i); + dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n", + is_net ? "pp_in_use_by_net" : "pp_in_use", i); wake_up(&drbd_pp_wait); } @@ -365,7 +367,6 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, e->size = data_size; e->flags = 0; e->sector = sector; - e->sector = sector; e->block_id = id; return e; @@ -375,9 +376,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, return NULL; } -void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e) +void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net) { - drbd_pp_free(mdev, e->pages); + if (e->flags & EE_HAS_DIGEST) + kfree(e->digest); + drbd_pp_free(mdev, e->pages, is_net); D_ASSERT(atomic_read(&e->pending_bios) == 0); D_ASSERT(hlist_unhashed(&e->colision)); mempool_free(e, drbd_ee_mempool); @@ -388,13 +391,14 @@ int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list) LIST_HEAD(work_list); struct drbd_epoch_entry *e, *t; int count = 0; + int is_net = list == &mdev->net_ee; spin_lock_irq(&mdev->req_lock); list_splice_init(list, &work_list); spin_unlock_irq(&mdev->req_lock); list_for_each_entry_safe(e, t, &work_list, w.list) { - drbd_free_ee(mdev, e); + drbd_free_some_ee(mdev, e, is_net); count++; } return count; @@ -423,7 +427,7 @@ static int drbd_process_done_ee(struct drbd_conf *mdev) spin_unlock_irq(&mdev->req_lock); list_for_each_entry_safe(e, t, &reclaimed, w.list) - drbd_free_ee(mdev, e); + drbd_free_net_ee(mdev, e); /* possible callbacks here: * e_end_block, and e_end_resync_block, e_send_discard_ack. 
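The drbd_proc.c hunk above switches progress reporting from a single sync mark to an array of rolling marks: the entry at last_mark+2 is guaranteed to be old enough for a stable short-term speed estimate, and a mark that has not moved for a whole mark window means the resync is stalled. The following stand-alone sketch (not part of the patch) redoes that arithmetic in plain C; the struct, constants and names are stand-ins for the DRBD definitions, chosen only for illustration.

/* Stand-alone illustration of the rolling-mark progress estimate used in
 * drbd_syncer_progress() above; all names and constants are stand-ins. */
#include <stdio.h>

#define SYNC_MARKS      8   /* number of rolling marks (stand-in) */
#define SYNC_MARK_STEP  3   /* seconds between marks (stand-in) */

struct sync_progress {
	unsigned long mark_left[SYNC_MARKS]; /* bits left to sync when the mark was taken */
	unsigned long mark_time[SYNC_MARKS]; /* time in seconds when the mark was taken */
	unsigned int  last_mark;             /* index of the most recently updated mark */
};

/* Returns the estimated seconds to finish, or -1 if the resync looks stalled. */
static long estimate_finish(const struct sync_progress *sp,
			    unsigned long now, unsigned long rs_left)
{
	/* last_mark+1 may be under update right now; last_mark+2 is at least
	 * (SYNC_MARKS-2)*SYNC_MARK_STEP seconds old and will not be touched
	 * for another SYNC_MARK_STEP seconds. */
	unsigned int i = (sp->last_mark + 2) % SYNC_MARKS;
	unsigned long dt = now - sp->mark_time[i];
	unsigned long db;

	if (dt > SYNC_MARK_STEP * SYNC_MARKS)
		return -1;                       /* no progress for a full window */
	if (dt == 0)
		dt = 1;
	db = sp->mark_left[i] - rs_left;         /* bits synced since that mark */
	return dt * (rs_left / (db / 100 + 1)) / 100;
}

int main(void)
{
	struct sync_progress sp;
	int i;

	/* pretend 1000 bits were synced per mark step, with 8000 bits still left */
	for (i = 0; i < SYNC_MARKS; i++) {
		sp.mark_left[i] = 16000 - 1000 * i;
		sp.mark_time[i] = SYNC_MARK_STEP * i;
	}
	sp.last_mark = SYNC_MARKS - 1;
	printf("ETA: %ld s\n", estimate_finish(&sp, SYNC_MARK_STEP * SYNC_MARKS, 8000));
	return 0;
}

Reading the mark two slots behind the newest one is what keeps the estimate stable: the newest slot may be half-updated by the timer at the very moment /proc/drbd is read, as the comment in the hunk points out.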
@@ -719,14 +723,14 @@ out: static int drbd_send_fp(struct drbd_conf *mdev, struct socket *sock, enum drbd_packets cmd) { - struct p_header *h = (struct p_header *) &mdev->data.sbuf.header; + struct p_header80 *h = &mdev->data.sbuf.header.h80; return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0); } static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock) { - struct p_header *h = (struct p_header *) &mdev->data.sbuf.header; + struct p_header80 *h = &mdev->data.rbuf.header.h80; int rr; rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0); @@ -776,9 +780,6 @@ static int drbd_connect(struct drbd_conf *mdev) D_ASSERT(!mdev->data.socket); - if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) - dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n"); - if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS) return -2; @@ -927,6 +928,11 @@ retry: drbd_thread_start(&mdev->asender); + if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) { + drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET); + put_ldev(mdev); + } + if (!drbd_send_protocol(mdev)) return -1; drbd_send_sync_param(mdev, &mdev->sync_conf); @@ -946,22 +952,28 @@ out_release_sockets: return -1; } -static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h) +static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size) { + union p_header *h = &mdev->data.rbuf.header; int r; r = drbd_recv(mdev, h, sizeof(*h)); - if (unlikely(r != sizeof(*h))) { dev_err(DEV, "short read expecting header on sock: r=%d\n", r); return FALSE; - }; - h->command = be16_to_cpu(h->command); - h->length = be16_to_cpu(h->length); - if (unlikely(h->magic != BE_DRBD_MAGIC)) { - dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n", - (long)be32_to_cpu(h->magic), - h->command, h->length); + } + + if (likely(h->h80.magic == BE_DRBD_MAGIC)) { + *cmd = be16_to_cpu(h->h80.command); + *packet_size = be16_to_cpu(h->h80.length); + } else if (h->h95.magic == BE_DRBD_MAGIC_BIG) { + *cmd = be16_to_cpu(h->h95.command); + *packet_size = be32_to_cpu(h->h95.length); + } else { + dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n", + be32_to_cpu(h->h80.magic), + be16_to_cpu(h->h80.command), + be16_to_cpu(h->h80.length)); return FALSE; } mdev->last_received = jiffies; @@ -975,7 +987,7 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) { rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL, - NULL, BLKDEV_IFL_WAIT); + NULL); if (rv) { dev_err(DEV, "local disk flush failed with status %d\n", rv); /* would rather check on EOPNOTSUPP, but that is not reliable. 
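The reworked drbd_recv_header() above accepts two wire header formats and tells them apart by their magic: the original header with a 16-bit length field, and a "big packet" (h95) header whose 32-bit length allows payloads beyond 64 KiB. Below is a stand-alone sketch (not part of the patch) of that disambiguation; the field widths are inferred from the be16/be32 conversions in the hunk, and the struct names, parse_header() and the magic values are placeholders rather than the real BE_DRBD_MAGIC/BE_DRBD_MAGIC_BIG constants.

/* Stand-alone illustration of the two-header parsing in drbd_recv_header()
 * above. Field widths follow the be16/be32 accesses in the hunk; the magic
 * numbers below are placeholders, not the real DRBD constants. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohs/ntohl: the wire format is big endian */

struct hdr80 {			/* classic header: 16-bit length */
	uint32_t magic;
	uint16_t command;
	uint16_t length;
} __attribute__((packed));

struct hdr95 {			/* "big packet" header: 32-bit length */
	uint16_t magic;
	uint16_t command;
	uint32_t length;
} __attribute__((packed));

#define MAGIC_80  0x11223344u	/* placeholder */
#define MAGIC_95  0x1122u	/* placeholder */

/* Decode a received header; returns 0 on success, -1 on unknown magic. */
static int parse_header(const unsigned char *buf,
			uint16_t *cmd, uint32_t *packet_size)
{
	struct hdr80 h80;
	struct hdr95 h95;

	memcpy(&h80, buf, sizeof(h80));	/* avoid unaligned accesses */
	memcpy(&h95, buf, sizeof(h95));

	if (ntohl(h80.magic) == MAGIC_80) {
		*cmd = ntohs(h80.command);
		*packet_size = ntohs(h80.length);
		return 0;
	}
	if (ntohs(h95.magic) == MAGIC_95) {
		*cmd = ntohs(h95.command);
		*packet_size = ntohl(h95.length);
		return 0;
	}
	return -1;			/* protocol error: unknown magic */
}

int main(void)
{
	unsigned char buf[8];
	struct hdr95 h = {
		.magic   = htons(MAGIC_95),
		.command = htons(7),
		.length  = htonl(128 * 1024),	/* > 64 KiB, needs the 32-bit length */
	};
	uint16_t cmd;
	uint32_t size;

	memcpy(buf, &h, sizeof(h));
	if (parse_header(buf, &cmd, &size) == 0)
		printf("cmd=%u size=%u\n", cmd, size);
	return 0;
}

Note that the kernel code compares the raw big-endian field against pre-swapped constants (BE_DRBD_MAGIC), saving a byte swap per packet; the sketch swaps the field instead, which is equivalent for the purpose of the illustration.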
@@ -1268,17 +1280,12 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea return 1; } -static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h) +static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { int rv, issue_flush; - struct p_barrier *p = (struct p_barrier *)h; + struct p_barrier *p = &mdev->data.rbuf.barrier; struct drbd_epoch *epoch; - ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; - - rv = drbd_recv(mdev, h->payload, h->length); - ERR_IF(rv != h->length) return FALSE; - inc_unacked(mdev); if (mdev->net_conf->wire_protocol != DRBD_PROT_C) @@ -1457,7 +1464,7 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) data_size -= rr; } kunmap(page); - drbd_pp_free(mdev, page); + drbd_pp_free(mdev, page, 0); return rv; } @@ -1562,30 +1569,29 @@ static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_si list_add(&e->w.list, &mdev->sync_ee); spin_unlock_irq(&mdev->req_lock); + atomic_add(data_size >> 9, &mdev->rs_sect_ev); if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) return TRUE; + /* drbd_submit_ee currently fails for one reason only: + * not being able to allocate enough bios. + * Is dropping the connection going to help? */ + spin_lock_irq(&mdev->req_lock); + list_del(&e->w.list); + spin_unlock_irq(&mdev->req_lock); + drbd_free_ee(mdev, e); fail: put_ldev(mdev); return FALSE; } -static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h) +static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { struct drbd_request *req; sector_t sector; - unsigned int header_size, data_size; int ok; - struct p_data *p = (struct p_data *)h; - - header_size = sizeof(*p) - sizeof(*h); - data_size = h->length - header_size; - - ERR_IF(data_size == 0) return FALSE; - - if (drbd_recv(mdev, h->payload, header_size) != header_size) - return FALSE; + struct p_data *p = &mdev->data.rbuf.data; sector = be64_to_cpu(p->sector); @@ -1611,20 +1617,11 @@ static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h) return ok; } -static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h) +static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { sector_t sector; - unsigned int header_size, data_size; int ok; - struct p_data *p = (struct p_data *)h; - - header_size = sizeof(*p) - sizeof(*h); - data_size = h->length - header_size; - - ERR_IF(data_size == 0) return FALSE; - - if (drbd_recv(mdev, h->payload, header_size) != header_size) - return FALSE; + struct p_data *p = &mdev->data.rbuf.data; sector = be64_to_cpu(p->sector); D_ASSERT(p->block_id == ID_SYNCER); @@ -1640,9 +1637,11 @@ static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h) ok = drbd_drain_block(mdev, data_size); - drbd_send_ack_dp(mdev, P_NEG_ACK, p); + drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); } + atomic_add(data_size >> 9, &mdev->rs_sect_in); + return ok; } @@ -1765,24 +1764,27 @@ static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) return ret; } +static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf) +{ + if (mdev->agreed_pro_version >= 95) + return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | + (dpf & DP_UNPLUG ? REQ_UNPLUG : 0) | + (dpf & DP_FUA ? REQ_FUA : 0) | + (dpf & DP_FLUSH ? REQ_FUA : 0) | + (dpf & DP_DISCARD ? REQ_DISCARD : 0); + else + return dpf & DP_RW_SYNC ? 
(REQ_SYNC | REQ_UNPLUG) : 0; +} + /* mirrored write */ -static int receive_Data(struct drbd_conf *mdev, struct p_header *h) +static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { sector_t sector; struct drbd_epoch_entry *e; - struct p_data *p = (struct p_data *)h; - int header_size, data_size; + struct p_data *p = &mdev->data.rbuf.data; int rw = WRITE; u32 dp_flags; - header_size = sizeof(*p) - sizeof(*h); - data_size = h->length - header_size; - - ERR_IF(data_size == 0) return FALSE; - - if (drbd_recv(mdev, h->payload, header_size) != header_size) - return FALSE; - if (!get_ldev(mdev)) { if (__ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Can not write mirrored data block " @@ -1792,7 +1794,7 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) mdev->peer_seq++; spin_unlock(&mdev->peer_seq_lock); - drbd_send_ack_dp(mdev, P_NEG_ACK, p); + drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); atomic_inc(&mdev->current_epoch->epoch_size); return drbd_drain_block(mdev, data_size); } @@ -1839,12 +1841,8 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) spin_unlock(&mdev->epoch_lock); dp_flags = be32_to_cpu(p->dp_flags); - if (dp_flags & DP_HARDBARRIER) { - dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); - /* rw |= REQ_HARDBARRIER; */ - } - if (dp_flags & DP_RW_SYNC) - rw |= REQ_SYNC | REQ_UNPLUG; + rw |= write_flags_to_bio(mdev, dp_flags); + if (dp_flags & DP_MAY_SET_IN_SYNC) e->flags |= EE_MAY_SET_IN_SYNC; @@ -2007,6 +2005,16 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h) if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) return TRUE; + /* drbd_submit_ee currently fails for one reason only: + * not being able to allocate enough bios. + * Is dropping the connection going to help? */ + spin_lock_irq(&mdev->req_lock); + list_del(&e->w.list); + hlist_del_init(&e->colision); + spin_unlock_irq(&mdev->req_lock); + if (e->flags & EE_CALL_AL_COMPLETE_IO) + drbd_al_complete_io(mdev, e->sector); + out_interrupted: /* yes, the epoch_size now is imbalanced. * but we drop the connection anyways, so we don't have a chance to @@ -2016,20 +2024,64 @@ out_interrupted: return FALSE; } -static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) +/* We may throttle resync, if the lower device seems to be busy, + * and current sync rate is above c_min_rate. + * + * To decide whether or not the lower device is busy, we use a scheme similar + * to MD RAID is_mddev_idle(): if the partition stats reveal "significant" + * (more than 64 sectors) of activity we cannot account for with our own resync + * activity, it obviously is "busy". + * + * The current sync rate used here uses only the most recent two step marks, + * to have a short time average so we can react faster. + */ +int drbd_rs_should_slow_down(struct drbd_conf *mdev) +{ + struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; + unsigned long db, dt, dbdt; + int curr_events; + int throttle = 0; + + /* feature disabled? */ + if (mdev->sync_conf.c_min_rate == 0) + return 0; + + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + + (int)part_stat_read(&disk->part0, sectors[1]) - + atomic_read(&mdev->rs_sect_ev); + if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { + unsigned long rs_left; + int i; + + mdev->rs_last_events = curr_events; + + /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, + * approx. 
*/ + i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-2) % DRBD_SYNC_MARKS; + rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; + + dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; + if (!dt) + dt++; + db = mdev->rs_mark_left[i] - rs_left; + dbdt = Bit2KB(db/dt); + + if (dbdt > mdev->sync_conf.c_min_rate) + throttle = 1; + } + return throttle; +} + + +static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size) { sector_t sector; const sector_t capacity = drbd_get_capacity(mdev->this_bdev); struct drbd_epoch_entry *e; struct digest_info *di = NULL; - int size, digest_size; + int size, verb; unsigned int fault_type; - struct p_block_req *p = - (struct p_block_req *)h; - const int brps = sizeof(*p)-sizeof(*h); - - if (drbd_recv(mdev, h->payload, brps) != brps) - return FALSE; + struct p_block_req *p = &mdev->data.rbuf.block_req; sector = be64_to_cpu(p->sector); size = be32_to_cpu(p->blksize); @@ -2046,12 +2098,31 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) } if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { - if (__ratelimit(&drbd_ratelimit_state)) + verb = 1; + switch (cmd) { + case P_DATA_REQUEST: + drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); + break; + case P_RS_DATA_REQUEST: + case P_CSUM_RS_REQUEST: + case P_OV_REQUEST: + drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p); + break; + case P_OV_REPLY: + verb = 0; + dec_rs_pending(mdev); + drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC); + break; + default: + dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", + cmdname(cmd)); + } + if (verb && __ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Can not satisfy peer's read request, " "no local data.\n"); - drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY : - P_NEG_RS_DREPLY , p); - return drbd_drain_block(mdev, h->length - brps); + + /* drain possibly payload */ + return drbd_drain_block(mdev, digest_size); } /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD @@ -2063,31 +2134,21 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) return FALSE; } - switch (h->command) { + switch (cmd) { case P_DATA_REQUEST: e->w.cb = w_e_end_data_req; fault_type = DRBD_FAULT_DT_RD; - break; + /* application IO, don't drbd_rs_begin_io */ + goto submit; + case P_RS_DATA_REQUEST: e->w.cb = w_e_end_rsdata_req; fault_type = DRBD_FAULT_RS_RD; - /* Eventually this should become asynchronously. Currently it - * blocks the whole receiver just to delay the reading of a - * resync data block. - * the drbd_work_queue mechanism is made for this... - */ - if (!drbd_rs_begin_io(mdev, sector)) { - /* we have been interrupted, - * probably connection lost! 
*/ - D_ASSERT(signal_pending(current)); - goto out_free_e; - } break; case P_OV_REPLY: case P_CSUM_RS_REQUEST: fault_type = DRBD_FAULT_RS_RD; - digest_size = h->length - brps ; di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO); if (!di) goto out_free_e; @@ -2095,31 +2156,25 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) di->digest_size = digest_size; di->digest = (((char *)di)+sizeof(struct digest_info)); + e->digest = di; + e->flags |= EE_HAS_DIGEST; + if (drbd_recv(mdev, di->digest, digest_size) != digest_size) goto out_free_e; - e->block_id = (u64)(unsigned long)di; - if (h->command == P_CSUM_RS_REQUEST) { + if (cmd == P_CSUM_RS_REQUEST) { D_ASSERT(mdev->agreed_pro_version >= 89); e->w.cb = w_e_end_csum_rs_req; - } else if (h->command == P_OV_REPLY) { + } else if (cmd == P_OV_REPLY) { e->w.cb = w_e_end_ov_reply; dec_rs_pending(mdev); - break; - } - - if (!drbd_rs_begin_io(mdev, sector)) { - /* we have been interrupted, probably connection lost! */ - D_ASSERT(signal_pending(current)); - goto out_free_e; + /* drbd_rs_begin_io done when we sent this request, + * but accounting still needs to be done. */ + goto submit_for_resync; } break; case P_OV_REQUEST: - if (mdev->state.conn >= C_CONNECTED && - mdev->state.conn != C_VERIFY_T) - dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n", - drbd_conn_str(mdev->state.conn)); if (mdev->ov_start_sector == ~(sector_t)0 && mdev->agreed_pro_version >= 90) { mdev->ov_start_sector = sector; @@ -2130,37 +2185,63 @@ static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) } e->w.cb = w_e_end_ov_req; fault_type = DRBD_FAULT_RS_RD; - /* Eventually this should become asynchronous. Currently it - * blocks the whole receiver just to delay the reading of a - * resync data block. - * the drbd_work_queue mechanism is made for this... - */ - if (!drbd_rs_begin_io(mdev, sector)) { - /* we have been interrupted, - * probably connection lost! */ - D_ASSERT(signal_pending(current)); - goto out_free_e; - } break; - default: dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", - cmdname(h->command)); + cmdname(cmd)); fault_type = DRBD_FAULT_MAX; + goto out_free_e; } - spin_lock_irq(&mdev->req_lock); - list_add(&e->w.list, &mdev->read_ee); - spin_unlock_irq(&mdev->req_lock); + /* Throttle, drbd_rs_begin_io and submit should become asynchronous + * wrt the receiver, but it is not as straightforward as it may seem. + * Various places in the resync start and stop logic assume resync + * requests are processed in order, requeuing this on the worker thread + * introduces a bunch of new code for synchronization between threads. + * + * Unlimited throttling before drbd_rs_begin_io may stall the resync + * "forever", throttling after drbd_rs_begin_io will lock that extent + * for application writes for the same time. For now, just throttle + * here, where the rest of the code expects the receiver to sleep for + * a while, anyways. + */ + + /* Throttle before drbd_rs_begin_io, as that locks out application IO; + * this defers syncer requests for some time, before letting at least + * one request through. The resync controller on the receiving side + * will adapt to the incoming rate accordingly. + * + * We cannot throttle here if remote is Primary/SyncTarget: + * we would also throttle its application reads. + * In that case, throttling is done on the SyncTarget only. 
+ */ + if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev)) + msleep(100); + if (drbd_rs_begin_io(mdev, e->sector)) + goto out_free_e; +submit_for_resync: + atomic_add(size >> 9, &mdev->rs_sect_ev); + +submit: inc_unacked(mdev); + spin_lock_irq(&mdev->req_lock); + list_add_tail(&e->w.list, &mdev->read_ee); + spin_unlock_irq(&mdev->req_lock); if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) return TRUE; + /* drbd_submit_ee currently fails for one reason only: + * not being able to allocate enough bios. + * Is dropping the connection going to help? */ + spin_lock_irq(&mdev->req_lock); + list_del(&e->w.list); + spin_unlock_irq(&mdev->req_lock); + /* no drbd_rs_complete_io(), we are dropping the connection anyways */ + out_free_e: - kfree(di); put_ldev(mdev); drbd_free_ee(mdev, e); return FALSE; @@ -2699,20 +2780,13 @@ static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self) return 1; } -static int receive_protocol(struct drbd_conf *mdev, struct p_header *h) +static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { - struct p_protocol *p = (struct p_protocol *)h; - int header_size, data_size; + struct p_protocol *p = &mdev->data.rbuf.protocol; int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; int p_want_lose, p_two_primaries, cf; char p_integrity_alg[SHARED_SECRET_MAX] = ""; - header_size = sizeof(*p) - sizeof(*h); - data_size = h->length - header_size; - - if (drbd_recv(mdev, h->payload, header_size) != header_size) - return FALSE; - p_proto = be32_to_cpu(p->protocol); p_after_sb_0p = be32_to_cpu(p->after_sb_0p); p_after_sb_1p = be32_to_cpu(p->after_sb_1p); @@ -2805,39 +2879,46 @@ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, return tfm; } -static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h) +static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) { int ok = TRUE; - struct p_rs_param_89 *p = (struct p_rs_param_89 *)h; + struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; unsigned int header_size, data_size, exp_max_sz; struct crypto_hash *verify_tfm = NULL; struct crypto_hash *csums_tfm = NULL; const int apv = mdev->agreed_pro_version; + int *rs_plan_s = NULL; + int fifo_size = 0; exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) : apv == 88 ? sizeof(struct p_rs_param) + SHARED_SECRET_MAX - : /* 89 */ sizeof(struct p_rs_param_89); + : apv <= 94 ? 
sizeof(struct p_rs_param_89) + : /* apv >= 95 */ sizeof(struct p_rs_param_95); - if (h->length > exp_max_sz) { + if (packet_size > exp_max_sz) { dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", - h->length, exp_max_sz); + packet_size, exp_max_sz); return FALSE; } if (apv <= 88) { - header_size = sizeof(struct p_rs_param) - sizeof(*h); - data_size = h->length - header_size; - } else /* apv >= 89 */ { - header_size = sizeof(struct p_rs_param_89) - sizeof(*h); - data_size = h->length - header_size; + header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80); + data_size = packet_size - header_size; + } else if (apv <= 94) { + header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80); + data_size = packet_size - header_size; + D_ASSERT(data_size == 0); + } else { + header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80); + data_size = packet_size - header_size; D_ASSERT(data_size == 0); } /* initialize verify_alg and csums_alg */ memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); - if (drbd_recv(mdev, h->payload, header_size) != header_size) + if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) return FALSE; mdev->sync_conf.rate = be32_to_cpu(p->rate); @@ -2896,6 +2977,22 @@ static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h) } } + if (apv > 94) { + mdev->sync_conf.rate = be32_to_cpu(p->rate); + mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead); + mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target); + mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target); + mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate); + + fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; + if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) { + rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL); + if (!rs_plan_s) { + dev_err(DEV, "kmalloc of fifo_buffer failed"); + goto disconnect; + } + } + } spin_lock(&mdev->peer_seq_lock); /* lock against drbd_nl_syncer_conf() */ @@ -2913,6 +3010,12 @@ static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h) mdev->csums_tfm = csums_tfm; dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); } + if (fifo_size != mdev->rs_plan_s.size) { + kfree(mdev->rs_plan_s.values); + mdev->rs_plan_s.values = rs_plan_s; + mdev->rs_plan_s.size = fifo_size; + mdev->rs_planed = 0; + } spin_unlock(&mdev->peer_seq_lock); } @@ -2946,19 +3049,15 @@ static void warn_if_differ_considerably(struct drbd_conf *mdev, (unsigned long long)a, (unsigned long long)b); } -static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) +static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { - struct p_sizes *p = (struct p_sizes *)h; + struct p_sizes *p = &mdev->data.rbuf.sizes; enum determine_dev_size dd = unchanged; unsigned int max_seg_s; sector_t p_size, p_usize, my_usize; int ldsc = 0; /* local disk size changed */ enum dds_flags ddsf; - ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; - if (drbd_recv(mdev, h->payload, h->length) != h->length) - return FALSE; - p_size = be64_to_cpu(p->d_size); p_usize = be64_to_cpu(p->u_size); @@ -2972,7 +3071,6 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) * we still need to figure out whether we accept that. */ mdev->p_size = p_size; -#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? 
l : min(l, r)) if (get_ldev(mdev)) { warn_if_differ_considerably(mdev, "lower level device sizes", p_size, drbd_get_max_capacity(mdev->ldev)); @@ -3029,6 +3127,8 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) if (mdev->agreed_pro_version < 94) max_seg_s = be32_to_cpu(p->max_segment_size); + else if (mdev->agreed_pro_version == 94) + max_seg_s = DRBD_MAX_SIZE_H80_PACKET; else /* drbd 8.3.8 onwards */ max_seg_s = DRBD_MAX_SEGMENT_SIZE; @@ -3062,16 +3162,12 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -static int receive_uuids(struct drbd_conf *mdev, struct p_header *h) +static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { - struct p_uuids *p = (struct p_uuids *)h; + struct p_uuids *p = &mdev->data.rbuf.uuids; u64 *p_uuid; int i; - ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; - if (drbd_recv(mdev, h->payload, h->length) != h->length) - return FALSE; - p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) @@ -3107,6 +3203,11 @@ static int receive_uuids(struct drbd_conf *mdev, struct p_header *h) drbd_md_sync(mdev); } put_ldev(mdev); + } else if (mdev->state.disk < D_INCONSISTENT && + mdev->state.role == R_PRIMARY) { + /* I am a diskless primary, the peer just created a new current UUID + for me. */ + drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); } /* Before we test for the disk state, we should wait until an eventually @@ -3150,16 +3251,12 @@ static union drbd_state convert_state(union drbd_state ps) return ms; } -static int receive_req_state(struct drbd_conf *mdev, struct p_header *h) +static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { - struct p_req_state *p = (struct p_req_state *)h; + struct p_req_state *p = &mdev->data.rbuf.req_state; union drbd_state mask, val; int rv; - ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; - if (drbd_recv(mdev, h->payload, h->length) != h->length) - return FALSE; - mask.i = be32_to_cpu(p->mask); val.i = be32_to_cpu(p->val); @@ -3180,20 +3277,14 @@ static int receive_req_state(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -static int receive_state(struct drbd_conf *mdev, struct p_header *h) +static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { - struct p_state *p = (struct p_state *)h; - enum drbd_conns nconn, oconn; - union drbd_state ns, peer_state; + struct p_state *p = &mdev->data.rbuf.state; + union drbd_state os, ns, peer_state; enum drbd_disk_state real_peer_disk; + enum chg_state_flags cs_flags; int rv; - ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) - return FALSE; - - if (drbd_recv(mdev, h->payload, h->length) != h->length) - return FALSE; - peer_state.i = be32_to_cpu(p->state); real_peer_disk = peer_state.disk; @@ -3204,38 +3295,72 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h) spin_lock_irq(&mdev->req_lock); retry: - oconn = nconn = mdev->state.conn; + os = ns = mdev->state; spin_unlock_irq(&mdev->req_lock); - if (nconn == C_WF_REPORT_PARAMS) - nconn = C_CONNECTED; + /* peer says his disk is uptodate, while we think it is inconsistent, + * and this happens while we think we have a sync going on. 
*/ + if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE && + os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { + /* If we are (becoming) SyncSource, but peer is still in sync + * preparation, ignore its uptodate-ness to avoid flapping, it + * will change to inconsistent once the peer reaches active + * syncing states. + * It may have changed syncer-paused flags, however, so we + * cannot ignore this completely. */ + if (peer_state.conn > C_CONNECTED && + peer_state.conn < C_SYNC_SOURCE) + real_peer_disk = D_INCONSISTENT; + + /* if peer_state changes to connected at the same time, + * it explicitly notifies us that it finished resync. + * Maybe we should finish it up, too? */ + else if (os.conn >= C_SYNC_SOURCE && + peer_state.conn == C_CONNECTED) { + if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) + drbd_resync_finished(mdev); + return TRUE; + } + } + + /* peer says his disk is inconsistent, while we think it is uptodate, + * and this happens while the peer still thinks we have a sync going on, + * but we think we are already done with the sync. + * We ignore this to avoid flapping pdsk. + * This should not happen, if the peer is a recent version of drbd. */ + if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && + os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) + real_peer_disk = D_UP_TO_DATE; + + if (ns.conn == C_WF_REPORT_PARAMS) + ns.conn = C_CONNECTED; if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && get_ldev_if_state(mdev, D_NEGOTIATING)) { int cr; /* consider resync */ /* if we established a new connection */ - cr = (oconn < C_CONNECTED); + cr = (os.conn < C_CONNECTED); /* if we had an established connection * and one of the nodes newly attaches a disk */ - cr |= (oconn == C_CONNECTED && + cr |= (os.conn == C_CONNECTED && (peer_state.disk == D_NEGOTIATING || - mdev->state.disk == D_NEGOTIATING)); + os.disk == D_NEGOTIATING)); /* if we have both been inconsistent, and the peer has been * forced to be UpToDate with --overwrite-data */ cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); /* if we had been plain connected, and the admin requested to * start a sync by "invalidate" or "invalidate-remote" */ - cr |= (oconn == C_CONNECTED && + cr |= (os.conn == C_CONNECTED && (peer_state.conn >= C_STARTING_SYNC_S && peer_state.conn <= C_WF_BITMAP_T)); if (cr) - nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); + ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); put_ldev(mdev); - if (nconn == C_MASK) { - nconn = C_CONNECTED; + if (ns.conn == C_MASK) { + ns.conn = C_CONNECTED; if (mdev->state.disk == D_NEGOTIATING) { drbd_force_state(mdev, NS(disk, D_DISKLESS)); } else if (peer_state.disk == D_NEGOTIATING) { @@ -3245,7 +3370,7 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h) } else { if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) return FALSE; - D_ASSERT(oconn == C_WF_REPORT_PARAMS); + D_ASSERT(os.conn == C_WF_REPORT_PARAMS); drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); return FALSE; } @@ -3253,18 +3378,28 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h) } spin_lock_irq(&mdev->req_lock); - if (mdev->state.conn != oconn) + if (mdev->state.i != os.i) goto retry; clear_bit(CONSIDER_RESYNC, &mdev->flags); - ns.i = mdev->state.i; - ns.conn = nconn; ns.peer = peer_state.role; ns.pdsk = real_peer_disk; ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); - if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) + if ((ns.conn == 
C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) ns.disk = mdev->new_state_tmp.disk; - - rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL); + cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD); + if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED && + test_bit(NEW_CUR_UUID, &mdev->flags)) { + /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this + for temporal network outages! */ + spin_unlock_irq(&mdev->req_lock); + dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); + tl_clear(mdev); + drbd_uuid_new_current(mdev); + clear_bit(NEW_CUR_UUID, &mdev->flags); + drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0)); + return FALSE; + } + rv = _drbd_set_state(mdev, ns, cs_flags, NULL); ns = mdev->state; spin_unlock_irq(&mdev->req_lock); @@ -3273,8 +3408,8 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h) return FALSE; } - if (oconn > C_WF_REPORT_PARAMS) { - if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED && + if (os.conn > C_WF_REPORT_PARAMS) { + if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED && peer_state.disk != D_NEGOTIATING ) { /* we want resync, peer has not yet decided to sync... */ /* Nowadays only used when forcing a node into primary role and @@ -3291,9 +3426,9 @@ static int receive_state(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h) +static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { - struct p_rs_uuid *p = (struct p_rs_uuid *)h; + struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid; wait_event(mdev->misc_wait, mdev->state.conn == C_WF_SYNC_UUID || @@ -3302,10 +3437,6 @@ static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h) /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ - ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; - if (drbd_recv(mdev, h->payload, h->length) != h->length) - return FALSE; - /* Here the _drbd_uuid_ functions are right, current should _not_ be rotated into the history */ if (get_ldev_if_state(mdev, D_NEGOTIATING)) { @@ -3324,14 +3455,14 @@ static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h) enum receive_bitmap_ret { OK, DONE, FAILED }; static enum receive_bitmap_ret -receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h, - unsigned long *buffer, struct bm_xfer_ctx *c) +receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, + unsigned long *buffer, struct bm_xfer_ctx *c) { unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); unsigned want = num_words * sizeof(long); - if (want != h->length) { - dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length); + if (want != data_size) { + dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); return FAILED; } if (want == 0) @@ -3360,7 +3491,7 @@ recv_bm_rle_bits(struct drbd_conf *mdev, u64 tmp; unsigned long s = c->bit_offset; unsigned long e; - int len = p->head.length - (sizeof(*p) - sizeof(p->head)); + int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head)); int toggle = DCBP_get_start(p); int have; int bits; @@ -3429,7 +3560,7 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev, const char *direction, struct bm_xfer_ctx *c) { /* what would it take to transfer it "plaintext" */ - unsigned plain = sizeof(struct 
p_header) * + unsigned plain = sizeof(struct p_header80) * ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1) + c->bm_words * sizeof(long); unsigned total = c->bytes[0] + c->bytes[1]; @@ -3467,12 +3598,13 @@ void INFO_bm_xfer_stats(struct drbd_conf *mdev, in order to be agnostic to the 32 vs 64 bits issue. returns 0 on failure, 1 if we successfully received it. */ -static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h) +static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { struct bm_xfer_ctx c; void *buffer; enum receive_bitmap_ret ret; int ok = FALSE; + struct p_header80 *h = &mdev->data.rbuf.header.h80; wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); @@ -3492,39 +3624,39 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h) }; do { - if (h->command == P_BITMAP) { - ret = receive_bitmap_plain(mdev, h, buffer, &c); - } else if (h->command == P_COMPRESSED_BITMAP) { + if (cmd == P_BITMAP) { + ret = receive_bitmap_plain(mdev, data_size, buffer, &c); + } else if (cmd == P_COMPRESSED_BITMAP) { /* MAYBE: sanity check that we speak proto >= 90, * and the feature is enabled! */ struct p_compressed_bm *p; - if (h->length > BM_PACKET_PAYLOAD_BYTES) { + if (data_size > BM_PACKET_PAYLOAD_BYTES) { dev_err(DEV, "ReportCBitmap packet too large\n"); goto out; } /* use the page buff */ p = buffer; memcpy(p, h, sizeof(*h)); - if (drbd_recv(mdev, p->head.payload, h->length) != h->length) + if (drbd_recv(mdev, p->head.payload, data_size) != data_size) goto out; - if (p->head.length <= (sizeof(*p) - sizeof(p->head))) { - dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length); + if (data_size <= (sizeof(*p) - sizeof(p->head))) { + dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); return FAILED; } ret = decode_bitmap_c(mdev, p, &c); } else { - dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command); + dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); goto out; } - c.packets[h->command == P_BITMAP]++; - c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length; + c.packets[cmd == P_BITMAP]++; + c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; if (ret != OK) break; - if (!drbd_recv_header(mdev, h)) + if (!drbd_recv_header(mdev, &cmd, &data_size)) goto out; } while (ret == OK); if (ret == FAILED) @@ -3555,17 +3687,16 @@ static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h) return ok; } -static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent) +static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { /* TODO zero copy sink :) */ static char sink[128]; int size, want, r; - if (!silent) - dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", - h->command, h->length); + dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", + cmd, data_size); - size = h->length; + size = data_size; while (size > 0) { want = min_t(int, size, sizeof(sink)); r = drbd_recv(mdev, sink, want); @@ -3575,17 +3706,7 @@ static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent) return size == 0; } -static int receive_skip(struct drbd_conf *mdev, struct p_header *h) -{ - return receive_skip_(mdev, h, 0); -} - -static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h) -{ - return receive_skip_(mdev, h, 1); -} - -static int receive_UnplugRemote(struct 
drbd_conf *mdev, struct p_header *h) +static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { if (mdev->state.disk >= D_INCONSISTENT) drbd_kick_lo(mdev); @@ -3597,108 +3718,94 @@ static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *); - -static drbd_cmd_handler_f drbd_default_handler[] = { - [P_DATA] = receive_Data, - [P_DATA_REPLY] = receive_DataReply, - [P_RS_DATA_REPLY] = receive_RSDataReply, - [P_BARRIER] = receive_Barrier, - [P_BITMAP] = receive_bitmap, - [P_COMPRESSED_BITMAP] = receive_bitmap, - [P_UNPLUG_REMOTE] = receive_UnplugRemote, - [P_DATA_REQUEST] = receive_DataRequest, - [P_RS_DATA_REQUEST] = receive_DataRequest, - [P_SYNC_PARAM] = receive_SyncParam, - [P_SYNC_PARAM89] = receive_SyncParam, - [P_PROTOCOL] = receive_protocol, - [P_UUIDS] = receive_uuids, - [P_SIZES] = receive_sizes, - [P_STATE] = receive_state, - [P_STATE_CHG_REQ] = receive_req_state, - [P_SYNC_UUID] = receive_sync_uuid, - [P_OV_REQUEST] = receive_DataRequest, - [P_OV_REPLY] = receive_DataRequest, - [P_CSUM_RS_REQUEST] = receive_DataRequest, - [P_DELAY_PROBE] = receive_skip_silent, +typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive); + +struct data_cmd { + int expect_payload; + size_t pkt_size; + drbd_cmd_handler_f function; +}; + +static struct data_cmd drbd_cmd_handler[] = { + [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, + [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply }, + [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } , + [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } , + [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } , + [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } , + [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote }, + [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, + [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, + [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam }, + [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam }, + [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol }, + [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids }, + [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes }, + [P_STATE] = { 0, sizeof(struct p_state), receive_state }, + [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state }, + [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid }, + [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, + [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, + [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, + [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, /* anything missing from this table is in * the asender_tbl, see get_asender_cmd */ - [P_MAX_CMD] = NULL, + [P_MAX_CMD] = { 0, 0, NULL }, }; -static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler; -static drbd_cmd_handler_f *drbd_opt_cmd_handler; +/* All handler functions that expect a sub-header get that sub-header in + mdev->data.rbuf.header.head.payload. + + Usually in mdev->data.rbuf.header.head the callback can find the usual + p_header, but they may not rely on that. Since there is also p_header95 ! 
+ */ static void drbdd(struct drbd_conf *mdev) { - drbd_cmd_handler_f handler; - struct p_header *header = &mdev->data.rbuf.header; + union p_header *header = &mdev->data.rbuf.header; + unsigned int packet_size; + enum drbd_packets cmd; + size_t shs; /* sub header size */ + int rv; while (get_t_state(&mdev->receiver) == Running) { drbd_thread_current_set_cpu(mdev); - if (!drbd_recv_header(mdev, header)) { - drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); - break; - } + if (!drbd_recv_header(mdev, &cmd, &packet_size)) + goto err_out; - if (header->command < P_MAX_CMD) - handler = drbd_cmd_handler[header->command]; - else if (P_MAY_IGNORE < header->command - && header->command < P_MAX_OPT_CMD) - handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE]; - else if (header->command > P_MAX_OPT_CMD) - handler = receive_skip; - else - handler = NULL; + if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) { + dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size); + goto err_out; + } - if (unlikely(!handler)) { - dev_err(DEV, "unknown packet type %d, l: %d!\n", - header->command, header->length); - drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); - break; + shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); + rv = drbd_recv(mdev, &header->h80.payload, shs); + if (unlikely(rv != shs)) { + dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); + goto err_out; } - if (unlikely(!handler(mdev, header))) { - dev_err(DEV, "error receiving %s, l: %d!\n", - cmdname(header->command), header->length); - drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); - break; + + if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { + dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); + goto err_out; } - } -} -static void drbd_fail_pending_reads(struct drbd_conf *mdev) -{ - struct hlist_head *slot; - struct hlist_node *pos; - struct hlist_node *tmp; - struct drbd_request *req; - int i; + rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs); - /* - * Application READ requests - */ - spin_lock_irq(&mdev->req_lock); - for (i = 0; i < APP_R_HSIZE; i++) { - slot = mdev->app_reads_hash+i; - hlist_for_each_entry_safe(req, pos, tmp, slot, colision) { - /* it may (but should not any longer!) - * be on the work queue; if that assert triggers, - * we need to also grab the - * spin_lock_irq(&mdev->data.work.q_lock); - * and list_del_init here. */ - D_ASSERT(list_empty(&req->w.list)); - /* It would be nice to complete outside of spinlock. - * But this is easier for now. */ - _req_mod(req, connection_lost_while_pending); + if (unlikely(!rv)) { + dev_err(DEV, "error receiving %s, l: %d!\n", + cmdname(cmd), packet_size); + goto err_out; } } - for (i = 0; i < APP_R_HSIZE; i++) - if (!hlist_empty(mdev->app_reads_hash+i)) - dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: " - "%p, should be NULL\n", i, mdev->app_reads_hash[i].first); - memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); - spin_unlock_irq(&mdev->req_lock); + if (0) { + err_out: + drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); + } + /* If we leave here, we probably want to update at least the + * "Connected" indicator on stable storage. Do so explicitly here. 
*/ + drbd_md_sync(mdev); } void drbd_flush_workqueue(struct drbd_conf *mdev) @@ -3711,6 +3818,36 @@ void drbd_flush_workqueue(struct drbd_conf *mdev) wait_for_completion(&barr.done); } +void drbd_free_tl_hash(struct drbd_conf *mdev) +{ + struct hlist_head *h; + + spin_lock_irq(&mdev->req_lock); + + if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) { + spin_unlock_irq(&mdev->req_lock); + return; + } + /* paranoia code */ + for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++) + if (h->first) + dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n", + (int)(h - mdev->ee_hash), h->first); + kfree(mdev->ee_hash); + mdev->ee_hash = NULL; + mdev->ee_hash_s = 0; + + /* paranoia code */ + for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) + if (h->first) + dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n", + (int)(h - mdev->tl_hash), h->first); + kfree(mdev->tl_hash); + mdev->tl_hash = NULL; + mdev->tl_hash_s = 0; + spin_unlock_irq(&mdev->req_lock); +} + static void drbd_disconnect(struct drbd_conf *mdev) { enum drbd_fencing_p fp; @@ -3728,6 +3865,7 @@ static void drbd_disconnect(struct drbd_conf *mdev) drbd_thread_stop(&mdev->asender); drbd_free_sock(mdev); + /* wait for current activity to cease. */ spin_lock_irq(&mdev->req_lock); _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); @@ -3752,7 +3890,6 @@ static void drbd_disconnect(struct drbd_conf *mdev) /* make sure syncer is stopped and w_resume_next_sg queued */ del_timer_sync(&mdev->resync_timer); - set_bit(STOP_SYNC_TIMER, &mdev->flags); resync_timer_fn((unsigned long)mdev); /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, @@ -3767,11 +3904,9 @@ static void drbd_disconnect(struct drbd_conf *mdev) kfree(mdev->p_uuid); mdev->p_uuid = NULL; - if (!mdev->state.susp) + if (!is_susp(mdev->state)) tl_clear(mdev); - drbd_fail_pending_reads(mdev); - dev_info(DEV, "Connection closed\n"); drbd_md_sync(mdev); @@ -3782,12 +3917,8 @@ static void drbd_disconnect(struct drbd_conf *mdev) put_ldev(mdev); } - if (mdev->state.role == R_PRIMARY) { - if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) { - enum drbd_disk_state nps = drbd_try_outdate_peer(mdev); - drbd_request_state(mdev, NS(pdsk, nps)); - } - } + if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) + drbd_try_outdate_peer_async(mdev); spin_lock_irq(&mdev->req_lock); os = mdev->state; @@ -3800,32 +3931,14 @@ static void drbd_disconnect(struct drbd_conf *mdev) spin_unlock_irq(&mdev->req_lock); if (os.conn == C_DISCONNECTING) { - struct hlist_head *h; - wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0); + wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); - /* we must not free the tl_hash - * while application io is still on the fly */ - wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0); - - spin_lock_irq(&mdev->req_lock); - /* paranoia code */ - for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++) - if (h->first) - dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n", - (int)(h - mdev->ee_hash), h->first); - kfree(mdev->ee_hash); - mdev->ee_hash = NULL; - mdev->ee_hash_s = 0; - - /* paranoia code */ - for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) - if (h->first) - dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n", - (int)(h - mdev->tl_hash), h->first); - kfree(mdev->tl_hash); - mdev->tl_hash = NULL; - mdev->tl_hash_s = 0; - 
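/*
 * Aside, not part of the patch above: the reworked receive path keys a table
 * by packet type, recording the expected sub-header size and whether a data
 * payload may follow, so drbdd() can reject malformed packets before calling
 * a handler.  Below is a rough, self-contained sketch of that table-driven
 * dispatch pattern; pkt_desc, dispatch() and the sizes are invented for
 * illustration and are not drbd's real names or values.
 */
#include <stddef.h>

enum pkt { PKT_PING, PKT_DATA, PKT_MAX };

struct pkt_desc {
	int expect_payload;                /* may data follow the sub-header? */
	size_t hdr_size;                   /* fixed sub-header size for this type */
	int (*handle)(unsigned payload);   /* handler sees only the payload size */
};

static int handle_ping(unsigned payload) { return payload == 0; }
static int handle_data(unsigned payload) { return payload > 0; }

static const struct pkt_desc tbl[PKT_MAX] = {
	[PKT_PING] = { 0, 8,  handle_ping },
	[PKT_DATA] = { 1, 24, handle_data },
};

static int dispatch(enum pkt cmd, unsigned pkt_size)
{
	if (cmd >= PKT_MAX || !tbl[cmd].handle)
		return -1;                         /* unknown type: protocol error */
	if (pkt_size < tbl[cmd].hdr_size)
		return -1;                         /* short read of the sub-header */
	if (pkt_size > tbl[cmd].hdr_size && !tbl[cmd].expect_payload)
		return -1;                         /* payload where none is allowed */
	return tbl[cmd].handle(pkt_size - tbl[cmd].hdr_size);
}

int main(void)
{
	/* a data packet with 16 payload bytes is accepted, a ping with payload is not */
	return (dispatch(PKT_DATA, 24 + 16) == 1 && dispatch(PKT_PING, 8 + 4) == -1) ? 0 : 1;
}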
spin_unlock_irq(&mdev->req_lock); + if (!is_susp(mdev->state)) { + /* we must not free the tl_hash + * while application io is still on the fly */ + wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt)); + drbd_free_tl_hash(mdev); + } crypto_free_hash(mdev->cram_hmac_tfm); mdev->cram_hmac_tfm = NULL; @@ -3845,6 +3958,9 @@ static void drbd_disconnect(struct drbd_conf *mdev) i = drbd_release_ee(mdev, &mdev->net_ee); if (i) dev_info(DEV, "net_ee not empty, killed %u entries\n", i); + i = atomic_read(&mdev->pp_in_use_by_net); + if (i) + dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i); i = atomic_read(&mdev->pp_in_use); if (i) dev_info(DEV, "pp_in_use = %d, expected 0\n", i); @@ -3888,7 +4004,7 @@ static int drbd_send_handshake(struct drbd_conf *mdev) p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE, - (struct p_header *)p, sizeof(*p), 0 ); + (struct p_header80 *)p, sizeof(*p), 0 ); mutex_unlock(&mdev->data.mutex); return ok; } @@ -3904,27 +4020,28 @@ static int drbd_do_handshake(struct drbd_conf *mdev) { /* ASSERT current == mdev->receiver ... */ struct p_handshake *p = &mdev->data.rbuf.handshake; - const int expect = sizeof(struct p_handshake) - -sizeof(struct p_header); + const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80); + unsigned int length; + enum drbd_packets cmd; int rv; rv = drbd_send_handshake(mdev); if (!rv) return 0; - rv = drbd_recv_header(mdev, &p->head); + rv = drbd_recv_header(mdev, &cmd, &length); if (!rv) return 0; - if (p->head.command != P_HAND_SHAKE) { + if (cmd != P_HAND_SHAKE) { dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n", - cmdname(p->head.command), p->head.command); + cmdname(cmd), cmd); return -1; } - if (p->head.length != expect) { + if (length != expect) { dev_err(DEV, "expected HandShake length: %u, received: %u\n", - expect, p->head.length); + expect, length); return -1; } @@ -3982,10 +4099,11 @@ static int drbd_do_auth(struct drbd_conf *mdev) char *response = NULL; char *right_response = NULL; char *peers_ch = NULL; - struct p_header p; unsigned int key_len = strlen(mdev->net_conf->shared_secret); unsigned int resp_size; struct hash_desc desc; + enum drbd_packets cmd; + unsigned int length; int rv; desc.tfm = mdev->cram_hmac_tfm; @@ -4005,33 +4123,33 @@ static int drbd_do_auth(struct drbd_conf *mdev) if (!rv) goto fail; - rv = drbd_recv_header(mdev, &p); + rv = drbd_recv_header(mdev, &cmd, &length); if (!rv) goto fail; - if (p.command != P_AUTH_CHALLENGE) { + if (cmd != P_AUTH_CHALLENGE) { dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n", - cmdname(p.command), p.command); + cmdname(cmd), cmd); rv = 0; goto fail; } - if (p.length > CHALLENGE_LEN*2) { + if (length > CHALLENGE_LEN * 2) { dev_err(DEV, "expected AuthChallenge payload too big.\n"); rv = -1; goto fail; } - peers_ch = kmalloc(p.length, GFP_NOIO); + peers_ch = kmalloc(length, GFP_NOIO); if (peers_ch == NULL) { dev_err(DEV, "kmalloc of peers_ch failed\n"); rv = -1; goto fail; } - rv = drbd_recv(mdev, peers_ch, p.length); + rv = drbd_recv(mdev, peers_ch, length); - if (rv != p.length) { + if (rv != length) { dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); rv = 0; goto fail; @@ -4046,7 +4164,7 @@ static int drbd_do_auth(struct drbd_conf *mdev) } sg_init_table(&sg, 1); - sg_set_buf(&sg, peers_ch, p.length); + sg_set_buf(&sg, peers_ch, length); rv = crypto_hash_digest(&desc, &sg, sg.length, response); if (rv) { @@ 
-4059,18 +4177,18 @@ static int drbd_do_auth(struct drbd_conf *mdev) if (!rv) goto fail; - rv = drbd_recv_header(mdev, &p); + rv = drbd_recv_header(mdev, &cmd, &length); if (!rv) goto fail; - if (p.command != P_AUTH_RESPONSE) { + if (cmd != P_AUTH_RESPONSE) { dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n", - cmdname(p.command), p.command); + cmdname(cmd), cmd); rv = 0; goto fail; } - if (p.length != resp_size) { + if (length != resp_size) { dev_err(DEV, "expected AuthResponse payload of wrong size\n"); rv = 0; goto fail; @@ -4155,7 +4273,7 @@ int drbdd_init(struct drbd_thread *thi) /* ********* acknowledge sender ******** */ -static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h) +static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h) { struct p_req_state_reply *p = (struct p_req_state_reply *)h; @@ -4173,13 +4291,13 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -static int got_Ping(struct drbd_conf *mdev, struct p_header *h) +static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) { return drbd_send_ping_ack(mdev); } -static int got_PingAck(struct drbd_conf *mdev, struct p_header *h) +static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h) { /* restore idle timeout */ mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; @@ -4189,7 +4307,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h) +static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) { struct p_block_ack *p = (struct p_block_ack *)h; sector_t sector = be64_to_cpu(p->sector); @@ -4199,11 +4317,15 @@ static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h) update_peer_seq(mdev, be32_to_cpu(p->seq_num)); - drbd_rs_complete_io(mdev, sector); - drbd_set_in_sync(mdev, sector, blksize); - /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ - mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); + if (get_ldev(mdev)) { + drbd_rs_complete_io(mdev, sector); + drbd_set_in_sync(mdev, sector, blksize); + /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ + mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); + put_ldev(mdev); + } dec_rs_pending(mdev); + atomic_add(blksize >> 9, &mdev->rs_sect_in); return TRUE; } @@ -4259,7 +4381,7 @@ static int validate_req_change_req_state(struct drbd_conf *mdev, return TRUE; } -static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h) +static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) { struct p_block_ack *p = (struct p_block_ack *)h; sector_t sector = be64_to_cpu(p->sector); @@ -4299,7 +4421,7 @@ static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h) _ack_id_to_req, __func__ , what); } -static int got_NegAck(struct drbd_conf *mdev, struct p_header *h) +static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) { struct p_block_ack *p = (struct p_block_ack *)h; sector_t sector = be64_to_cpu(p->sector); @@ -4319,7 +4441,7 @@ static int got_NegAck(struct drbd_conf *mdev, struct p_header *h) _ack_id_to_req, __func__ , neg_acked); } -static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h) +static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) { struct p_block_ack *p = (struct p_block_ack *)h; sector_t sector = be64_to_cpu(p->sector); @@ -4332,7 +4454,7 @@ static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h) _ar_id_to_req, __func__ , 
neg_acked); } -static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h) +static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h) { sector_t sector; int size; @@ -4354,7 +4476,7 @@ static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h) +static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) { struct p_barrier_ack *p = (struct p_barrier_ack *)h; @@ -4363,7 +4485,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h) return TRUE; } -static int got_OVResult(struct drbd_conf *mdev, struct p_header *h) +static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) { struct p_block_ack *p = (struct p_block_ack *)h; struct drbd_work *w; @@ -4380,6 +4502,9 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h) else ov_oos_print(mdev); + if (!get_ldev(mdev)) + return TRUE; + drbd_rs_complete_io(mdev, sector); dec_rs_pending(mdev); @@ -4394,18 +4519,18 @@ static int got_OVResult(struct drbd_conf *mdev, struct p_header *h) drbd_resync_finished(mdev); } } + put_ldev(mdev); return TRUE; } -static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h) +static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) { - /* IGNORE */ return TRUE; } struct asender_cmd { size_t pkt_size; - int (*process)(struct drbd_conf *mdev, struct p_header *h); + int (*process)(struct drbd_conf *mdev, struct p_header80 *h); }; static struct asender_cmd *get_asender_cmd(int cmd) @@ -4414,8 +4539,8 @@ static struct asender_cmd *get_asender_cmd(int cmd) /* anything missing from this table is in * the drbd_cmd_handler (drbd_default_handler) table, * see the beginning of drbdd() */ - [P_PING] = { sizeof(struct p_header), got_Ping }, - [P_PING_ACK] = { sizeof(struct p_header), got_PingAck }, + [P_PING] = { sizeof(struct p_header80), got_Ping }, + [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck }, [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, @@ -4427,7 +4552,7 @@ static struct asender_cmd *get_asender_cmd(int cmd) [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, - [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_something_to_ignore_m }, + [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, [P_MAX_CMD] = { 0, NULL }, }; if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) @@ -4438,13 +4563,13 @@ static struct asender_cmd *get_asender_cmd(int cmd) int drbd_asender(struct drbd_thread *thi) { struct drbd_conf *mdev = thi->mdev; - struct p_header *h = &mdev->meta.rbuf.header; + struct p_header80 *h = &mdev->meta.rbuf.header.h80; struct asender_cmd *cmd = NULL; int rv, len; void *buf = h; int received = 0; - int expect = sizeof(struct p_header); + int expect = sizeof(struct p_header80); int empty; sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev)); @@ -4468,10 +4593,8 @@ int drbd_asender(struct drbd_thread *thi) while (1) { clear_bit(SIGNAL_ASENDER, &mdev->flags); flush_signals(current); - if (!drbd_process_done_ee(mdev)) { - dev_err(DEV, "process_done_ee() = NOT_OK\n"); + if (!drbd_process_done_ee(mdev)) goto reconnect; - } /* to avoid race with newly queued ACKs */ 
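/*
 * Aside, not part of the patch above: several got_* ack handlers now wrap
 * their local-disk bookkeeping in get_ldev()/put_ldev(), a conditional
 * reference grab that simply skips the disk work when the backing device is
 * already being detached.  A much simplified sketch of that try-get/put idiom
 * follows; struct dev, try_get_ldev() and on_resync_ack() are invented names,
 * and a plain C11 atomic stands in for drbd's device state machinery.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct dev {
	atomic_int local_cnt;   /* references currently held on the local disk */
	bool detaching;         /* set once the backing device is going away */
};

static bool try_get_ldev(struct dev *d)
{
	atomic_fetch_add(&d->local_cnt, 1);
	if (d->detaching) {                     /* raced with detach: back out */
		atomic_fetch_sub(&d->local_cnt, 1);
		return false;
	}
	return true;
}

static void put_ldev_ref(struct dev *d)
{
	atomic_fetch_sub(&d->local_cnt, 1);
}

static void on_resync_ack(struct dev *d)
{
	if (try_get_ldev(d)) {
		/* only here is it safe to touch on-disk resync bookkeeping */
		put_ldev_ref(d);
	}
	/* accounting that does not need the disk would continue either way */
}

int main(void)
{
	struct dev d;

	atomic_init(&d.local_cnt, 0);
	d.detaching = false;
	on_resync_ack(&d);
	return atomic_load(&d.local_cnt);       /* 0: every get was balanced by a put */
}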
set_bit(SIGNAL_ASENDER, &mdev->flags); spin_lock_irq(&mdev->req_lock); @@ -4530,21 +4653,23 @@ int drbd_asender(struct drbd_thread *thi) if (received == expect && cmd == NULL) { if (unlikely(h->magic != BE_DRBD_MAGIC)) { - dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n", - (long)be32_to_cpu(h->magic), - h->command, h->length); + dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n", + be32_to_cpu(h->magic), + be16_to_cpu(h->command), + be16_to_cpu(h->length)); goto reconnect; } cmd = get_asender_cmd(be16_to_cpu(h->command)); len = be16_to_cpu(h->length); if (unlikely(cmd == NULL)) { - dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n", - (long)be32_to_cpu(h->magic), - h->command, h->length); + dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n", + be32_to_cpu(h->magic), + be16_to_cpu(h->command), + be16_to_cpu(h->length)); goto disconnect; } expect = cmd->pkt_size; - ERR_IF(len != expect-sizeof(struct p_header)) + ERR_IF(len != expect-sizeof(struct p_header80)) goto reconnect; } if (received == expect) { @@ -4554,7 +4679,7 @@ int drbd_asender(struct drbd_thread *thi) buf = h; received = 0; - expect = sizeof(struct p_header); + expect = sizeof(struct p_header80); cmd = NULL; } } @@ -4562,10 +4687,12 @@ int drbd_asender(struct drbd_thread *thi) if (0) { reconnect: drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE)); + drbd_md_sync(mdev); } if (0) { disconnect: drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); + drbd_md_sync(mdev); } clear_bit(SIGNAL_ASENDER, &mdev->flags); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index f761d98a4e9..9e91a2545fc 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -59,17 +59,19 @@ static void _drbd_end_io_acct(struct drbd_conf *mdev, struct drbd_request *req) static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const int rw) { const unsigned long s = req->rq_state; + + /* remove it from the transfer log. + * well, only if it had been there in the first + * place... if it had not (local only or conflicting + * and never sent), it should still be "empty" as + * initialized in drbd_req_new(), so we can list_del() it + * here unconditionally */ + list_del(&req->tl_requests); + /* if it was a write, we may have to set the corresponding * bit(s) out-of-sync first. If it had a local part, we need to * release the reference to the activity log. */ if (rw == WRITE) { - /* remove it from the transfer log. - * well, only if it had been there in the first - * place... if it had not (local only or conflicting - * and never sent), it should still be "empty" as - * initialized in drbd_req_new(), so we can list_del() it - * here unconditionally */ - list_del(&req->tl_requests); /* Set out-of-sync unless both OK flags are set * (local only or remote failed). * Other places where we set out-of-sync: @@ -92,7 +94,8 @@ static void _req_is_done(struct drbd_conf *mdev, struct drbd_request *req, const */ if (s & RQ_LOCAL_MASK) { if (get_ldev_if_state(mdev, D_FAILED)) { - drbd_al_complete_io(mdev, req->sector); + if (s & RQ_IN_ACT_LOG) + drbd_al_complete_io(mdev, req->sector); put_ldev(mdev); } else if (__ratelimit(&drbd_ratelimit_state)) { dev_warn(DEV, "Should have called drbd_al_complete_io(, %llu), " @@ -280,6 +283,14 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m) * protocol A or B, barrier ack still pending... 
*/ } +static void _req_may_be_done_not_susp(struct drbd_request *req, struct bio_and_error *m) +{ + struct drbd_conf *mdev = req->mdev; + + if (!is_susp(mdev->state)) + _req_may_be_done(req, m); +} + /* * checks whether there was an overlapping request * or ee already registered. @@ -380,10 +391,11 @@ out_conflict: * and it enforces that we have to think in a very structured manner * about the "events" that may happen to a request during its life time ... */ -void __req_mod(struct drbd_request *req, enum drbd_req_event what, +int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct bio_and_error *m) { struct drbd_conf *mdev = req->mdev; + int rv = 0; m->bio = NULL; switch (what) { @@ -420,7 +432,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state |= (RQ_LOCAL_COMPLETED|RQ_LOCAL_OK); req->rq_state &= ~RQ_LOCAL_PENDING; - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); put_ldev(mdev); break; @@ -429,7 +441,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state &= ~RQ_LOCAL_PENDING; __drbd_chk_io_error(mdev, FALSE); - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); put_ldev(mdev); break; @@ -437,7 +449,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, /* it is legal to fail READA */ req->rq_state |= RQ_LOCAL_COMPLETED; req->rq_state &= ~RQ_LOCAL_PENDING; - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); put_ldev(mdev); break; @@ -455,7 +467,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, /* no point in retrying if there is no good remote data, * or we have no connection. */ if (mdev->state.pdsk != D_UP_TO_DATE) { - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); break; } @@ -517,11 +529,9 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0); req->epoch = mdev->newest_tle->br_number; - list_add_tail(&req->tl_requests, - &mdev->newest_tle->requests); /* increment size of current epoch */ - mdev->newest_tle->n_req++; + mdev->newest_tle->n_writes++; /* queue work item to send data */ D_ASSERT(req->rq_state & RQ_NET_PENDING); @@ -530,7 +540,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, drbd_queue_work(&mdev->data.work, &req->w); /* close the epoch, in case it outgrew the limit */ - if (mdev->newest_tle->n_req >= mdev->net_conf->max_epoch_size) + if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size) queue_barrier(mdev); break; @@ -543,7 +553,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state &= ~RQ_NET_QUEUED; /* if we did it right, tl_clear should be scheduled only after * this, so this should not be necessary! */ - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); break; case handed_over_to_network: @@ -568,7 +578,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, * "completed_ok" events came in, once we return from * _drbd_send_zc_bio (drbd_send_dblock), we have to check * whether it is done already, and end it. */ - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); break; case read_retry_remote_canceled: @@ -584,7 +594,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, /* if it is still queued, we may not complete it here. * it will be canceled soon. 
*/ if (!(req->rq_state & RQ_NET_QUEUED)) - _req_may_be_done(req, m); + _req_may_be_done(req, m); /* Allowed while state.susp */ break; case write_acked_by_peer_and_sis: @@ -619,7 +629,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, D_ASSERT(req->rq_state & RQ_NET_PENDING); dec_ap_pending(mdev); req->rq_state &= ~RQ_NET_PENDING; - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); break; case neg_acked: @@ -629,11 +639,50 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state &= ~(RQ_NET_OK|RQ_NET_PENDING); req->rq_state |= RQ_NET_DONE; - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); /* else: done by handed_over_to_network */ break; + case fail_frozen_disk_io: + if (!(req->rq_state & RQ_LOCAL_COMPLETED)) + break; + + _req_may_be_done(req, m); /* Allowed while state.susp */ + break; + + case restart_frozen_disk_io: + if (!(req->rq_state & RQ_LOCAL_COMPLETED)) + break; + + req->rq_state &= ~RQ_LOCAL_COMPLETED; + + rv = MR_READ; + if (bio_data_dir(req->master_bio) == WRITE) + rv = MR_WRITE; + + get_ldev(mdev); + req->w.cb = w_restart_disk_io; + drbd_queue_work(&mdev->data.work, &req->w); + break; + + case resend: + /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK + before the connection loss (B&C only); only P_BARRIER_ACK was missing. + Trowing them out of the TL here by pretending we got a BARRIER_ACK + We ensure that the peer was not rebooted */ + if (!(req->rq_state & RQ_NET_OK)) { + if (req->w.cb) { + drbd_queue_work(&mdev->data.work, &req->w); + rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; + } + break; + } + /* else, fall through to barrier_acked */ + case barrier_acked: + if (!(req->rq_state & RQ_WRITE)) + break; + if (req->rq_state & RQ_NET_PENDING) { /* barrier came in before all requests have been acked. * this is bad, because if the connection is lost now, @@ -643,7 +692,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, } D_ASSERT(req->rq_state & RQ_NET_SENT); req->rq_state |= RQ_NET_DONE; - _req_may_be_done(req, m); + _req_may_be_done(req, m); /* Allowed while state.susp */ break; case data_received: @@ -651,9 +700,11 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what, dec_ap_pending(mdev); req->rq_state &= ~RQ_NET_PENDING; req->rq_state |= (RQ_NET_OK|RQ_NET_DONE); - _req_may_be_done(req, m); + _req_may_be_done_not_susp(req, m); break; }; + + return rv; } /* we may do a local read if: @@ -752,14 +803,16 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) * resync extent to finish, and, if necessary, pulls in the target * extent into the activity log, which involves further disk io because * of transactional on-disk meta data updates. */ - if (rw == WRITE && local) + if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { + req->rq_state |= RQ_IN_ACT_LOG; drbd_al_begin_io(mdev, sector); + } remote = remote && (mdev->state.pdsk == D_UP_TO_DATE || (mdev->state.pdsk == D_INCONSISTENT && mdev->state.conn >= C_CONNECTED)); - if (!(local || remote) && !mdev->state.susp) { + if (!(local || remote) && !is_susp(mdev->state)) { dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); goto fail_free_complete; } @@ -785,7 +838,7 @@ allocate_barrier: /* GOOD, everything prepared, grab the spin_lock */ spin_lock_irq(&mdev->req_lock); - if (mdev->state.susp) { + if (is_susp(mdev->state)) { /* If we got suspended, use the retry mechanism of generic_make_request() to restart processing of this bio. 
In the next call to drbd_make_request_26 @@ -867,30 +920,10 @@ allocate_barrier: /* check this request on the collision detection hash tables. * if we have a conflict, just complete it here. * THINK do we want to check reads, too? (I don't think so...) */ - if (rw == WRITE && _req_conflicts(req)) { - /* this is a conflicting request. - * even though it may have been only _partially_ - * overlapping with one of the currently pending requests, - * without even submitting or sending it, we will - * pretend that it was successfully served right now. - */ - if (local) { - bio_put(req->private_bio); - req->private_bio = NULL; - drbd_al_complete_io(mdev, req->sector); - put_ldev(mdev); - local = 0; - } - if (remote) - dec_ap_pending(mdev); - _drbd_end_io_acct(mdev, req); - /* THINK: do we want to fail it (-EIO), or pretend success? */ - bio_endio(req->master_bio, 0); - req->master_bio = NULL; - dec_ap_bio(mdev); - drbd_req_free(req); - remote = 0; - } + if (rw == WRITE && _req_conflicts(req)) + goto fail_conflicting; + + list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); /* NOTE remote first: to get the concurrent write detection right, * we must register the request before start of local IO. */ @@ -923,6 +956,21 @@ allocate_barrier: return 0; +fail_conflicting: + /* this is a conflicting request. + * even though it may have been only _partially_ + * overlapping with one of the currently pending requests, + * without even submitting or sending it, we will + * pretend that it was successfully served right now. + */ + _drbd_end_io_acct(mdev, req); + spin_unlock_irq(&mdev->req_lock); + if (remote) + dec_ap_pending(mdev); + /* THINK: do we want to fail it (-EIO), or pretend success? + * this pretends success. */ + err = 0; + fail_free_complete: if (rw == WRITE && local) drbd_al_complete_io(mdev, sector); @@ -961,21 +1009,6 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) return 1; } - /* - * Paranoia: we might have been primary, but sync target, or - * even diskless, then lost the connection. - * This should have been handled (panic? suspend?) somewhere - * else. But maybe it was not, so check again here. - * Caution: as long as we do not have a read/write lock on mdev, - * to serialize state changes, this is racy, since we may lose - * the connection *after* we test for the cstate. - */ - if (mdev->state.disk < D_UP_TO_DATE && mdev->state.pdsk < D_UP_TO_DATE) { - if (__ratelimit(&drbd_ratelimit_state)) - dev_err(DEV, "Sorry, I have no access to good data anymore.\n"); - return 1; - } - return 0; } diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 02d575d2451..181ea036482 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -104,6 +104,9 @@ enum drbd_req_event { read_ahead_completed_with_error, write_completed_with_error, completed_ok, + resend, + fail_frozen_disk_io, + restart_frozen_disk_io, nothing, /* for tracing only */ }; @@ -183,6 +186,12 @@ enum drbd_req_state_bits { /* keep this last, its for the RQ_NET_MASK */ __RQ_NET_MAX, + + /* Set when this is a write, clear for a read */ + __RQ_WRITE, + + /* Should call drbd_al_complete_io() for this request... 
*/ + __RQ_IN_ACT_LOG, }; #define RQ_LOCAL_PENDING (1UL << __RQ_LOCAL_PENDING) @@ -201,6 +210,16 @@ enum drbd_req_state_bits { /* 0x1f8 */ #define RQ_NET_MASK (((1UL << __RQ_NET_MAX)-1) & ~RQ_LOCAL_MASK) +#define RQ_WRITE (1UL << __RQ_WRITE) +#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG) + +/* For waking up the frozen transfer log mod_req() has to return if the request + should be counted in the epoch object*/ +#define MR_WRITE_SHIFT 0 +#define MR_WRITE (1 << MR_WRITE_SHIFT) +#define MR_READ_SHIFT 1 +#define MR_READ (1 << MR_READ_SHIFT) + /* epoch entries */ static inline struct hlist_head *ee_hash_slot(struct drbd_conf *mdev, sector_t sector) @@ -244,30 +263,36 @@ static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev, return NULL; } +static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bio *bio_src) +{ + struct bio *bio; + bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ + + req->private_bio = bio; + + bio->bi_private = req; + bio->bi_end_io = drbd_endio_pri; + bio->bi_next = NULL; +} + static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, struct bio *bio_src) { - struct bio *bio; struct drbd_request *req = mempool_alloc(drbd_request_mempool, GFP_NOIO); if (likely(req)) { - bio = bio_clone(bio_src, GFP_NOIO); /* XXX cannot fail?? */ + drbd_req_make_private_bio(req, bio_src); - req->rq_state = 0; + req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; req->mdev = mdev; req->master_bio = bio_src; - req->private_bio = bio; req->epoch = 0; - req->sector = bio->bi_sector; - req->size = bio->bi_size; + req->sector = bio_src->bi_sector; + req->size = bio_src->bi_size; req->start_time = jiffies; INIT_HLIST_NODE(&req->colision); INIT_LIST_HEAD(&req->tl_requests); INIT_LIST_HEAD(&req->w.list); - - bio->bi_private = req; - bio->bi_end_io = drbd_endio_pri; - bio->bi_next = NULL; } return req; } @@ -292,36 +317,43 @@ struct bio_and_error { extern void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m); -extern void __req_mod(struct drbd_request *req, enum drbd_req_event what, +extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct bio_and_error *m); extern void complete_master_bio(struct drbd_conf *mdev, struct bio_and_error *m); /* use this if you don't want to deal with calling complete_master_bio() * outside the spinlock, e.g. when walking some list on cleanup. */ -static inline void _req_mod(struct drbd_request *req, enum drbd_req_event what) +static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what) { struct drbd_conf *mdev = req->mdev; struct bio_and_error m; + int rv; /* __req_mod possibly frees req, do not touch req after that! */ - __req_mod(req, what, &m); + rv = __req_mod(req, what, &m); if (m.bio) complete_master_bio(mdev, &m); + + return rv; } /* completion of master bio is outside of spinlock. * If you need it irqsave, do it your self! 
*/ -static inline void req_mod(struct drbd_request *req, +static inline int req_mod(struct drbd_request *req, enum drbd_req_event what) { struct drbd_conf *mdev = req->mdev; struct bio_and_error m; + int rv; + spin_lock_irq(&mdev->req_lock); - __req_mod(req, what, &m); + rv = __req_mod(req, what, &m); spin_unlock_irq(&mdev->req_lock); if (m.bio) complete_master_bio(mdev, &m); + + return rv; } #endif diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index ca4a16cea2d..108d58015cd 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -39,8 +39,6 @@ #include "drbd_int.h" #include "drbd_req.h" -#define SLEEP_TIME (HZ/10) - static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel); @@ -217,10 +215,8 @@ void drbd_endio_sec(struct bio *bio, int error) */ void drbd_endio_pri(struct bio *bio, int error) { - unsigned long flags; struct drbd_request *req = bio->bi_private; struct drbd_conf *mdev = req->mdev; - struct bio_and_error m; enum drbd_req_event what; int uptodate = bio_flagged(bio, BIO_UPTODATE); @@ -246,12 +242,7 @@ void drbd_endio_pri(struct bio *bio, int error) bio_put(req->private_bio); req->private_bio = ERR_PTR(error); - spin_lock_irqsave(&mdev->req_lock, flags); - __req_mod(req, what, &m); - spin_unlock_irqrestore(&mdev->req_lock, flags); - - if (m.bio) - complete_master_bio(mdev, &m); + req_mod(req, what); } int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) @@ -376,54 +367,145 @@ static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size) struct drbd_epoch_entry *e; if (!get_ldev(mdev)) - return 0; + return -EIO; + + if (drbd_rs_should_slow_down(mdev)) + goto defer; /* GFP_TRY, because if there is no memory available right now, this may * be rescheduled for later. It is "only" background resync, after all. */ e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY); if (!e) - goto fail; + goto defer; + e->w.cb = w_e_send_csum; spin_lock_irq(&mdev->req_lock); list_add(&e->w.list, &mdev->read_ee); spin_unlock_irq(&mdev->req_lock); - e->w.cb = w_e_send_csum; + atomic_add(size >> 9, &mdev->rs_sect_ev); if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0) - return 1; + return 0; + + /* drbd_submit_ee currently fails for one reason only: + * not being able to allocate enough bios. + * Is dropping the connection going to help? 
*/ + spin_lock_irq(&mdev->req_lock); + list_del(&e->w.list); + spin_unlock_irq(&mdev->req_lock); drbd_free_ee(mdev, e); -fail: +defer: put_ldev(mdev); - return 2; + return -EAGAIN; } void resync_timer_fn(unsigned long data) { - unsigned long flags; struct drbd_conf *mdev = (struct drbd_conf *) data; int queue; - spin_lock_irqsave(&mdev->req_lock, flags); - - if (likely(!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))) { - queue = 1; - if (mdev->state.conn == C_VERIFY_S) - mdev->resync_work.cb = w_make_ov_request; - else - mdev->resync_work.cb = w_make_resync_request; - } else { + queue = 1; + switch (mdev->state.conn) { + case C_VERIFY_S: + mdev->resync_work.cb = w_make_ov_request; + break; + case C_SYNC_TARGET: + mdev->resync_work.cb = w_make_resync_request; + break; + default: queue = 0; mdev->resync_work.cb = w_resync_inactive; } - spin_unlock_irqrestore(&mdev->req_lock, flags); - /* harmless race: list_empty outside data.work.q_lock */ if (list_empty(&mdev->resync_work.list) && queue) drbd_queue_work(&mdev->data.work, &mdev->resync_work); } +static void fifo_set(struct fifo_buffer *fb, int value) +{ + int i; + + for (i = 0; i < fb->size; i++) + fb->values[i] = value; +} + +static int fifo_push(struct fifo_buffer *fb, int value) +{ + int ov; + + ov = fb->values[fb->head_index]; + fb->values[fb->head_index++] = value; + + if (fb->head_index >= fb->size) + fb->head_index = 0; + + return ov; +} + +static void fifo_add_val(struct fifo_buffer *fb, int value) +{ + int i; + + for (i = 0; i < fb->size; i++) + fb->values[i] += value; +} + +int drbd_rs_controller(struct drbd_conf *mdev) +{ + unsigned int sect_in; /* Number of sectors that came in since the last turn */ + unsigned int want; /* The number of sectors we want in the proxy */ + int req_sect; /* Number of sectors to request in this turn */ + int correction; /* Number of sectors more we need in the proxy*/ + int cps; /* correction per invocation of drbd_rs_controller() */ + int steps; /* Number of time steps to plan ahead */ + int curr_corr; + int max_sect; + + sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */ + mdev->rs_in_flight -= sect_in; + + spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */ + + steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */ + + if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */ + want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps; + } else { /* normal path */ + want = mdev->sync_conf.c_fill_target ? 
mdev->sync_conf.c_fill_target : + sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10); + } + + correction = want - mdev->rs_in_flight - mdev->rs_planed; + + /* Plan ahead */ + cps = correction / steps; + fifo_add_val(&mdev->rs_plan_s, cps); + mdev->rs_planed += cps * steps; + + /* What we do in this step */ + curr_corr = fifo_push(&mdev->rs_plan_s, 0); + spin_unlock(&mdev->peer_seq_lock); + mdev->rs_planed -= curr_corr; + + req_sect = sect_in + curr_corr; + if (req_sect < 0) + req_sect = 0; + + max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ; + if (req_sect > max_sect) + req_sect = max_sect; + + /* + dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n", + sect_in, mdev->rs_in_flight, want, correction, + steps, cps, mdev->rs_planed, curr_corr, req_sect); + */ + + return req_sect; +} + int w_make_resync_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { @@ -431,8 +513,9 @@ int w_make_resync_request(struct drbd_conf *mdev, sector_t sector; const sector_t capacity = drbd_get_capacity(mdev->this_bdev); int max_segment_size; - int number, i, size, pe, mx; + int number, rollback_i, size, pe, mx; int align, queued, sndbuf; + int i = 0; if (unlikely(cancel)) return 1; @@ -446,6 +529,12 @@ int w_make_resync_request(struct drbd_conf *mdev, dev_err(DEV, "%s in w_make_resync_request\n", drbd_conn_str(mdev->state.conn)); + if (mdev->rs_total == 0) { + /* empty resync? */ + drbd_resync_finished(mdev); + return 1; + } + if (!get_ldev(mdev)) { /* Since we only need to access mdev->rsync a get_ldev_if_state(mdev,D_FAILED) would be sufficient, but @@ -458,11 +547,25 @@ int w_make_resync_request(struct drbd_conf *mdev, /* starting with drbd 8.3.8, we can handle multi-bio EEs, * if it should be necessary */ - max_segment_size = mdev->agreed_pro_version < 94 ? - queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE; + max_segment_size = + mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) : + mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE; - number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE / 1024) * HZ); - pe = atomic_read(&mdev->rs_pending_cnt); + if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */ + number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9); + mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; + } else { + mdev->c_sync_rate = mdev->sync_conf.rate; + number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); + } + + /* Throttle resync on lower level disk activity, which may also be + * caused by application IO on Primary/SyncTarget. + * Keep this after the call to drbd_rs_controller, as that assumes + * to be called as precisely as possible every SLEEP_TIME, + * and would be confused otherwise. */ + if (drbd_rs_should_slow_down(mdev)) + goto requeue; mutex_lock(&mdev->data.mutex); if (mdev->data.socket) @@ -476,6 +579,7 @@ int w_make_resync_request(struct drbd_conf *mdev, mx = number; /* Limit the number of pending RS requests to no more than the peer's receive buffer */ + pe = atomic_read(&mdev->rs_pending_cnt); if ((pe + number) > mx) { number = mx - pe; } @@ -526,6 +630,7 @@ next_sector: * be prepared for all stripe sizes of software RAIDs. 
*/ align = 1; + rollback_i = i; for (;;) { if (size + BM_BLOCK_SIZE > max_segment_size) break; @@ -561,14 +666,19 @@ next_sector: size = (capacity-sector)<<9; if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) { switch (read_for_csum(mdev, sector, size)) { - case 0: /* Disk failure*/ + case -EIO: /* Disk failure */ put_ldev(mdev); return 0; - case 2: /* Allocation failed */ + case -EAGAIN: /* allocation failed, or ldev busy */ drbd_rs_complete_io(mdev, sector); mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); + i = rollback_i; goto requeue; - /* case 1: everything ok */ + case 0: + /* everything ok */ + break; + default: + BUG(); } } else { inc_rs_pending(mdev); @@ -595,6 +705,7 @@ next_sector: } requeue: + mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9)); mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME); put_ldev(mdev); return 1; @@ -670,6 +781,14 @@ static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int ca return 1; } +static void ping_peer(struct drbd_conf *mdev) +{ + clear_bit(GOT_PING_ACK, &mdev->flags); + request_ping(mdev); + wait_event(mdev->misc_wait, + test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED); +} + int drbd_resync_finished(struct drbd_conf *mdev) { unsigned long db, dt, dbdt; @@ -709,6 +828,8 @@ int drbd_resync_finished(struct drbd_conf *mdev) if (!get_ldev(mdev)) goto out; + ping_peer(mdev); + spin_lock_irq(&mdev->req_lock); os = mdev->state; @@ -801,6 +922,8 @@ out: mdev->rs_paused = 0; mdev->ov_start_sector = 0; + drbd_md_sync(mdev); + if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n"); drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); @@ -817,9 +940,13 @@ static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_ent { if (drbd_ee_has_active_page(e)) { /* This might happen if sendpage() has not finished */ + int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT; + atomic_add(i, &mdev->pp_in_use_by_net); + atomic_sub(i, &mdev->pp_in_use); spin_lock_irq(&mdev->req_lock); list_add_tail(&e->w.list, &mdev->net_ee); spin_unlock_irq(&mdev->req_lock); + wake_up(&drbd_pp_wait); } else drbd_free_ee(mdev, e); } @@ -926,9 +1053,12 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) return 1; } - drbd_rs_complete_io(mdev, e->sector); + if (get_ldev(mdev)) { + drbd_rs_complete_io(mdev, e->sector); + put_ldev(mdev); + } - di = (struct digest_info *)(unsigned long)e->block_id; + di = e->digest; if (likely((e->flags & EE_WAS_ERROR) == 0)) { /* quick hack to try to avoid a race against reconfiguration. @@ -952,7 +1082,9 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e); } else { inc_rs_pending(mdev); - e->block_id = ID_SYNCER; + e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! 
*/ + e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */ + kfree(di); ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e); } } else { @@ -962,9 +1094,6 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) } dec_unacked(mdev); - - kfree(di); - move_to_net_ee_or_free(mdev, e); if (unlikely(!ok)) @@ -1034,9 +1163,12 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) /* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all * the resync lru has been cleaned up already */ - drbd_rs_complete_io(mdev, e->sector); + if (get_ldev(mdev)) { + drbd_rs_complete_io(mdev, e->sector); + put_ldev(mdev); + } - di = (struct digest_info *)(unsigned long)e->block_id; + di = e->digest; if (likely((e->flags & EE_WAS_ERROR) == 0)) { digest_size = crypto_hash_digestsize(mdev->verify_tfm); @@ -1055,9 +1187,6 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) } dec_unacked(mdev); - - kfree(di); - if (!eq) drbd_ov_oos_found(mdev, e->sector, e->size); else @@ -1108,7 +1237,7 @@ int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel) * dec_ap_pending will be done in got_BarrierAck * or (on connection loss) in w_clear_epoch. */ ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER, - (struct p_header *)p, sizeof(*p), 0); + (struct p_header80 *)p, sizeof(*p), 0); drbd_put_data_sock(mdev); return ok; @@ -1173,6 +1302,24 @@ int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) return ok; } +int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel) +{ + struct drbd_request *req = container_of(w, struct drbd_request, w); + + if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG) + drbd_al_begin_io(mdev, req->sector); + /* Calling drbd_al_begin_io() out of the worker might deadlocks + theoretically. Practically it can not deadlock, since this is + only used when unfreezing IOs. All the extents of the requests + that made it into the TL are already active */ + + drbd_req_make_private_bio(req, req->master_bio); + req->private_bio->bi_bdev = mdev->ldev->backing_bdev; + generic_make_request(req->private_bio); + + return 1; +} + static int _drbd_may_sync_now(struct drbd_conf *mdev) { struct drbd_conf *odev = mdev; @@ -1298,14 +1445,6 @@ int drbd_alter_sa(struct drbd_conf *mdev, int na) return retcode; } -static void ping_peer(struct drbd_conf *mdev) -{ - clear_bit(GOT_PING_ACK, &mdev->flags); - request_ping(mdev); - wait_event(mdev->misc_wait, - test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED); -} - /** * drbd_start_resync() - Start the resync process * @mdev: DRBD device. 
@@ -1379,13 +1518,21 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) r = SS_UNKNOWN_ERROR; if (r == SS_SUCCESS) { - mdev->rs_total = - mdev->rs_mark_left = drbd_bm_total_weight(mdev); + unsigned long tw = drbd_bm_total_weight(mdev); + unsigned long now = jiffies; + int i; + mdev->rs_failed = 0; mdev->rs_paused = 0; - mdev->rs_start = - mdev->rs_mark_time = jiffies; mdev->rs_same_csum = 0; + mdev->rs_last_events = 0; + mdev->rs_last_sect_ev = 0; + mdev->rs_total = tw; + mdev->rs_start = now; + for (i = 0; i < DRBD_SYNC_MARKS; i++) { + mdev->rs_mark_left[i] = tw; + mdev->rs_mark_time[i] = now; + } _drbd_pause_after(mdev); } write_unlock_irq(&global_state_lock); @@ -1397,12 +1544,31 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side) (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10), (unsigned long) mdev->rs_total); - if (mdev->rs_total == 0) { - /* Peer still reachable? Beware of failing before-resync-target handlers! */ - ping_peer(mdev); + if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) { + /* This still has a race (about when exactly the peers + * detect connection loss) that can lead to a full sync + * on next handshake. In 8.3.9 we fixed this with explicit + * resync-finished notifications, but the fix + * introduces a protocol change. Sleeping for some + * time longer than the ping interval + timeout on the + * SyncSource, to give the SyncTarget the chance to + * detect connection loss, then waiting for a ping + * response (implicit in drbd_resync_finished) reduces + * the race considerably, but does not solve it. */ + if (side == C_SYNC_SOURCE) + schedule_timeout_interruptible( + mdev->net_conf->ping_int * HZ + + mdev->net_conf->ping_timeo*HZ/9); drbd_resync_finished(mdev); } + atomic_set(&mdev->rs_sect_in, 0); + atomic_set(&mdev->rs_sect_ev, 0); + mdev->rs_in_flight = 0; + mdev->rs_planed = 0; + spin_lock(&mdev->peer_seq_lock); + fifo_set(&mdev->rs_plan_s, 0); + spin_unlock(&mdev->peer_seq_lock); /* ns.conn may already be != mdev->state.conn, * we may have been paused in between, or become paused until * the timer triggers. diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index cf04c1b234e..767107cce98 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -178,7 +178,6 @@ static int print_unex = 1; #include <linux/slab.h> #include <linux/mm.h> #include <linux/bio.h> -#include <linux/smp_lock.h> #include <linux/string.h> #include <linux/jiffies.h> #include <linux/fcntl.h> @@ -199,6 +198,7 @@ static int print_unex = 1; * It's been recommended that take about 1/4 of the default speed * in some more extreme cases. */ +static DEFINE_MUTEX(floppy_mutex); static int slow_floppy; #include <asm/dma.h> @@ -258,8 +258,8 @@ static int irqdma_allocated; #include <linux/completion.h> static struct request *current_req; -static struct request_queue *floppy_queue; static void do_fd_request(struct request_queue *q); +static int set_next_request(void); #ifndef fd_get_dma_residue #define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA) @@ -413,6 +413,7 @@ static struct gendisk *disks[N_DRIVE]; static struct block_device *opened_bdev[N_DRIVE]; static DEFINE_MUTEX(open_lock); static struct floppy_raw_cmd *raw_cmd, default_raw_cmd; +static int fdc_queue; /* * This struct defines the different floppy types. 
@@ -890,8 +891,8 @@ static void unlock_fdc(void) del_timer(&fd_timeout); cont = NULL; clear_bit(0, &fdc_busy); - if (current_req || blk_peek_request(floppy_queue)) - do_fd_request(floppy_queue); + if (current_req || set_next_request()) + do_fd_request(current_req->q); spin_unlock_irqrestore(&floppy_lock, flags); wake_up(&fdc_wait); } @@ -2243,8 +2244,8 @@ static void floppy_end_request(struct request *req, int error) * logical buffer */ static void request_done(int uptodate) { - struct request_queue *q = floppy_queue; struct request *req = current_req; + struct request_queue *q; unsigned long flags; int block; char msg[sizeof("request done ") + sizeof(int) * 3]; @@ -2258,6 +2259,8 @@ static void request_done(int uptodate) return; } + q = req->q; + if (uptodate) { /* maintain values for invalidation on geometry * change */ @@ -2811,6 +2814,28 @@ static int make_raw_rw_request(void) return 2; } +/* + * Round-robin between our available drives, doing one request from each + */ +static int set_next_request(void) +{ + struct request_queue *q; + int old_pos = fdc_queue; + + do { + q = disks[fdc_queue]->queue; + if (++fdc_queue == N_DRIVE) + fdc_queue = 0; + if (q) { + current_req = blk_fetch_request(q); + if (current_req) + break; + } + } while (fdc_queue != old_pos); + + return current_req != NULL; +} + static void redo_fd_request(void) { int drive; @@ -2822,17 +2847,17 @@ static void redo_fd_request(void) do_request: if (!current_req) { - struct request *req; + int pending; + + spin_lock_irq(&floppy_lock); + pending = set_next_request(); + spin_unlock_irq(&floppy_lock); - spin_lock_irq(floppy_queue->queue_lock); - req = blk_fetch_request(floppy_queue); - spin_unlock_irq(floppy_queue->queue_lock); - if (!req) { + if (!pending) { do_floppy = NULL; unlock_fdc(); return; } - current_req = req; } drive = (long)current_req->rq_disk->private_data; set_fdc(drive); @@ -3553,9 +3578,9 @@ static int fd_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&floppy_mutex); ret = fd_locked_ioctl(bdev, mode, cmd, param); - unlock_kernel(); + mutex_unlock(&floppy_mutex); return ret; } @@ -3616,7 +3641,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) { int drive = (long)disk->private_data; - lock_kernel(); + mutex_lock(&floppy_mutex); mutex_lock(&open_lock); if (UDRS->fd_ref < 0) UDRS->fd_ref = 0; @@ -3627,7 +3652,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) if (!UDRS->fd_ref) opened_bdev[drive] = NULL; mutex_unlock(&open_lock); - unlock_kernel(); + mutex_unlock(&floppy_mutex); return 0; } @@ -3645,7 +3670,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) int res = -EBUSY; char *tmp; - lock_kernel(); + mutex_lock(&floppy_mutex); mutex_lock(&open_lock); old_dev = UDRS->fd_device; if (opened_bdev[drive] && opened_bdev[drive] != bdev) @@ -3722,7 +3747,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) goto out; } mutex_unlock(&open_lock); - unlock_kernel(); + mutex_unlock(&floppy_mutex); return 0; out: if (UDRS->fd_ref < 0) @@ -3733,7 +3758,7 @@ out: opened_bdev[drive] = NULL; out2: mutex_unlock(&open_lock); - unlock_kernel(); + mutex_unlock(&floppy_mutex); return res; } @@ -4165,6 +4190,13 @@ static int __init floppy_init(void) goto out_put_disk; } + disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock); + if (!disks[dr]->queue) { + err = -ENOMEM; + goto out_put_disk; + } + + blk_queue_max_hw_sectors(disks[dr]->queue, 64); disks[dr]->major = FLOPPY_MAJOR; disks[dr]->first_minor = TOMINOR(dr); 
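/*
 * Aside, not part of the patch above: floppy.c switches from one shared
 * request queue to a queue per drive, and set_next_request() walks the drives
 * round-robin so a busy drive cannot starve the others.  A self-contained
 * sketch of that scan follows; the pending[] counters merely stand in for
 * blk_fetch_request() on a real per-drive queue.
 */
#include <stdio.h>

#define N_DRIVE 4

static int pending[N_DRIVE] = { 0, 2, 0, 1 };   /* fake per-drive request counts */
static int next_drive;                          /* where the previous scan stopped */

/* scan every drive at most once, starting after the drive served last time */
static int set_next_request(void)
{
	int start = next_drive;
	int served = -1;

	do {
		int drive = next_drive;

		if (++next_drive == N_DRIVE)
			next_drive = 0;
		if (pending[drive] > 0) {
			pending[drive]--;
			served = drive;
			break;
		}
	} while (next_drive != start);

	return served;                          /* drive number, or -1 if all idle */
}

int main(void)
{
	int d;

	while ((d = set_next_request()) >= 0)
		printf("serving drive %d\n", d);
	return 0;
}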
disks[dr]->fops = &floppy_fops; @@ -4183,13 +4215,6 @@ static int __init floppy_init(void) if (err) goto out_unreg_blkdev; - floppy_queue = blk_init_queue(do_fd_request, &floppy_lock); - if (!floppy_queue) { - err = -ENOMEM; - goto out_unreg_driver; - } - blk_queue_max_hw_sectors(floppy_queue, 64); - blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, floppy_find, NULL, NULL); @@ -4317,7 +4342,6 @@ static int __init floppy_init(void) /* to be cleaned up... */ disks[drive]->private_data = (void *)(long)drive; - disks[drive]->queue = floppy_queue; disks[drive]->flags |= GENHD_FL_REMOVABLE; disks[drive]->driverfs_dev = &floppy_device[drive].dev; add_disk(disks[drive]); @@ -4333,8 +4357,6 @@ out_flush_work: floppy_release_irq_and_dma(); out_unreg_region: blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); - blk_cleanup_queue(floppy_queue); -out_unreg_driver: platform_driver_unregister(&floppy_driver); out_unreg_blkdev: unregister_blkdev(FLOPPY_MAJOR, "fd"); @@ -4342,6 +4364,8 @@ out_put_disk: while (dr--) { del_timer(&motor_off_timer[dr]); put_disk(disks[dr]); + if (disks[dr]->queue) + blk_cleanup_queue(disks[dr]->queue); } return err; } @@ -4550,11 +4574,11 @@ static void __exit floppy_module_exit(void) platform_device_unregister(&floppy_device[drive]); } put_disk(disks[drive]); + blk_cleanup_queue(disks[drive]->queue); } del_timer_sync(&fd_timeout); del_timer_sync(&fd_timer); - blk_cleanup_queue(floppy_queue); if (atomic_read(&usage_count)) floppy_release_irq_and_dma(); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 91797bbbe70..6c48b3545f8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -67,16 +67,18 @@ #include <linux/compat.h> #include <linux/suspend.h> #include <linux/freezer.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/writeback.h> #include <linux/buffer_head.h> /* for invalidate_bdev() */ #include <linux/completion.h> #include <linux/highmem.h> #include <linux/kthread.h> #include <linux/splice.h> +#include <linux/sysfs.h> #include <asm/uaccess.h> +static DEFINE_MUTEX(loop_mutex); static LIST_HEAD(loop_devices); static DEFINE_MUTEX(loop_devices_mutex); @@ -477,17 +479,17 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; if (bio_rw(bio) == WRITE) { - bool barrier = !!(bio->bi_rw & REQ_HARDBARRIER); struct file *file = lo->lo_backing_file; - if (barrier) { - if (unlikely(!file->f_op->fsync)) { - ret = -EOPNOTSUPP; - goto out; - } + /* REQ_HARDBARRIER is deprecated */ + if (bio->bi_rw & REQ_HARDBARRIER) { + ret = -EOPNOTSUPP; + goto out; + } + if (bio->bi_rw & REQ_FLUSH) { ret = vfs_fsync(file, 0); - if (unlikely(ret)) { + if (unlikely(ret && ret != -EINVAL)) { ret = -EIO; goto out; } @@ -495,9 +497,9 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) ret = lo_send(lo, bio, pos); - if (barrier && !ret) { + if ((bio->bi_rw & REQ_FUA) && !ret) { ret = vfs_fsync(file, 0); - if (unlikely(ret)) + if (unlikely(ret && ret != -EINVAL)) ret = -EIO; } } else @@ -737,6 +739,103 @@ static inline int is_loop_device(struct file *file) return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; } +/* loop sysfs attributes */ + +static ssize_t loop_attr_show(struct device *dev, char *page, + ssize_t (*callback)(struct loop_device *, char *)) +{ + struct loop_device *l, *lo = NULL; + + mutex_lock(&loop_devices_mutex); + list_for_each_entry(l, &loop_devices, lo_list) + if (disk_to_dev(l->lo_disk) == dev) { + lo = l; + break; + } + 
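/*
 * Aside, not part of the patch above: with REQ_HARDBARRIER gone, the loop
 * driver maps cache-flush semantics onto the backing file explicitly -- a
 * REQ_FLUSH bio syncs the file before the data is written, REQ_FUA syncs it
 * again afterwards.  A userspace-flavoured sketch of that mapping; the flag
 * values, backed_write() and the "loopfile.img" name are made up, and fsync()
 * stands in for vfs_fsync().
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#define REQ_FLUSH_BIT  (1u << 0)   /* preceding writes must be durable first */
#define REQ_FUA_BIT    (1u << 1)   /* this write itself must reach stable storage */

static int backed_write(int fd, const void *buf, size_t len, off_t pos,
			unsigned int flags)
{
	if ((flags & REQ_FLUSH_BIT) && fsync(fd) < 0)
		return -EIO;
	if (pwrite(fd, buf, len, pos) != (ssize_t)len)
		return -EIO;
	if ((flags & REQ_FUA_BIT) && fsync(fd) < 0)
		return -EIO;
	return 0;
}

int main(void)
{
	char buf[512] = { 42 };
	int fd = open("loopfile.img", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	return -backed_write(fd, buf, sizeof(buf), 0, REQ_FLUSH_BIT | REQ_FUA_BIT);
}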
mutex_unlock(&loop_devices_mutex); + + return lo ? callback(lo, page) : -EIO; +} + +#define LOOP_ATTR_RO(_name) \ +static ssize_t loop_attr_##_name##_show(struct loop_device *, char *); \ +static ssize_t loop_attr_do_show_##_name(struct device *d, \ + struct device_attribute *attr, char *b) \ +{ \ + return loop_attr_show(d, b, loop_attr_##_name##_show); \ +} \ +static struct device_attribute loop_attr_##_name = \ + __ATTR(_name, S_IRUGO, loop_attr_do_show_##_name, NULL); + +static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) +{ + ssize_t ret; + char *p = NULL; + + mutex_lock(&lo->lo_ctl_mutex); + if (lo->lo_backing_file) + p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); + mutex_unlock(&lo->lo_ctl_mutex); + + if (IS_ERR_OR_NULL(p)) + ret = PTR_ERR(p); + else { + ret = strlen(p); + memmove(buf, p, ret); + buf[ret++] = '\n'; + buf[ret] = 0; + } + + return ret; +} + +static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) +{ + return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset); +} + +static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) +{ + return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); +} + +static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) +{ + int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); + + return sprintf(buf, "%s\n", autoclear ? "1" : "0"); +} + +LOOP_ATTR_RO(backing_file); +LOOP_ATTR_RO(offset); +LOOP_ATTR_RO(sizelimit); +LOOP_ATTR_RO(autoclear); + +static struct attribute *loop_attrs[] = { + &loop_attr_backing_file.attr, + &loop_attr_offset.attr, + &loop_attr_sizelimit.attr, + &loop_attr_autoclear.attr, + NULL, +}; + +static struct attribute_group loop_attribute_group = { + .name = "loop", + .attrs= loop_attrs, +}; + +static int loop_sysfs_init(struct loop_device *lo) +{ + return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); +} + +static void loop_sysfs_exit(struct loop_device *lo) +{ + sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, + &loop_attribute_group); +} + static int loop_set_fd(struct loop_device *lo, fmode_t mode, struct block_device *bdev, unsigned int arg) { @@ -832,10 +931,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, lo->lo_queue->unplug_fn = loop_unplug; if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) - blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN); + blk_queue_flush(lo->lo_queue, REQ_FLUSH); set_capacity(lo->lo_disk, size); bd_set_size(bdev, size << 9); + loop_sysfs_init(lo); /* let user-space know about the new size */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); @@ -854,6 +954,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, return 0; out_clr: + loop_sysfs_exit(lo); lo->lo_thread = NULL; lo->lo_device = NULL; lo->lo_backing_file = NULL; @@ -950,6 +1051,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) set_capacity(lo->lo_disk, 0); if (bdev) { bd_set_size(bdev, 0); + loop_sysfs_exit(lo); /* let user-space know about this change */ kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE); } @@ -1409,11 +1511,11 @@ static int lo_open(struct block_device *bdev, fmode_t mode) { struct loop_device *lo = bdev->bd_disk->private_data; - lock_kernel(); + mutex_lock(&loop_mutex); mutex_lock(&lo->lo_ctl_mutex); lo->lo_refcnt++; mutex_unlock(&lo->lo_ctl_mutex); - unlock_kernel(); + mutex_unlock(&loop_mutex); return 0; } @@ -1423,7 +1525,7 @@ static int lo_release(struct gendisk *disk, fmode_t 
mode) struct loop_device *lo = disk->private_data; int err; - lock_kernel(); + mutex_lock(&loop_mutex); mutex_lock(&lo->lo_ctl_mutex); if (--lo->lo_refcnt) @@ -1448,7 +1550,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode) out: mutex_unlock(&lo->lo_ctl_mutex); out_unlocked: - lock_kernel(); + mutex_unlock(&loop_mutex); return 0; } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 0daa422aa28..a32fb41246f 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -24,7 +24,7 @@ #include <linux/errno.h> #include <linux/file.h> #include <linux/ioctl.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/compiler.h> #include <linux/err.h> #include <linux/kernel.h> @@ -53,6 +53,7 @@ #define DBG_BLKDEV 0x0100 #define DBG_RX 0x0200 #define DBG_TX 0x0400 +static DEFINE_MUTEX(nbd_mutex); static unsigned int debugflags; #endif /* NDEBUG */ @@ -717,11 +718,11 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode, dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); - lock_kernel(); + mutex_lock(&nbd_mutex); mutex_lock(&lo->tx_lock); error = __nbd_ioctl(bdev, lo, cmd, arg); mutex_unlock(&lo->tx_lock); - unlock_kernel(); + mutex_unlock(&nbd_mutex); return error; } diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c index 2284b4f05c6..87311ebac0d 100644 --- a/drivers/block/osdblk.c +++ b/drivers/block/osdblk.c @@ -310,8 +310,7 @@ static void osdblk_rq_fn(struct request_queue *q) break; /* filter out block requests we don't understand */ - if (rq->cmd_type != REQ_TYPE_FS && - !(rq->cmd_flags & REQ_HARDBARRIER)) { + if (rq->cmd_type != REQ_TYPE_FS) { blk_end_request_all(rq, 0); continue; } @@ -439,7 +438,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev) blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); blk_queue_prep_rq(q, blk_queue_start_tag); - blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); + blk_queue_flush(q, REQ_FLUSH); disk->queue = q; diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 76f8565e1e8..62cec6afd7a 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -138,9 +138,10 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; #include <linux/cdrom.h> #include <linux/spinlock.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/uaccess.h> +static DEFINE_MUTEX(pcd_mutex); static DEFINE_SPINLOCK(pcd_lock); module_param(verbose, bool, 0644); @@ -227,9 +228,9 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode) struct pcd_unit *cd = bdev->bd_disk->private_data; int ret; - lock_kernel(); + mutex_lock(&pcd_mutex); ret = cdrom_open(&cd->info, bdev, mode); - unlock_kernel(); + mutex_unlock(&pcd_mutex); return ret; } @@ -237,9 +238,9 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode) static int pcd_block_release(struct gendisk *disk, fmode_t mode) { struct pcd_unit *cd = disk->private_data; - lock_kernel(); + mutex_lock(&pcd_mutex); cdrom_release(&cd->info, mode); - unlock_kernel(); + mutex_unlock(&pcd_mutex); return 0; } @@ -249,9 +250,9 @@ static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode, struct pcd_unit *cd = bdev->bd_disk->private_data; int ret; - lock_kernel(); + mutex_lock(&pcd_mutex); ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg); - unlock_kernel(); + mutex_unlock(&pcd_mutex); return ret; } diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 985f0d4f1d1..c0ee1558b9b 100644 --- 
a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -153,10 +153,11 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV}; #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/kernel.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <linux/workqueue.h> +static DEFINE_MUTEX(pd_mutex); static DEFINE_SPINLOCK(pd_lock); module_param(verbose, bool, 0); @@ -736,14 +737,14 @@ static int pd_open(struct block_device *bdev, fmode_t mode) { struct pd_unit *disk = bdev->bd_disk->private_data; - lock_kernel(); + mutex_lock(&pd_mutex); disk->access++; if (disk->removable) { pd_special_command(disk, pd_media_check); pd_special_command(disk, pd_door_lock); } - unlock_kernel(); + mutex_unlock(&pd_mutex); return 0; } @@ -771,10 +772,10 @@ static int pd_ioctl(struct block_device *bdev, fmode_t mode, switch (cmd) { case CDROMEJECT: - lock_kernel(); + mutex_lock(&pd_mutex); if (disk->access == 1) pd_special_command(disk, pd_eject); - unlock_kernel(); + mutex_unlock(&pd_mutex); return 0; default: return -EINVAL; @@ -785,10 +786,10 @@ static int pd_release(struct gendisk *p, fmode_t mode) { struct pd_unit *disk = p->private_data; - lock_kernel(); + mutex_lock(&pd_mutex); if (!--disk->access && disk->removable) pd_special_command(disk, pd_door_unlock); - unlock_kernel(); + mutex_unlock(&pd_mutex); return 0; } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 4457b494882..635f25dd9e1 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -152,9 +152,10 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY}; #include <linux/spinlock.h> #include <linux/blkdev.h> #include <linux/blkpg.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/uaccess.h> +static DEFINE_MUTEX(pf_mutex); static DEFINE_SPINLOCK(pf_spin_lock); module_param(verbose, bool, 0644); @@ -302,7 +303,7 @@ static int pf_open(struct block_device *bdev, fmode_t mode) struct pf_unit *pf = bdev->bd_disk->private_data; int ret; - lock_kernel(); + mutex_lock(&pf_mutex); pf_identify(pf); ret = -ENODEV; @@ -318,7 +319,7 @@ static int pf_open(struct block_device *bdev, fmode_t mode) if (pf->removable) pf_lock(pf, 1); out: - unlock_kernel(); + mutex_unlock(&pf_mutex); return ret; } @@ -349,9 +350,9 @@ static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, u if (pf->access != 1) return -EBUSY; - lock_kernel(); + mutex_lock(&pf_mutex); pf_eject(pf); - unlock_kernel(); + mutex_unlock(&pf_mutex); return 0; } @@ -360,9 +361,9 @@ static int pf_release(struct gendisk *disk, fmode_t mode) { struct pf_unit *pf = disk->private_data; - lock_kernel(); + mutex_lock(&pf_mutex); if (pf->access <= 0) { - unlock_kernel(); + mutex_unlock(&pf_mutex); return -EINVAL; } @@ -371,7 +372,7 @@ static int pf_release(struct gendisk *disk, fmode_t mode) if (!pf->access && pf->removable) pf_lock(pf, 0); - unlock_kernel(); + mutex_unlock(&pf_mutex); return 0; } diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c index c397b3ddba9..6b9a2000d56 100644 --- a/drivers/block/paride/pg.c +++ b/drivers/block/paride/pg.c @@ -162,7 +162,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY}; #include <linux/pg.h> #include <linux/device.h> #include <linux/sched.h> /* current, TASK_* */ -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/jiffies.h> #include <asm/uaccess.h> @@ -193,6 +193,7 @@ module_param_array(drive3, int, NULL, 0); #define ATAPI_IDENTIFY 0x12 +static DEFINE_MUTEX(pg_mutex); static int 
pg_open(struct inode *inode, struct file *file); static int pg_release(struct inode *inode, struct file *file); static ssize_t pg_read(struct file *filp, char __user *buf, @@ -234,6 +235,7 @@ static const struct file_operations pg_fops = { .write = pg_write, .open = pg_open, .release = pg_release, + .llseek = noop_llseek, }; static void pg_init_units(void) @@ -518,7 +520,7 @@ static int pg_open(struct inode *inode, struct file *file) struct pg *dev = &devices[unit]; int ret = 0; - lock_kernel(); + mutex_lock(&pg_mutex); if ((unit >= PG_UNITS) || (!dev->present)) { ret = -ENODEV; goto out; @@ -547,7 +549,7 @@ static int pg_open(struct inode *inode, struct file *file) file->private_data = dev; out: - unlock_kernel(); + mutex_unlock(&pg_mutex); return ret; } diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c index bc5825fdeaa..7179f79d746 100644 --- a/drivers/block/paride/pt.c +++ b/drivers/block/paride/pt.c @@ -146,7 +146,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3}; #include <linux/mtio.h> #include <linux/device.h> #include <linux/sched.h> /* current, TASK_*, schedule_timeout() */ -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/uaccess.h> @@ -189,6 +189,7 @@ module_param_array(drive3, int, NULL, 0); #define ATAPI_MODE_SENSE 0x1a #define ATAPI_LOG_SENSE 0x4d +static DEFINE_MUTEX(pt_mutex); static int pt_open(struct inode *inode, struct file *file); static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int pt_release(struct inode *inode, struct file *file); @@ -239,6 +240,7 @@ static const struct file_operations pt_fops = { .unlocked_ioctl = pt_ioctl, .open = pt_open, .release = pt_release, + .llseek = noop_llseek, }; /* sysfs class support */ @@ -650,9 +652,9 @@ static int pt_open(struct inode *inode, struct file *file) struct pt_unit *tape = pt + unit; int err; - lock_kernel(); + mutex_lock(&pt_mutex); if (unit >= PT_UNITS || (!tape->present)) { - unlock_kernel(); + mutex_unlock(&pt_mutex); return -ENODEV; } @@ -681,12 +683,12 @@ static int pt_open(struct inode *inode, struct file *file) } file->private_data = tape; - unlock_kernel(); + mutex_unlock(&pt_mutex); return 0; out: atomic_inc(&tape->available); - unlock_kernel(); + mutex_unlock(&pt_mutex); return err; } @@ -704,15 +706,15 @@ static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) switch (mtop.mt_op) { case MTREW: - lock_kernel(); + mutex_lock(&pt_mutex); pt_rewind(tape); - unlock_kernel(); + mutex_unlock(&pt_mutex); return 0; case MTWEOF: - lock_kernel(); + mutex_lock(&pt_mutex); pt_write_fm(tape); - unlock_kernel(); + mutex_unlock(&pt_mutex); return 0; default: diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index b1cbeb59bb7..19b3568e932 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -57,7 +57,6 @@ #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/freezer.h> -#include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/slab.h> #include <scsi/scsi_cmnd.h> @@ -86,6 +85,7 @@ #define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1)) +static DEFINE_MUTEX(pktcdvd_mutex); static struct pktcdvd_device *pkt_devs[MAX_WRITERS]; static struct proc_dir_entry *pkt_proc; static int pktdev_major; @@ -753,7 +753,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * rq->timeout = 60*HZ; rq->cmd_type = REQ_TYPE_BLOCK_PC; - rq->cmd_flags |= REQ_HARDBARRIER; if (cgc->quiet) rq->cmd_flags |= REQ_QUIET; @@ -2369,7 
+2368,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush) pkt_shrink_pktlist(pd); } -static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor) +static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) { if (dev_minor >= MAX_WRITERS) return NULL; @@ -2383,7 +2382,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode) VPRINTK(DRIVER_NAME": entering open\n"); - lock_kernel(); + mutex_lock(&pktcdvd_mutex); mutex_lock(&ctl_mutex); pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); if (!pd) { @@ -2411,7 +2410,7 @@ static int pkt_open(struct block_device *bdev, fmode_t mode) } mutex_unlock(&ctl_mutex); - unlock_kernel(); + mutex_unlock(&pktcdvd_mutex); return 0; out_dec: @@ -2419,7 +2418,7 @@ out_dec: out: VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); mutex_unlock(&ctl_mutex); - unlock_kernel(); + mutex_unlock(&pktcdvd_mutex); return ret; } @@ -2428,7 +2427,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode) struct pktcdvd_device *pd = disk->private_data; int ret = 0; - lock_kernel(); + mutex_lock(&pktcdvd_mutex); mutex_lock(&ctl_mutex); pd->refcnt--; BUG_ON(pd->refcnt < 0); @@ -2437,7 +2436,7 @@ static int pkt_close(struct gendisk *disk, fmode_t mode) pkt_release_dev(pd, flush); } mutex_unlock(&ctl_mutex); - unlock_kernel(); + mutex_unlock(&pktcdvd_mutex); return ret; } @@ -2773,7 +2772,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); - lock_kernel(); + mutex_lock(&pktcdvd_mutex); switch (cmd) { case CDROMEJECT: /* @@ -2798,7 +2797,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); ret = -ENOTTY; } - unlock_kernel(); + mutex_unlock(&pktcdvd_mutex); return ret; } @@ -3046,6 +3045,7 @@ static const struct file_operations pkt_ctl_fops = { .compat_ioctl = pkt_ctl_compat_ioctl, #endif .owner = THIS_MODULE, + .llseek = no_llseek, }; static struct miscdevice pkt_misc = { diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index e9da874d041..8e1ce2e2916 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c @@ -113,7 +113,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev, memcpy(buf, dev->bounce_buf+offset, size); offset += size; flush_kernel_dcache_page(bvec->bv_page); - bvec_kunmap_irq(bvec, &flags); + bvec_kunmap_irq(buf, &flags); i++; } } @@ -468,7 +468,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) blk_queue_dma_alignment(queue, dev->blk_size-1); blk_queue_logical_block_size(queue, dev->blk_size); - blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH); + blk_queue_flush(queue, REQ_FLUSH); blk_queue_max_segments(queue, -1); blk_queue_max_segment_size(queue, dev->bounce_size); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c new file mode 100644 index 00000000000..6ec9d53806c --- /dev/null +++ b/drivers/block/rbd.c @@ -0,0 +1,1841 @@ +/* + rbd.c -- Export ceph rados objects as a Linux block device + + + based on drivers/block/osdblk.c: + + Copyright 2009 Red Hat, Inc. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + + + + Instructions for use + -------------------- + + 1) Map a Linux block device to an existing rbd image. + + Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name] + + $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add + + The snapshot name can be "-" or omitted to map the image read/write. + + 2) List all active blkdev<->object mappings. + + In this example, we have performed step #1 twice, creating two blkdevs, + mapped to two separate rados objects in the rados rbd pool + + $ cat /sys/class/rbd/list + #id major client_name pool name snap KB + 0 254 client4143 rbd foo - 1024000 + + The columns, in order, are: + - blkdev unique id + - blkdev assigned major + - rados client id + - rados pool name + - rados block device name + - mapped snapshot ("-" if none) + - device size in KB + + + 3) Create a snapshot. + + Usage: <blkdev id> <snapname> + + $ echo "0 mysnap" > /sys/class/rbd/snap_create + + + 4) Listing a snapshot. + + $ cat /sys/class/rbd/snaps_list + #id snap KB + 0 - 1024000 (*) + 0 foo 1024000 + + The columns, in order, are: + - blkdev unique id + - snapshot name, '-' means none (active read/write version) + - size of device at time of snapshot + - the (*) indicates this is the active version + + 5) Rollback to snapshot. + + Usage: <blkdev id> <snapname> + + $ echo "0 mysnap" > /sys/class/rbd/snap_rollback + + + 6) Mapping an image using snapshot. + + A snapshot mapping is read-only. This is being done by passing + snap=<snapname> to the options when adding a device. + + $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add + + + 7) Remove an active blkdev<->rbd image mapping. + + In this example, we remove the mapping with blkdev unique id 1. + + $ echo 1 > /sys/class/rbd/remove + + + NOTE: The actual creation and deletion of rados objects is outside the scope + of this driver. + + */ + +#include <linux/ceph/libceph.h> +#include <linux/ceph/osd_client.h> +#include <linux/ceph/mon_client.h> +#include <linux/ceph/decode.h> + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/blkdev.h> + +#include "rbd_types.h" + +#define DRV_NAME "rbd" +#define DRV_NAME_LONG "rbd (rados block device)" + +#define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */ + +#define RBD_MAX_MD_NAME_LEN (96 + sizeof(RBD_SUFFIX)) +#define RBD_MAX_POOL_NAME_LEN 64 +#define RBD_MAX_SNAP_NAME_LEN 32 +#define RBD_MAX_OPT_LEN 1024 + +#define RBD_SNAP_HEAD_NAME "-" + +#define DEV_NAME_LEN 32 + +/* + * block device image metadata (in-memory version) + */ +struct rbd_image_header { + u64 image_size; + char block_name[32]; + __u8 obj_order; + __u8 crypt_type; + __u8 comp_type; + struct rw_semaphore snap_rwsem; + struct ceph_snap_context *snapc; + size_t snap_names_len; + u64 snap_seq; + u32 total_snaps; + + char *snap_names; + u64 *snap_sizes; +}; + +/* + * an instance of the client. multiple devices may share a client. 
+ */ +struct rbd_client { + struct ceph_client *client; + struct kref kref; + struct list_head node; +}; + +/* + * a single io request + */ +struct rbd_request { + struct request *rq; /* blk layer request */ + struct bio *bio; /* cloned bio */ + struct page **pages; /* list of used pages */ + u64 len; +}; + +/* + * a single device + */ +struct rbd_device { + int id; /* blkdev unique id */ + + int major; /* blkdev assigned major */ + struct gendisk *disk; /* blkdev's gendisk and rq */ + struct request_queue *q; + + struct ceph_client *client; + struct rbd_client *rbd_client; + + char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */ + + spinlock_t lock; /* queue lock */ + + struct rbd_image_header header; + char obj[RBD_MAX_OBJ_NAME_LEN]; /* rbd image name */ + int obj_len; + char obj_md_name[RBD_MAX_MD_NAME_LEN]; /* hdr nm. */ + char pool_name[RBD_MAX_POOL_NAME_LEN]; + int poolid; + + char snap_name[RBD_MAX_SNAP_NAME_LEN]; + u32 cur_snap; /* index+1 of current snapshot within snap context + 0 - for the head */ + int read_only; + + struct list_head node; +}; + +static spinlock_t node_lock; /* protects client get/put */ + +static struct class *class_rbd; /* /sys/class/rbd */ +static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ +static LIST_HEAD(rbd_dev_list); /* devices */ +static LIST_HEAD(rbd_client_list); /* clients */ + + +static int rbd_open(struct block_device *bdev, fmode_t mode) +{ + struct gendisk *disk = bdev->bd_disk; + struct rbd_device *rbd_dev = disk->private_data; + + set_device_ro(bdev, rbd_dev->read_only); + + if ((mode & FMODE_WRITE) && rbd_dev->read_only) + return -EROFS; + + return 0; +} + +static const struct block_device_operations rbd_bd_ops = { + .owner = THIS_MODULE, + .open = rbd_open, +}; + +/* + * Initialize an rbd client instance. + * We own *opt. + */ +static struct rbd_client *rbd_client_create(struct ceph_options *opt) +{ + struct rbd_client *rbdc; + int ret = -ENOMEM; + + dout("rbd_client_create\n"); + rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL); + if (!rbdc) + goto out_opt; + + kref_init(&rbdc->kref); + INIT_LIST_HEAD(&rbdc->node); + + rbdc->client = ceph_create_client(opt, rbdc); + if (IS_ERR(rbdc->client)) + goto out_rbdc; + opt = NULL; /* Now rbdc->client is responsible for opt */ + + ret = ceph_open_session(rbdc->client); + if (ret < 0) + goto out_err; + + spin_lock(&node_lock); + list_add_tail(&rbdc->node, &rbd_client_list); + spin_unlock(&node_lock); + + dout("rbd_client_create created %p\n", rbdc); + return rbdc; + +out_err: + ceph_destroy_client(rbdc->client); +out_rbdc: + kfree(rbdc); +out_opt: + if (opt) + ceph_destroy_options(opt); + return ERR_PTR(ret); +} + +/* + * Find a ceph client with specific addr and configuration. + */ +static struct rbd_client *__rbd_client_find(struct ceph_options *opt) +{ + struct rbd_client *client_node; + + if (opt->flags & CEPH_OPT_NOSHARE) + return NULL; + + list_for_each_entry(client_node, &rbd_client_list, node) + if (ceph_compare_options(opt, client_node->client) == 0) + return client_node; + return NULL; +} + +/* + * Get a ceph client with specific addr and configuration, if one does + * not exist create it. 
+ */ +static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr, + char *options) +{ + struct rbd_client *rbdc; + struct ceph_options *opt; + int ret; + + ret = ceph_parse_options(&opt, options, mon_addr, + mon_addr + strlen(mon_addr), NULL, NULL); + if (ret < 0) + return ret; + + spin_lock(&node_lock); + rbdc = __rbd_client_find(opt); + if (rbdc) { + ceph_destroy_options(opt); + + /* using an existing client */ + kref_get(&rbdc->kref); + rbd_dev->rbd_client = rbdc; + rbd_dev->client = rbdc->client; + spin_unlock(&node_lock); + return 0; + } + spin_unlock(&node_lock); + + rbdc = rbd_client_create(opt); + if (IS_ERR(rbdc)) + return PTR_ERR(rbdc); + + rbd_dev->rbd_client = rbdc; + rbd_dev->client = rbdc->client; + return 0; +} + +/* + * Destroy ceph client + */ +static void rbd_client_release(struct kref *kref) +{ + struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref); + + dout("rbd_release_client %p\n", rbdc); + spin_lock(&node_lock); + list_del(&rbdc->node); + spin_unlock(&node_lock); + + ceph_destroy_client(rbdc->client); + kfree(rbdc); +} + +/* + * Drop reference to ceph client node. If it's not referenced anymore, release + * it. + */ +static void rbd_put_client(struct rbd_device *rbd_dev) +{ + kref_put(&rbd_dev->rbd_client->kref, rbd_client_release); + rbd_dev->rbd_client = NULL; + rbd_dev->client = NULL; +} + + +/* + * Create a new header structure, translate header format from the on-disk + * header. + */ +static int rbd_header_from_disk(struct rbd_image_header *header, + struct rbd_image_header_ondisk *ondisk, + int allocated_snaps, + gfp_t gfp_flags) +{ + int i; + u32 snap_count = le32_to_cpu(ondisk->snap_count); + int ret = -ENOMEM; + + init_rwsem(&header->snap_rwsem); + + header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); + header->snapc = kmalloc(sizeof(struct ceph_snap_context) + + snap_count * + sizeof(struct rbd_image_snap_ondisk), + gfp_flags); + if (!header->snapc) + return -ENOMEM; + if (snap_count) { + header->snap_names = kmalloc(header->snap_names_len, + GFP_KERNEL); + if (!header->snap_names) + goto err_snapc; + header->snap_sizes = kmalloc(snap_count * sizeof(u64), + GFP_KERNEL); + if (!header->snap_sizes) + goto err_names; + } else { + header->snap_names = NULL; + header->snap_sizes = NULL; + } + memcpy(header->block_name, ondisk->block_name, + sizeof(ondisk->block_name)); + + header->image_size = le64_to_cpu(ondisk->image_size); + header->obj_order = ondisk->options.order; + header->crypt_type = ondisk->options.crypt_type; + header->comp_type = ondisk->options.comp_type; + + atomic_set(&header->snapc->nref, 1); + header->snap_seq = le64_to_cpu(ondisk->snap_seq); + header->snapc->num_snaps = snap_count; + header->total_snaps = snap_count; + + if (snap_count && + allocated_snaps == snap_count) { + for (i = 0; i < snap_count; i++) { + header->snapc->snaps[i] = + le64_to_cpu(ondisk->snaps[i].id); + header->snap_sizes[i] = + le64_to_cpu(ondisk->snaps[i].image_size); + } + + /* copy snapshot names */ + memcpy(header->snap_names, &ondisk->snaps[i], + header->snap_names_len); + } + + return 0; + +err_names: + kfree(header->snap_names); +err_snapc: + kfree(header->snapc); + return ret; +} + +static int snap_index(struct rbd_image_header *header, int snap_num) +{ + return header->total_snaps - snap_num; +} + +static u64 cur_snap_id(struct rbd_device *rbd_dev) +{ + struct rbd_image_header *header = &rbd_dev->header; + + if (!rbd_dev->cur_snap) + return 0; + + return header->snapc->snaps[snap_index(header, rbd_dev->cur_snap)]; +} + 
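/*
 * Illustrative aside (hypothetical values, not patch content): cur_snap == 0
 * always means the writable head; a non-zero cur_snap is stored by
 * rbd_header_set_snap() below as total_snaps - i, where i is the position
 * returned by snap_by_name(). snap_index() simply inverts that, so e.g. with
 * total_snaps == 3 and a name match at i == 1, cur_snap becomes 3 - 1 == 2
 * and cur_snap_id() reads snapc->snaps[snap_index(header, 2)] ==
 * snapc->snaps[3 - 2] == snapc->snaps[1], the entry matched by name.
 */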
+static int snap_by_name(struct rbd_image_header *header, const char *snap_name, + u64 *seq, u64 *size) +{ + int i; + char *p = header->snap_names; + + for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) { + if (strcmp(snap_name, p) == 0) + break; + } + if (i == header->total_snaps) + return -ENOENT; + if (seq) + *seq = header->snapc->snaps[i]; + + if (size) + *size = header->snap_sizes[i]; + + return i; +} + +static int rbd_header_set_snap(struct rbd_device *dev, + const char *snap_name, + u64 *size) +{ + struct rbd_image_header *header = &dev->header; + struct ceph_snap_context *snapc = header->snapc; + int ret = -ENOENT; + + down_write(&header->snap_rwsem); + + if (!snap_name || + !*snap_name || + strcmp(snap_name, "-") == 0 || + strcmp(snap_name, RBD_SNAP_HEAD_NAME) == 0) { + if (header->total_snaps) + snapc->seq = header->snap_seq; + else + snapc->seq = 0; + dev->cur_snap = 0; + dev->read_only = 0; + if (size) + *size = header->image_size; + } else { + ret = snap_by_name(header, snap_name, &snapc->seq, size); + if (ret < 0) + goto done; + + dev->cur_snap = header->total_snaps - ret; + dev->read_only = 1; + } + + ret = 0; +done: + up_write(&header->snap_rwsem); + return ret; +} + +static void rbd_header_free(struct rbd_image_header *header) +{ + kfree(header->snapc); + kfree(header->snap_names); + kfree(header->snap_sizes); +} + +/* + * get the actual striped segment name, offset and length + */ +static u64 rbd_get_segment(struct rbd_image_header *header, + const char *block_name, + u64 ofs, u64 len, + char *seg_name, u64 *segofs) +{ + u64 seg = ofs >> header->obj_order; + + if (seg_name) + snprintf(seg_name, RBD_MAX_SEG_NAME_LEN, + "%s.%012llx", block_name, seg); + + ofs = ofs & ((1 << header->obj_order) - 1); + len = min_t(u64, len, (1 << header->obj_order) - ofs); + + if (segofs) + *segofs = ofs; + + return len; +} + +/* + * bio helpers + */ + +static void bio_chain_put(struct bio *chain) +{ + struct bio *tmp; + + while (chain) { + tmp = chain; + chain = chain->bi_next; + bio_put(tmp); + } +} + +/* + * zeros a bio chain, starting at specific offset + */ +static void zero_bio_chain(struct bio *chain, int start_ofs) +{ + struct bio_vec *bv; + unsigned long flags; + void *buf; + int i; + int pos = 0; + + while (chain) { + bio_for_each_segment(bv, chain, i) { + if (pos + bv->bv_len > start_ofs) { + int remainder = max(start_ofs - pos, 0); + buf = bvec_kmap_irq(bv, &flags); + memset(buf + remainder, 0, + bv->bv_len - remainder); + bvec_kunmap_irq(buf, &flags); + } + pos += bv->bv_len; + } + + chain = chain->bi_next; + } +} + +/* + * bio_chain_clone - clone a chain of bios up to a certain length. + * might return a bio_pair that will need to be released. + */ +static struct bio *bio_chain_clone(struct bio **old, struct bio **next, + struct bio_pair **bp, + int len, gfp_t gfpmask) +{ + struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL; + int total = 0; + + if (*bp) { + bio_pair_release(*bp); + *bp = NULL; + } + + while (old_chain && (total < len)) { + tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs); + if (!tmp) + goto err_out; + + if (total + old_chain->bi_size > len) { + struct bio_pair *bp; + + /* + * this split can only happen with a single paged bio, + * split_bio will BUG_ON if this is not the case + */ + dout("bio_chain_clone split! total=%d remaining=%d" + "bi_size=%d\n", + (int)total, (int)len-total, + (int)old_chain->bi_size); + + /* split the bio. 
We'll release it either in the next + call, or it will have to be released outside */ + bp = bio_split(old_chain, (len - total) / 512ULL); + if (!bp) + goto err_out; + + __bio_clone(tmp, &bp->bio1); + + *next = &bp->bio2; + } else { + __bio_clone(tmp, old_chain); + *next = old_chain->bi_next; + } + + tmp->bi_bdev = NULL; + gfpmask &= ~__GFP_WAIT; + tmp->bi_next = NULL; + + if (!new_chain) { + new_chain = tail = tmp; + } else { + tail->bi_next = tmp; + tail = tmp; + } + old_chain = old_chain->bi_next; + + total += tmp->bi_size; + } + + BUG_ON(total < len); + + if (tail) + tail->bi_next = NULL; + + *old = old_chain; + + return new_chain; + +err_out: + dout("bio_chain_clone with err\n"); + bio_chain_put(new_chain); + return NULL; +} + +/* + * helpers for osd request op vectors. + */ +static int rbd_create_rw_ops(struct ceph_osd_req_op **ops, + int num_ops, + int opcode, + u32 payload_len) +{ + *ops = kzalloc(sizeof(struct ceph_osd_req_op) * (num_ops + 1), + GFP_NOIO); + if (!*ops) + return -ENOMEM; + (*ops)[0].op = opcode; + /* + * op extent offset and length will be set later on + * in calc_raw_layout() + */ + (*ops)[0].payload_len = payload_len; + return 0; +} + +static void rbd_destroy_ops(struct ceph_osd_req_op *ops) +{ + kfree(ops); +} + +/* + * Send ceph osd request + */ +static int rbd_do_request(struct request *rq, + struct rbd_device *dev, + struct ceph_snap_context *snapc, + u64 snapid, + const char *obj, u64 ofs, u64 len, + struct bio *bio, + struct page **pages, + int num_pages, + int flags, + struct ceph_osd_req_op *ops, + int num_reply, + void (*rbd_cb)(struct ceph_osd_request *req, + struct ceph_msg *msg)) +{ + struct ceph_osd_request *req; + struct ceph_file_layout *layout; + int ret; + u64 bno; + struct timespec mtime = CURRENT_TIME; + struct rbd_request *req_data; + struct ceph_osd_request_head *reqhead; + struct rbd_image_header *header = &dev->header; + + ret = -ENOMEM; + req_data = kzalloc(sizeof(*req_data), GFP_NOIO); + if (!req_data) + goto done; + + dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); + + down_read(&header->snap_rwsem); + + req = ceph_osdc_alloc_request(&dev->client->osdc, flags, + snapc, + ops, + false, + GFP_NOIO, pages, bio); + if (IS_ERR(req)) { + up_read(&header->snap_rwsem); + ret = PTR_ERR(req); + goto done_pages; + } + + req->r_callback = rbd_cb; + + req_data->rq = rq; + req_data->bio = bio; + req_data->pages = pages; + req_data->len = len; + + req->r_priv = req_data; + + reqhead = req->r_request->front.iov_base; + reqhead->snapid = cpu_to_le64(CEPH_NOSNAP); + + strncpy(req->r_oid, obj, sizeof(req->r_oid)); + req->r_oid_len = strlen(req->r_oid); + + layout = &req->r_file_layout; + memset(layout, 0, sizeof(*layout)); + layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); + layout->fl_stripe_count = cpu_to_le32(1); + layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); + layout->fl_pg_preferred = cpu_to_le32(-1); + layout->fl_pg_pool = cpu_to_le32(dev->poolid); + ceph_calc_raw_layout(&dev->client->osdc, layout, snapid, + ofs, &len, &bno, req, ops); + + ceph_osdc_build_request(req, ofs, &len, + ops, + snapc, + &mtime, + req->r_oid, req->r_oid_len); + up_read(&header->snap_rwsem); + + ret = ceph_osdc_start_request(&dev->client->osdc, req, false); + if (ret < 0) + goto done_err; + + if (!rbd_cb) { + ret = ceph_osdc_wait_request(&dev->client->osdc, req); + ceph_osdc_put_request(req); + } + return ret; + +done_err: + bio_chain_put(req_data->bio); + ceph_osdc_put_request(req); +done_pages: + kfree(req_data); +done: + if (rq) + 
blk_end_request(rq, ret, len); + return ret; +} + +/* + * Ceph osd op callback + */ +static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) +{ + struct rbd_request *req_data = req->r_priv; + struct ceph_osd_reply_head *replyhead; + struct ceph_osd_op *op; + __s32 rc; + u64 bytes; + int read_op; + + /* parse reply */ + replyhead = msg->front.iov_base; + WARN_ON(le32_to_cpu(replyhead->num_ops) == 0); + op = (void *)(replyhead + 1); + rc = le32_to_cpu(replyhead->result); + bytes = le64_to_cpu(op->extent.length); + read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ); + + dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc); + + if (rc == -ENOENT && read_op) { + zero_bio_chain(req_data->bio, 0); + rc = 0; + } else if (rc == 0 && read_op && bytes < req_data->len) { + zero_bio_chain(req_data->bio, bytes); + bytes = req_data->len; + } + + blk_end_request(req_data->rq, rc, bytes); + + if (req_data->bio) + bio_chain_put(req_data->bio); + + ceph_osdc_put_request(req); + kfree(req_data); +} + +/* + * Do a synchronous ceph osd operation + */ +static int rbd_req_sync_op(struct rbd_device *dev, + struct ceph_snap_context *snapc, + u64 snapid, + int opcode, + int flags, + struct ceph_osd_req_op *orig_ops, + int num_reply, + const char *obj, + u64 ofs, u64 len, + char *buf) +{ + int ret; + struct page **pages; + int num_pages; + struct ceph_osd_req_op *ops = orig_ops; + u32 payload_len; + + num_pages = calc_pages_for(ofs , len); + pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + if (!orig_ops) { + payload_len = (flags & CEPH_OSD_FLAG_WRITE ? len : 0); + ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len); + if (ret < 0) + goto done; + + if ((flags & CEPH_OSD_FLAG_WRITE) && buf) { + ret = ceph_copy_to_page_vector(pages, buf, ofs, len); + if (ret < 0) + goto done_ops; + } + } + + ret = rbd_do_request(NULL, dev, snapc, snapid, + obj, ofs, len, NULL, + pages, num_pages, + flags, + ops, + 2, + NULL); + if (ret < 0) + goto done_ops; + + if ((flags & CEPH_OSD_FLAG_READ) && buf) + ret = ceph_copy_from_page_vector(pages, buf, ofs, ret); + +done_ops: + if (!orig_ops) + rbd_destroy_ops(ops); +done: + ceph_release_page_vector(pages, num_pages); + return ret; +} + +/* + * Do an asynchronous ceph osd operation + */ +static int rbd_do_op(struct request *rq, + struct rbd_device *rbd_dev , + struct ceph_snap_context *snapc, + u64 snapid, + int opcode, int flags, int num_reply, + u64 ofs, u64 len, + struct bio *bio) +{ + char *seg_name; + u64 seg_ofs; + u64 seg_len; + int ret; + struct ceph_osd_req_op *ops; + u32 payload_len; + + seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); + if (!seg_name) + return -ENOMEM; + + seg_len = rbd_get_segment(&rbd_dev->header, + rbd_dev->header.block_name, + ofs, len, + seg_name, &seg_ofs); + + payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0); + + ret = rbd_create_rw_ops(&ops, 1, opcode, payload_len); + if (ret < 0) + goto done; + + /* we've taken care of segment sizes earlier when we + cloned the bios. 
We should never have a segment + truncated at this point */ + BUG_ON(seg_len < len); + + ret = rbd_do_request(rq, rbd_dev, snapc, snapid, + seg_name, seg_ofs, seg_len, + bio, + NULL, 0, + flags, + ops, + num_reply, + rbd_req_cb); +done: + kfree(seg_name); + return ret; +} + +/* + * Request async osd write + */ +static int rbd_req_write(struct request *rq, + struct rbd_device *rbd_dev, + struct ceph_snap_context *snapc, + u64 ofs, u64 len, + struct bio *bio) +{ + return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, + CEPH_OSD_OP_WRITE, + CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, + 2, + ofs, len, bio); +} + +/* + * Request async osd read + */ +static int rbd_req_read(struct request *rq, + struct rbd_device *rbd_dev, + u64 snapid, + u64 ofs, u64 len, + struct bio *bio) +{ + return rbd_do_op(rq, rbd_dev, NULL, + (snapid ? snapid : CEPH_NOSNAP), + CEPH_OSD_OP_READ, + CEPH_OSD_FLAG_READ, + 2, + ofs, len, bio); +} + +/* + * Request sync osd read + */ +static int rbd_req_sync_read(struct rbd_device *dev, + struct ceph_snap_context *snapc, + u64 snapid, + const char *obj, + u64 ofs, u64 len, + char *buf) +{ + return rbd_req_sync_op(dev, NULL, + (snapid ? snapid : CEPH_NOSNAP), + CEPH_OSD_OP_READ, + CEPH_OSD_FLAG_READ, + NULL, + 1, obj, ofs, len, buf); +} + +/* + * Request sync osd read + */ +static int rbd_req_sync_rollback_obj(struct rbd_device *dev, + u64 snapid, + const char *obj) +{ + struct ceph_osd_req_op *ops; + int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0); + if (ret < 0) + return ret; + + ops[0].snap.snapid = snapid; + + ret = rbd_req_sync_op(dev, NULL, + CEPH_NOSNAP, + 0, + CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, + ops, + 1, obj, 0, 0, NULL); + + rbd_destroy_ops(ops); + + if (ret < 0) + return ret; + + return ret; +} + +/* + * Request sync osd read + */ +static int rbd_req_sync_exec(struct rbd_device *dev, + const char *obj, + const char *cls, + const char *method, + const char *data, + int len) +{ + struct ceph_osd_req_op *ops; + int cls_len = strlen(cls); + int method_len = strlen(method); + int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_CALL, + cls_len + method_len + len); + if (ret < 0) + return ret; + + ops[0].cls.class_name = cls; + ops[0].cls.class_len = (__u8)cls_len; + ops[0].cls.method_name = method; + ops[0].cls.method_len = (__u8)method_len; + ops[0].cls.argc = 0; + ops[0].cls.indata = data; + ops[0].cls.indata_len = len; + + ret = rbd_req_sync_op(dev, NULL, + CEPH_NOSNAP, + 0, + CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, + ops, + 1, obj, 0, 0, NULL); + + rbd_destroy_ops(ops); + + dout("cls_exec returned %d\n", ret); + return ret; +} + +/* + * block device queue callback + */ +static void rbd_rq_fn(struct request_queue *q) +{ + struct rbd_device *rbd_dev = q->queuedata; + struct request *rq; + struct bio_pair *bp = NULL; + + rq = blk_fetch_request(q); + + while (1) { + struct bio *bio; + struct bio *rq_bio, *next_bio = NULL; + bool do_write; + int size, op_size = 0; + u64 ofs; + + /* peek at request from block layer */ + if (!rq) + break; + + dout("fetched request\n"); + + /* filter out block requests we don't understand */ + if ((rq->cmd_type != REQ_TYPE_FS)) { + __blk_end_request_all(rq, 0); + goto next; + } + + /* deduce our operation (read, write) */ + do_write = (rq_data_dir(rq) == WRITE); + + size = blk_rq_bytes(rq); + ofs = blk_rq_pos(rq) * 512ULL; + rq_bio = rq->bio; + if (do_write && rbd_dev->read_only) { + __blk_end_request_all(rq, -EROFS); + goto next; + } + + spin_unlock_irq(q->queue_lock); + + dout("%s 0x%x bytes at 0x%llx\n", + do_write ? 
"write" : "read", + size, blk_rq_pos(rq) * 512ULL); + + do { + /* a bio clone to be passed down to OSD req */ + dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); + op_size = rbd_get_segment(&rbd_dev->header, + rbd_dev->header.block_name, + ofs, size, + NULL, NULL); + bio = bio_chain_clone(&rq_bio, &next_bio, &bp, + op_size, GFP_ATOMIC); + if (!bio) { + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, -ENOMEM); + goto next; + } + + /* init OSD command: write or read */ + if (do_write) + rbd_req_write(rq, rbd_dev, + rbd_dev->header.snapc, + ofs, + op_size, bio); + else + rbd_req_read(rq, rbd_dev, + cur_snap_id(rbd_dev), + ofs, + op_size, bio); + + size -= op_size; + ofs += op_size; + + rq_bio = next_bio; + } while (size > 0); + + if (bp) + bio_pair_release(bp); + + spin_lock_irq(q->queue_lock); +next: + rq = blk_fetch_request(q); + } +} + +/* + * a queue callback. Makes sure that we don't create a bio that spans across + * multiple osd objects. One exception would be with a single page bios, + * which we handle later at bio_chain_clone + */ +static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, + struct bio_vec *bvec) +{ + struct rbd_device *rbd_dev = q->queuedata; + unsigned int chunk_sectors = 1 << (rbd_dev->header.obj_order - 9); + sector_t sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev); + unsigned int bio_sectors = bmd->bi_size >> 9; + int max; + + max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + + bio_sectors)) << 9; + if (max < 0) + max = 0; /* bio_add cannot handle a negative return */ + if (max <= bvec->bv_len && bio_sectors == 0) + return bvec->bv_len; + return max; +} + +static void rbd_free_disk(struct rbd_device *rbd_dev) +{ + struct gendisk *disk = rbd_dev->disk; + + if (!disk) + return; + + rbd_header_free(&rbd_dev->header); + + if (disk->flags & GENHD_FL_UP) + del_gendisk(disk); + if (disk->queue) + blk_cleanup_queue(disk->queue); + put_disk(disk); +} + +/* + * reload the ondisk the header + */ +static int rbd_read_header(struct rbd_device *rbd_dev, + struct rbd_image_header *header) +{ + ssize_t rc; + struct rbd_image_header_ondisk *dh; + int snap_count = 0; + u64 snap_names_len = 0; + + while (1) { + int len = sizeof(*dh) + + snap_count * sizeof(struct rbd_image_snap_ondisk) + + snap_names_len; + + rc = -ENOMEM; + dh = kmalloc(len, GFP_KERNEL); + if (!dh) + return -ENOMEM; + + rc = rbd_req_sync_read(rbd_dev, + NULL, CEPH_NOSNAP, + rbd_dev->obj_md_name, + 0, len, + (char *)dh); + if (rc < 0) + goto out_dh; + + rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL); + if (rc < 0) + goto out_dh; + + if (snap_count != header->total_snaps) { + snap_count = header->total_snaps; + snap_names_len = header->snap_names_len; + rbd_header_free(header); + kfree(dh); + continue; + } + break; + } + +out_dh: + kfree(dh); + return rc; +} + +/* + * create a snapshot + */ +static int rbd_header_add_snap(struct rbd_device *dev, + const char *snap_name, + gfp_t gfp_flags) +{ + int name_len = strlen(snap_name); + u64 new_snapid; + int ret; + void *data, *data_start, *data_end; + + /* we should create a snapshot only if we're pointing at the head */ + if (dev->cur_snap) + return -EINVAL; + + ret = ceph_monc_create_snapid(&dev->client->monc, dev->poolid, + &new_snapid); + dout("created snapid=%lld\n", new_snapid); + if (ret < 0) + return ret; + + data = kmalloc(name_len + 16, gfp_flags); + if (!data) + return -ENOMEM; + + data_start = data; + data_end = data + name_len + 16; + + ceph_encode_string_safe(&data, data_end, snap_name, name_len, 
bad); + ceph_encode_64_safe(&data, data_end, new_snapid, bad); + + ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", + data_start, data - data_start); + + kfree(data_start); + + if (ret < 0) + return ret; + + dev->header.snapc->seq = new_snapid; + + return 0; +bad: + return -ERANGE; +} + +/* + * only read the first part of the ondisk header, without the snaps info + */ +static int rbd_update_snaps(struct rbd_device *rbd_dev) +{ + int ret; + struct rbd_image_header h; + u64 snap_seq; + + ret = rbd_read_header(rbd_dev, &h); + if (ret < 0) + return ret; + + down_write(&rbd_dev->header.snap_rwsem); + + snap_seq = rbd_dev->header.snapc->seq; + + kfree(rbd_dev->header.snapc); + kfree(rbd_dev->header.snap_names); + kfree(rbd_dev->header.snap_sizes); + + rbd_dev->header.total_snaps = h.total_snaps; + rbd_dev->header.snapc = h.snapc; + rbd_dev->header.snap_names = h.snap_names; + rbd_dev->header.snap_sizes = h.snap_sizes; + rbd_dev->header.snapc->seq = snap_seq; + + up_write(&rbd_dev->header.snap_rwsem); + + return 0; +} + +static int rbd_init_disk(struct rbd_device *rbd_dev) +{ + struct gendisk *disk; + struct request_queue *q; + int rc; + u64 total_size = 0; + + /* contact OSD, request size info about the object being mapped */ + rc = rbd_read_header(rbd_dev, &rbd_dev->header); + if (rc) + return rc; + + rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size); + if (rc) + return rc; + + /* create gendisk info */ + rc = -ENOMEM; + disk = alloc_disk(RBD_MINORS_PER_MAJOR); + if (!disk) + goto out; + + sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id); + disk->major = rbd_dev->major; + disk->first_minor = 0; + disk->fops = &rbd_bd_ops; + disk->private_data = rbd_dev; + + /* init rq */ + rc = -ENOMEM; + q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock); + if (!q) + goto out_disk; + blk_queue_merge_bvec(q, rbd_merge_bvec); + disk->queue = q; + + q->queuedata = rbd_dev; + + rbd_dev->disk = disk; + rbd_dev->q = q; + + /* finally, announce the disk to the world */ + set_capacity(disk, total_size / 512ULL); + add_disk(disk); + + pr_info("%s: added with size 0x%llx\n", + disk->disk_name, (unsigned long long)total_size); + return 0; + +out_disk: + put_disk(disk); +out: + return rc; +} + +/******************************************************************** + * /sys/class/rbd/ + * add map rados objects to blkdev + * remove unmap rados objects + * list show mappings + *******************************************************************/ + +static void class_rbd_release(struct class *cls) +{ + kfree(cls); +} + +static ssize_t class_rbd_list(struct class *c, + struct class_attribute *attr, + char *data) +{ + int n = 0; + struct list_head *tmp; + int max = PAGE_SIZE; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + n += snprintf(data, max, + "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n"); + + list_for_each(tmp, &rbd_dev_list) { + struct rbd_device *rbd_dev; + + rbd_dev = list_entry(tmp, struct rbd_device, node); + n += snprintf(data+n, max-n, + "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n", + rbd_dev->id, + rbd_dev->major, + ceph_client_id(rbd_dev->client), + rbd_dev->pool_name, + rbd_dev->obj, rbd_dev->snap_name, + rbd_dev->header.image_size >> 10); + if (n == max) + break; + } + + mutex_unlock(&ctl_mutex); + return n; +} + +static ssize_t class_rbd_add(struct class *c, + struct class_attribute *attr, + const char *buf, size_t count) +{ + struct ceph_osd_client *osdc; + struct rbd_device *rbd_dev; + ssize_t rc = -ENOMEM; + int irc, new_id = 0; + struct list_head *tmp; + char 
*mon_dev_name; + char *options; + + if (!try_module_get(THIS_MODULE)) + return -ENODEV; + + mon_dev_name = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL); + if (!mon_dev_name) + goto err_out_mod; + + options = kmalloc(RBD_MAX_OPT_LEN, GFP_KERNEL); + if (!options) + goto err_mon_dev; + + /* new rbd_device object */ + rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); + if (!rbd_dev) + goto err_out_opt; + + /* static rbd_device initialization */ + spin_lock_init(&rbd_dev->lock); + INIT_LIST_HEAD(&rbd_dev->node); + + /* generate unique id: find highest unique id, add one */ + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + list_for_each(tmp, &rbd_dev_list) { + struct rbd_device *rbd_dev; + + rbd_dev = list_entry(tmp, struct rbd_device, node); + if (rbd_dev->id >= new_id) + new_id = rbd_dev->id + 1; + } + + rbd_dev->id = new_id; + + /* add to global list */ + list_add_tail(&rbd_dev->node, &rbd_dev_list); + + /* parse add command */ + if (sscanf(buf, "%" __stringify(RBD_MAX_OPT_LEN) "s " + "%" __stringify(RBD_MAX_OPT_LEN) "s " + "%" __stringify(RBD_MAX_POOL_NAME_LEN) "s " + "%" __stringify(RBD_MAX_OBJ_NAME_LEN) "s" + "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", + mon_dev_name, options, rbd_dev->pool_name, + rbd_dev->obj, rbd_dev->snap_name) < 4) { + rc = -EINVAL; + goto err_out_slot; + } + + if (rbd_dev->snap_name[0] == 0) + rbd_dev->snap_name[0] = '-'; + + rbd_dev->obj_len = strlen(rbd_dev->obj); + snprintf(rbd_dev->obj_md_name, sizeof(rbd_dev->obj_md_name), "%s%s", + rbd_dev->obj, RBD_SUFFIX); + + /* initialize rest of new object */ + snprintf(rbd_dev->name, DEV_NAME_LEN, DRV_NAME "%d", rbd_dev->id); + rc = rbd_get_client(rbd_dev, mon_dev_name, options); + if (rc < 0) + goto err_out_slot; + + mutex_unlock(&ctl_mutex); + + /* pick the pool */ + osdc = &rbd_dev->client->osdc; + rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); + if (rc < 0) + goto err_out_client; + rbd_dev->poolid = rc; + + /* register our block device */ + irc = register_blkdev(0, rbd_dev->name); + if (irc < 0) { + rc = irc; + goto err_out_client; + } + rbd_dev->major = irc; + + /* set up and announce blkdev mapping */ + rc = rbd_init_disk(rbd_dev); + if (rc) + goto err_out_blkdev; + + return count; + +err_out_blkdev: + unregister_blkdev(rbd_dev->major, rbd_dev->name); +err_out_client: + rbd_put_client(rbd_dev); + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); +err_out_slot: + list_del_init(&rbd_dev->node); + mutex_unlock(&ctl_mutex); + + kfree(rbd_dev); +err_out_opt: + kfree(options); +err_mon_dev: + kfree(mon_dev_name); +err_out_mod: + dout("Error adding device %s\n", buf); + module_put(THIS_MODULE); + return rc; +} + +static struct rbd_device *__rbd_get_dev(unsigned long id) +{ + struct list_head *tmp; + struct rbd_device *rbd_dev; + + list_for_each(tmp, &rbd_dev_list) { + rbd_dev = list_entry(tmp, struct rbd_device, node); + if (rbd_dev->id == id) + return rbd_dev; + } + return NULL; +} + +static ssize_t class_rbd_remove(struct class *c, + struct class_attribute *attr, + const char *buf, + size_t count) +{ + struct rbd_device *rbd_dev = NULL; + int target_id, rc; + unsigned long ul; + + rc = strict_strtoul(buf, 10, &ul); + if (rc) + return rc; + + /* convert to int; abort if we lost anything in the conversion */ + target_id = (int) ul; + if (target_id != ul) + return -EINVAL; + + /* remove object from list immediately */ + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + rbd_dev = __rbd_get_dev(target_id); + if (rbd_dev) + list_del_init(&rbd_dev->node); + + mutex_unlock(&ctl_mutex); + + if (!rbd_dev) 
+ return -ENOENT; + + rbd_put_client(rbd_dev); + + /* clean up and free blkdev */ + rbd_free_disk(rbd_dev); + unregister_blkdev(rbd_dev->major, rbd_dev->name); + kfree(rbd_dev); + + /* release module ref */ + module_put(THIS_MODULE); + + return count; +} + +static ssize_t class_rbd_snaps_list(struct class *c, + struct class_attribute *attr, + char *data) +{ + struct rbd_device *rbd_dev = NULL; + struct list_head *tmp; + struct rbd_image_header *header; + int i, n = 0, max = PAGE_SIZE; + int ret; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + n += snprintf(data, max, "#id\tsnap\tKB\n"); + + list_for_each(tmp, &rbd_dev_list) { + char *names, *p; + struct ceph_snap_context *snapc; + + rbd_dev = list_entry(tmp, struct rbd_device, node); + header = &rbd_dev->header; + + down_read(&header->snap_rwsem); + + names = header->snap_names; + snapc = header->snapc; + + n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n", + rbd_dev->id, RBD_SNAP_HEAD_NAME, + header->image_size >> 10, + (!rbd_dev->cur_snap ? " (*)" : "")); + if (n == max) + break; + + p = names; + for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) { + n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n", + rbd_dev->id, p, header->snap_sizes[i] >> 10, + (rbd_dev->cur_snap && + (snap_index(header, i) == rbd_dev->cur_snap) ? + " (*)" : "")); + if (n == max) + break; + } + + up_read(&header->snap_rwsem); + } + + + ret = n; + mutex_unlock(&ctl_mutex); + return ret; +} + +static ssize_t class_rbd_snaps_refresh(struct class *c, + struct class_attribute *attr, + const char *buf, + size_t count) +{ + struct rbd_device *rbd_dev = NULL; + int target_id, rc; + unsigned long ul; + int ret = count; + + rc = strict_strtoul(buf, 10, &ul); + if (rc) + return rc; + + /* convert to int; abort if we lost anything in the conversion */ + target_id = (int) ul; + if (target_id != ul) + return -EINVAL; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + rbd_dev = __rbd_get_dev(target_id); + if (!rbd_dev) { + ret = -ENOENT; + goto done; + } + + rc = rbd_update_snaps(rbd_dev); + if (rc < 0) + ret = rc; + +done: + mutex_unlock(&ctl_mutex); + return ret; +} + +static ssize_t class_rbd_snap_create(struct class *c, + struct class_attribute *attr, + const char *buf, + size_t count) +{ + struct rbd_device *rbd_dev = NULL; + int target_id, ret; + char *name; + + name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL); + if (!name) + return -ENOMEM; + + /* parse snaps add command */ + if (sscanf(buf, "%d " + "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", + &target_id, + name) != 2) { + ret = -EINVAL; + goto done; + } + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + rbd_dev = __rbd_get_dev(target_id); + if (!rbd_dev) { + ret = -ENOENT; + goto done_unlock; + } + + ret = rbd_header_add_snap(rbd_dev, + name, GFP_KERNEL); + if (ret < 0) + goto done_unlock; + + ret = rbd_update_snaps(rbd_dev); + if (ret < 0) + goto done_unlock; + + ret = count; +done_unlock: + mutex_unlock(&ctl_mutex); +done: + kfree(name); + return ret; +} + +static ssize_t class_rbd_rollback(struct class *c, + struct class_attribute *attr, + const char *buf, + size_t count) +{ + struct rbd_device *rbd_dev = NULL; + int target_id, ret; + u64 snapid; + char snap_name[RBD_MAX_SNAP_NAME_LEN]; + u64 cur_ofs; + char *seg_name; + + /* parse snaps add command */ + if (sscanf(buf, "%d " + "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", + &target_id, + snap_name) != 2) { + return -EINVAL; + } + + ret = -ENOMEM; + seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); + if 
(!seg_name) + return ret; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + + rbd_dev = __rbd_get_dev(target_id); + if (!rbd_dev) { + ret = -ENOENT; + goto done_unlock; + } + + ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL); + if (ret < 0) + goto done_unlock; + + dout("snapid=%lld\n", snapid); + + cur_ofs = 0; + while (cur_ofs < rbd_dev->header.image_size) { + cur_ofs += rbd_get_segment(&rbd_dev->header, + rbd_dev->obj, + cur_ofs, (u64)-1, + seg_name, NULL); + dout("seg_name=%s\n", seg_name); + + ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name); + if (ret < 0) + pr_warning("could not roll back obj %s err=%d\n", + seg_name, ret); + } + + ret = rbd_update_snaps(rbd_dev); + if (ret < 0) + goto done_unlock; + + ret = count; + +done_unlock: + mutex_unlock(&ctl_mutex); + kfree(seg_name); + + return ret; +} + +static struct class_attribute class_rbd_attrs[] = { + __ATTR(add, 0200, NULL, class_rbd_add), + __ATTR(remove, 0200, NULL, class_rbd_remove), + __ATTR(list, 0444, class_rbd_list, NULL), + __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh), + __ATTR(snap_create, 0200, NULL, class_rbd_snap_create), + __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL), + __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback), + __ATTR_NULL +}; + +/* + * create control files in sysfs + * /sys/class/rbd/... + */ +static int rbd_sysfs_init(void) +{ + int ret = -ENOMEM; + + class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL); + if (!class_rbd) + goto out; + + class_rbd->name = DRV_NAME; + class_rbd->owner = THIS_MODULE; + class_rbd->class_release = class_rbd_release; + class_rbd->class_attrs = class_rbd_attrs; + + ret = class_register(class_rbd); + if (ret) + goto out_class; + return 0; + +out_class: + kfree(class_rbd); + class_rbd = NULL; + pr_err(DRV_NAME ": failed to create class rbd\n"); +out: + return ret; +} + +static void rbd_sysfs_cleanup(void) +{ + if (class_rbd) + class_destroy(class_rbd); + class_rbd = NULL; +} + +int __init rbd_init(void) +{ + int rc; + + rc = rbd_sysfs_init(); + if (rc) + return rc; + spin_lock_init(&node_lock); + pr_info("loaded " DRV_NAME_LONG "\n"); + return 0; +} + +void __exit rbd_exit(void) +{ + rbd_sysfs_cleanup(); +} + +module_init(rbd_init); +module_exit(rbd_exit); + +MODULE_AUTHOR("Sage Weil <sage@newdream.net>"); +MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>"); +MODULE_DESCRIPTION("rados block device"); + +/* following authorship retained from original osdblk.c */ +MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>"); + +MODULE_LICENSE("GPL"); diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h new file mode 100644 index 00000000000..fc6c678aa2c --- /dev/null +++ b/drivers/block/rbd_types.h @@ -0,0 +1,73 @@ +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2004-2010 Sage Weil <sage@newdream.net> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#ifndef CEPH_RBD_TYPES_H +#define CEPH_RBD_TYPES_H + +#include <linux/types.h> + +/* + * rbd image 'foo' consists of objects + * foo.rbd - image metadata + * foo.00000000 + * foo.00000001 + * ... 
- data + */ + +#define RBD_SUFFIX ".rbd" +#define RBD_DIRECTORY "rbd_directory" +#define RBD_INFO "rbd_info" + +#define RBD_DEFAULT_OBJ_ORDER 22 /* 4MB */ +#define RBD_MIN_OBJ_ORDER 16 +#define RBD_MAX_OBJ_ORDER 30 + +#define RBD_MAX_OBJ_NAME_LEN 96 +#define RBD_MAX_SEG_NAME_LEN 128 + +#define RBD_COMP_NONE 0 +#define RBD_CRYPT_NONE 0 + +#define RBD_HEADER_TEXT "<<< Rados Block Device Image >>>\n" +#define RBD_HEADER_SIGNATURE "RBD" +#define RBD_HEADER_VERSION "001.005" + +struct rbd_info { + __le64 max_id; +} __attribute__ ((packed)); + +struct rbd_image_snap_ondisk { + __le64 id; + __le64 image_size; +} __attribute__((packed)); + +struct rbd_image_header_ondisk { + char text[40]; + char block_name[24]; + char signature[4]; + char version[8]; + struct { + __u8 order; + __u8 crypt_type; + __u8 comp_type; + __u8 unused; + } __attribute__((packed)) options; + __le64 image_size; + __le64 snap_seq; + __le32 snap_count; + __le32 reserved; + __le64 snap_names_len; + struct rbd_image_snap_ondisk snaps[0]; +} __attribute__((packed)); + + +#endif diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 2e46815876d..75333d0a332 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -20,7 +20,7 @@ #include <linux/fd.h> #include <linux/slab.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/hdreg.h> #include <linux/kernel.h> #include <linux/delay.h> @@ -222,6 +222,7 @@ extern int swim_read_sector_header(struct swim __iomem *base, extern int swim_read_sector_data(struct swim __iomem *base, unsigned char *data); +static DEFINE_MUTEX(swim_mutex); static inline void set_swim_mode(struct swim __iomem *base, int enable) { struct iwm __iomem *iwm_base; @@ -666,9 +667,9 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&swim_mutex); ret = floppy_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&swim_mutex); return ret; } @@ -678,7 +679,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) struct floppy_state *fs = disk->private_data; struct swim __iomem *base = fs->swd->base; - lock_kernel(); + mutex_lock(&swim_mutex); if (fs->ref_count < 0) fs->ref_count = 0; else if (fs->ref_count > 0) @@ -686,7 +687,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) if (fs->ref_count == 0) swim_motor(base, OFF); - unlock_kernel(); + mutex_unlock(&swim_mutex); return 0; } @@ -704,9 +705,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, case FDEJECT: if (fs->ref_count != 1) return -EBUSY; - lock_kernel(); + mutex_lock(&swim_mutex); err = floppy_eject(fs); - unlock_kernel(); + mutex_unlock(&swim_mutex); return err; case FDGETPRM: diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index cc6a3864822..bf3a5b85929 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -25,7 +25,7 @@ #include <linux/ioctl.h> #include <linux/blkdev.h> #include <linux/interrupt.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/module.h> #include <linux/spinlock.h> #include <asm/io.h> @@ -36,6 +36,7 @@ #include <asm/machdep.h> #include <asm/pmac_feature.h> +static DEFINE_MUTEX(swim3_mutex); static struct request_queue *swim3_queue; static struct gendisk *disks[2]; static struct request *fd_req; @@ -873,9 +874,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&swim3_mutex); ret = floppy_locked_ioctl(bdev, mode, cmd, param); - unlock_kernel(); + 
mutex_unlock(&swim3_mutex); return ret; } @@ -953,9 +954,9 @@ static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&swim3_mutex); ret = floppy_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&swim3_mutex); return ret; } @@ -964,13 +965,13 @@ static int floppy_release(struct gendisk *disk, fmode_t mode) { struct floppy_state *fs = disk->private_data; struct swim3 __iomem *sw = fs->swim3; - lock_kernel(); + mutex_lock(&swim3_mutex); if (fs->ref_count > 0 && --fs->ref_count == 0) { swim3_action(fs, MOTOR_OFF); out_8(&sw->control_bic, 0xff); swim3_select(fs, RELAX); } - unlock_kernel(); + mutex_unlock(&swim3_mutex); return 0; } diff --git a/drivers/block/ub.c b/drivers/block/ub.c index c48e1487858..b5690a045a0 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -28,7 +28,7 @@ #include <linux/timer.h> #include <linux/scatterlist.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <scsi/scsi.h> #define DRV_NAME "ub" @@ -248,6 +248,7 @@ struct ub_completion { spinlock_t lock; }; +static DEFINE_MUTEX(ub_mutex); static inline void ub_init_completion(struct ub_completion *x) { x->done = 0; @@ -1715,9 +1716,9 @@ static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&ub_mutex); ret = ub_bd_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&ub_mutex); return ret; } @@ -1730,9 +1731,9 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode) struct ub_lun *lun = disk->private_data; struct ub_dev *sc = lun->udev; - lock_kernel(); + mutex_lock(&ub_mutex); ub_put(sc); - unlock_kernel(); + mutex_unlock(&ub_mutex); return 0; } @@ -1747,9 +1748,9 @@ static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode, void __user *usermem = (void __user *) arg; int ret; - lock_kernel(); + mutex_lock(&ub_mutex); ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem); - unlock_kernel(); + mutex_unlock(&ub_mutex); return ret; } diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index f651e51a331..e2ff697697c 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c @@ -41,7 +41,7 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/string.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/dma-mapping.h> #include <linux/completion.h> #include <linux/device.h> @@ -73,6 +73,7 @@ enum { MAX_DISK_NAME = FIELD_SIZEOF(struct gendisk, disk_name) }; +static DEFINE_MUTEX(viodasd_mutex); static DEFINE_SPINLOCK(viodasd_spinlock); #define VIOMAXREQ 16 @@ -180,9 +181,9 @@ static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&viodasd_mutex); ret = viodasd_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&viodasd_mutex); return ret; } @@ -196,7 +197,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode) struct viodasd_device *d = disk->private_data; HvLpEvent_Rc hvrc; - lock_kernel(); + mutex_lock(&viodasd_mutex); /* Send the event to OS/400. 
We DON'T expect a response */ hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, HvLpEvent_Type_VirtualIo, @@ -210,7 +211,7 @@ static int viodasd_release(struct gendisk *disk, fmode_t mode) if (hvrc != 0) pr_warning("HV close call failed %d\n", (int)hvrc); - unlock_kernel(); + mutex_unlock(&viodasd_mutex); return 0; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 2aafafca2b1..6ecf89cdf00 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -2,7 +2,6 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> #include <linux/hdreg.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> @@ -128,9 +127,6 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, } } - if (vbr->req->cmd_flags & REQ_HARDBARRIER) - vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; - sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); /* @@ -202,6 +198,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) struct virtio_blk *vblk = disk->private_data; struct request *req; struct bio *bio; + int err; bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); @@ -215,11 +212,14 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) } req->cmd_type = REQ_TYPE_SPECIAL; - return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); + err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); + blk_put_request(req); + + return err; } -static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode, - unsigned cmd, unsigned long data) +static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long data) { struct gendisk *disk = bdev->bd_disk; struct virtio_blk *vblk = disk->private_data; @@ -234,18 +234,6 @@ static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode, (void __user *)data); } -static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long param) -{ - int ret; - - lock_kernel(); - ret = virtblk_locked_ioctl(bdev, mode, cmd, param); - unlock_kernel(); - - return ret; -} - /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { @@ -388,31 +376,9 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) vblk->disk->driverfs_dev = &vdev->dev; index++; - if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) { - /* - * If the FLUSH feature is supported we do have support for - * flushing a volatile write cache on the host. Use that - * to implement write barrier support. - */ - blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); - } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) { - /* - * If the BARRIER feature is supported the host expects us - * to order request by tags. This implies there is not - * volatile write cache on the host, and that the host - * never re-orders outstanding I/O. This feature is not - * useful for real life scenarious and deprecated. - */ - blk_queue_ordered(q, QUEUE_ORDERED_TAG); - } else { - /* - * If the FLUSH feature is not supported we must assume that - * the host does not perform any kind of volatile write - * caching. We still need to drain the queue to provider - * proper barrier semantics. 
- */ - blk_queue_ordered(q, QUEUE_ORDERED_DRAIN); - } + /* configure queue flush support */ + if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) + blk_queue_flush(q, REQ_FLUSH); /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) @@ -531,9 +497,9 @@ static const struct virtio_device_id id_table[] = { }; static unsigned int features[] = { - VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, - VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, - VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY + VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, + VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, + VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY }; /* diff --git a/drivers/block/xd.c b/drivers/block/xd.c index d5a3cd75056..4abd2bcd20f 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c @@ -46,7 +46,7 @@ #include <linux/init.h> #include <linux/wait.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/blkpg.h> #include <linux/delay.h> #include <linux/io.h> @@ -58,6 +58,7 @@ #include "xd.h" +static DEFINE_MUTEX(xd_mutex); static void __init do_xd_setup (int *integers); #ifdef MODULE static int xd[5] = { -1,-1,-1,-1, }; @@ -381,9 +382,9 @@ static int xd_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&xd_mutex); ret = xd_locked_ioctl(bdev, mode, cmd, param); - unlock_kernel(); + mutex_unlock(&xd_mutex); return ret; } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ab735a605cf..4b33a18c32e 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -41,7 +41,7 @@ #include <linux/cdrom.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/scatterlist.h> #include <xen/xen.h> @@ -69,6 +69,7 @@ struct blk_shadow { unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; +static DEFINE_MUTEX(blkfront_mutex); static const struct block_device_operations xlvbd_block_fops; #define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) @@ -95,7 +96,7 @@ struct blkfront_info struct gnttab_free_callback callback; struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; - int feature_barrier; + unsigned int feature_flush; int is_ready; }; @@ -418,26 +419,12 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) } -static int xlvbd_barrier(struct blkfront_info *info) +static void xlvbd_flush(struct blkfront_info *info) { - int err; - const char *barrier; - - switch (info->feature_barrier) { - case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; - case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; - case QUEUE_ORDERED_NONE: barrier = "disabled"; break; - default: return -EINVAL; - } - - err = blk_queue_ordered(info->rq, info->feature_barrier); - - if (err) - return err; - + blk_queue_flush(info->rq, info->feature_flush); printk(KERN_INFO "blkfront: %s: barriers %s\n", - info->gd->disk_name, barrier); - return 0; + info->gd->disk_name, + info->feature_flush ? 
"enabled" : "disabled"); } @@ -516,7 +503,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity, info->rq = gd->queue; info->gd = gd; - xlvbd_barrier(info); + xlvbd_flush(info); if (vdisk_info & VDISK_READONLY) set_disk_ro(gd, 1); @@ -662,8 +649,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", info->gd->disk_name); error = -EOPNOTSUPP; - info->feature_barrier = QUEUE_ORDERED_NONE; - xlvbd_barrier(info); + info->feature_flush = 0; + xlvbd_flush(info); } /* fall through */ case BLKIF_OP_READ: @@ -1076,20 +1063,20 @@ static void blkfront_connect(struct blkfront_info *info) /* * If there's no "feature-barrier" defined, then it means * we're dealing with a very old backend which writes - * synchronously; draining will do what needs to get done. + * synchronously; nothing to do. * - * If there are barriers, then we can do full queued writes - * with tagged barriers. - * - * If barriers are not supported, then there's no much we can - * do, so just set ordering to NONE. + * If there are barriers, then we use flush. */ - if (err) - info->feature_barrier = QUEUE_ORDERED_DRAIN; - else if (barrier) - info->feature_barrier = QUEUE_ORDERED_TAG; - else - info->feature_barrier = QUEUE_ORDERED_NONE; + info->feature_flush = 0; + + /* + * The driver doesn't properly handled empty flushes, so + * lets disable barrier support for now. + */ +#if 0 + if (!err && barrier) + info->feature_flush = REQ_FLUSH; +#endif err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); if (err) { @@ -1201,7 +1188,7 @@ static int blkif_open(struct block_device *bdev, fmode_t mode) struct blkfront_info *info; int err = 0; - lock_kernel(); + mutex_lock(&blkfront_mutex); info = disk->private_data; if (!info) { @@ -1219,7 +1206,7 @@ static int blkif_open(struct block_device *bdev, fmode_t mode) mutex_unlock(&info->mutex); out: - unlock_kernel(); + mutex_unlock(&blkfront_mutex); return err; } @@ -1229,7 +1216,7 @@ static int blkif_release(struct gendisk *disk, fmode_t mode) struct block_device *bdev; struct xenbus_device *xbdev; - lock_kernel(); + mutex_lock(&blkfront_mutex); bdev = bdget_disk(disk, 0); bdput(bdev); @@ -1263,7 +1250,7 @@ static int blkif_release(struct gendisk *disk, fmode_t mode) } out: - unlock_kernel(); + mutex_unlock(&blkfront_mutex); return 0; } diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 057413bb16e..6e968cd4893 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -89,7 +89,7 @@ #include <linux/delay.h> #include <linux/slab.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/platform_device.h> @@ -214,6 +214,7 @@ struct ace_device { u16 cf_id[ATA_ID_WORDS]; }; +static DEFINE_MUTEX(xsysace_mutex); static int ace_major; /* --------------------------------------------------------------------- @@ -903,13 +904,13 @@ static int ace_open(struct block_device *bdev, fmode_t mode) dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); - lock_kernel(); + mutex_lock(&xsysace_mutex); spin_lock_irqsave(&ace->lock, flags); ace->users++; spin_unlock_irqrestore(&ace->lock, flags); check_disk_change(bdev); - unlock_kernel(); + mutex_unlock(&xsysace_mutex); return 0; } @@ -922,7 +923,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode) dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); - lock_kernel(); + mutex_lock(&xsysace_mutex); spin_lock_irqsave(&ace->lock, flags); 
ace->users--; if (ace->users == 0) { @@ -930,7 +931,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode) ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); } spin_unlock_irqrestore(&ace->lock, flags); - unlock_kernel(); + mutex_unlock(&xsysace_mutex); return 0; } diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index d75b2bb601a..dcd4cfcf412 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -33,7 +33,7 @@ #include <linux/module.h> #include <linux/blkdev.h> #include <linux/bitops.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <asm/setup.h> @@ -57,6 +57,7 @@ extern struct mem_info m68k_memory[NUM_MEMINFO]; #define Z2RAM_CHUNK1024 ( Z2RAM_CHUNKSIZE >> 10 ) +static DEFINE_MUTEX(z2ram_mutex); static u_long *z2ram_map = NULL; static u_long z2ram_size = 0; static int z2_count = 0; @@ -154,7 +155,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode) device = MINOR(bdev->bd_dev); - lock_kernel(); + mutex_lock(&z2ram_mutex); if ( current_device != -1 && current_device != device ) { rc = -EBUSY; @@ -296,25 +297,25 @@ static int z2_open(struct block_device *bdev, fmode_t mode) set_capacity(z2ram_gendisk, z2ram_size >> 9); } - unlock_kernel(); + mutex_unlock(&z2ram_mutex); return 0; err_out_kfree: kfree(z2ram_map); err_out: - unlock_kernel(); + mutex_unlock(&z2ram_mutex); return rc; } static int z2_release(struct gendisk *disk, fmode_t mode) { - lock_kernel(); + mutex_lock(&z2ram_mutex); if ( current_device == -1 ) { - unlock_kernel(); + mutex_unlock(&z2ram_mutex); return 0; } - unlock_kernel(); + mutex_unlock(&z2ram_mutex); /* * FIXME: unmap memory */ diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c index d52e90a5a61..4104b7feae6 100644 --- a/drivers/bluetooth/bluecard_cs.c +++ b/drivers/bluetooth/bluecard_cs.c @@ -39,7 +39,6 @@ #include <linux/skbuff.h> #include <linux/io.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -865,8 +864,7 @@ static int bluecard_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ; return bluecard_config(link); } @@ -886,7 +884,7 @@ static int bluecard_config(struct pcmcia_device *link) bluecard_info_t *info = link->priv; int i, n; - link->conf.ConfigIndex = 0x20; + link->config_index = 0x20; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; link->resource[0]->end = 64; @@ -906,7 +904,7 @@ static int bluecard_config(struct pcmcia_device *link) if (i != 0) goto failed; - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) goto failed; @@ -942,9 +940,7 @@ MODULE_DEVICE_TABLE(pcmcia, bluecard_ids); static struct pcmcia_driver bluecard_driver = { .owner = THIS_MODULE, - .drv = { - .name = "bluecard_cs", - }, + .name = "bluecard_cs", .probe = bluecard_probe, .remove = bluecard_detach, .id_table = bluecard_ids, diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c index 7ab8f29d5e0..0c8a6558749 100644 --- a/drivers/bluetooth/bt3c_cs.c +++ b/drivers/bluetooth/bt3c_cs.c @@ -45,7 +45,6 @@ #include <linux/device.h> #include <linux/firmware.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -657,11 +656,8 @@ static int bt3c_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; 
- link->resource[0]->end = 8; - - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | + CONF_AUTO_SET_IO; return bt3c_config(link); } @@ -675,43 +671,41 @@ static void bt3c_detach(struct pcmcia_device *link) kfree(info); } -static int bt3c_check_config(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data) { - unsigned long try = (unsigned long) priv_data; + int *try = priv_data; - p_dev->io_lines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK; + if (try == 0) + p_dev->io_lines = 16; - if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; - if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && - (cf->io.win[0].base != 0)) { - p_dev->resource[0]->start = cf->io.win[0].base; - if (!pcmcia_request_io(p_dev)) - return 0; - } - return -ENODEV; + if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0)) + return -EINVAL; + + p_dev->resource[0]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + return pcmcia_request_io(p_dev); } static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; int j; - if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { - for (j = 0; j < 5; j++) { - p_dev->resource[0]->start = base[j]; - p_dev->io_lines = base[j] ? 16 : 3; - if (!pcmcia_request_io(p_dev)) - return 0; - } + if (p_dev->io_lines > 3) + return -ENODEV; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 8; + + for (j = 0; j < 5; j++) { + p_dev->resource[0]->start = base[j]; + p_dev->io_lines = base[j] ? 
16 : 3; + if (!pcmcia_request_io(p_dev)) + return 0; } return -ENODEV; } @@ -742,7 +736,7 @@ found_port: if (i != 0) goto failed; - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) goto failed; @@ -775,9 +769,7 @@ MODULE_DEVICE_TABLE(pcmcia, bt3c_ids); static struct pcmcia_driver bt3c_driver = { .owner = THIS_MODULE, - .drv = { - .name = "bt3c_cs", - }, + .name = "bt3c_cs", .probe = bt3c_probe, .remove = bt3c_detach, .id_table = bt3c_ids, diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c index 54739b08c30..fd6305bf953 100644 --- a/drivers/bluetooth/btmrvl_debugfs.c +++ b/drivers/bluetooth/btmrvl_debugfs.c @@ -92,6 +92,7 @@ static const struct file_operations btmrvl_hscfgcmd_fops = { .read = btmrvl_hscfgcmd_read, .write = btmrvl_hscfgcmd_write, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf, @@ -130,6 +131,7 @@ static const struct file_operations btmrvl_psmode_fops = { .read = btmrvl_psmode_read, .write = btmrvl_psmode_write, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, @@ -173,6 +175,7 @@ static const struct file_operations btmrvl_pscmd_fops = { .read = btmrvl_pscmd_read, .write = btmrvl_pscmd_write, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf, @@ -211,6 +214,7 @@ static const struct file_operations btmrvl_gpiogap_fops = { .read = btmrvl_gpiogap_read, .write = btmrvl_gpiogap_write, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, @@ -252,6 +256,7 @@ static const struct file_operations btmrvl_hscmd_fops = { .read = btmrvl_hscmd_read, .write = btmrvl_hscmd_write, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf, @@ -289,6 +294,7 @@ static const struct file_operations btmrvl_hsmode_fops = { .read = btmrvl_hsmode_read, .write = btmrvl_hsmode_write, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf, @@ -306,6 +312,7 @@ static ssize_t btmrvl_curpsmode_read(struct file *file, char __user *userbuf, static const struct file_operations btmrvl_curpsmode_fops = { .read = btmrvl_curpsmode_read, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf, @@ -323,6 +330,7 @@ static ssize_t btmrvl_psstate_read(struct file *file, char __user * userbuf, static const struct file_operations btmrvl_psstate_fops = { .read = btmrvl_psstate_read, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf, @@ -340,6 +348,7 @@ static ssize_t btmrvl_hsstate_read(struct file *file, char __user *userbuf, static const struct file_operations btmrvl_hsstate_fops = { .read = btmrvl_hsstate_read, .open = btmrvl_open_generic, + .llseek = default_llseek, }; static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf, @@ -358,6 +367,7 @@ static ssize_t btmrvl_txdnldready_read(struct file *file, char __user *userbuf, static const struct file_operations btmrvl_txdnldready_fops = { .read = btmrvl_txdnldready_read, .open = btmrvl_open_generic, 
+ .llseek = default_llseek, }; void btmrvl_debugfs_init(struct hci_dev *hdev) diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c index 1c4f5e863b0..f8a0708e231 100644 --- a/drivers/bluetooth/btuart_cs.c +++ b/drivers/bluetooth/btuart_cs.c @@ -41,7 +41,6 @@ #include <asm/system.h> #include <asm/io.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -586,11 +585,8 @@ static int btuart_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[0]->end = 8; - - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | + CONF_AUTO_SET_IO; return btuart_config(link); } @@ -604,43 +600,41 @@ static void btuart_detach(struct pcmcia_device *link) kfree(info); } -static int btuart_check_config(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data) { int *try = priv_data; - p_dev->io_lines = (try == 0) ? 16 : cf->io.flags & CISTPL_IO_LINES_MASK; + if (try == 0) + p_dev->io_lines = 16; - if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; - if ((cf->io.nwin > 0) && (cf->io.win[0].len == 8) && - (cf->io.win[0].base != 0)) { - p_dev->resource[0]->start = cf->io.win[0].base; - if (!pcmcia_request_io(p_dev)) - return 0; - } - return -ENODEV; + if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0)) + return -EINVAL; + + p_dev->resource[0]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + return pcmcia_request_io(p_dev); } static int btuart_check_config_notpicky(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; int j; - if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { - for (j = 0; j < 5; j++) { - p_dev->resource[0]->start = base[j]; - p_dev->io_lines = base[j] ? 16 : 3; - if (!pcmcia_request_io(p_dev)) - return 0; - } + if (p_dev->io_lines > 3) + return -ENODEV; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 8; + + for (j = 0; j < 5; j++) { + p_dev->resource[0]->start = base[j]; + p_dev->io_lines = base[j] ? 
16 : 3; + if (!pcmcia_request_io(p_dev)) + return 0; } return -ENODEV; } @@ -671,7 +665,7 @@ found_port: if (i != 0) goto failed; - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) goto failed; @@ -703,9 +697,7 @@ MODULE_DEVICE_TABLE(pcmcia, btuart_ids); static struct pcmcia_driver btuart_driver = { .owner = THIS_MODULE, - .drv = { - .name = "btuart_cs", - }, + .name = "btuart_cs", .probe = btuart_probe, .remove = btuart_detach, .id_table = btuart_ids, diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c index db7c8db695f..26ee0cf88d2 100644 --- a/drivers/bluetooth/dtl1_cs.c +++ b/drivers/bluetooth/dtl1_cs.c @@ -41,7 +41,6 @@ #include <asm/system.h> #include <asm/io.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -572,11 +571,7 @@ static int dtl1_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[0]->end = 8; - - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return dtl1_config(link); } @@ -591,18 +586,14 @@ static void dtl1_detach(struct pcmcia_device *link) kfree(info); } -static int dtl1_confcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) { - if ((cf->io.nwin != 1) || (cf->io.win[0].len <= 8)) + if ((p_dev->resource[1]->end) || (p_dev->resource[1]->end < 8)) return -ENODEV; - p_dev->resource[0]->start = cf->io.win[0].base; - p_dev->resource[0]->end = cf->io.win[0].len; /*yo */ - p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + return pcmcia_request_io(p_dev); } @@ -620,7 +611,7 @@ static int dtl1_config(struct pcmcia_device *link) if (i != 0) goto failed; - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) goto failed; @@ -656,9 +647,7 @@ MODULE_DEVICE_TABLE(pcmcia, dtl1_ids); static struct pcmcia_driver dtl1_driver = { .owner = THIS_MODULE, - .drv = { - .name = "dtl1_cs", - }, + .name = "dtl1_cs", .probe = dtl1_probe, .remove = dtl1_detach, .id_table = dtl1_ids, diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 998833d93c1..17361bad46d 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -256,9 +256,16 @@ static int hci_uart_tty_open(struct tty_struct *tty) BT_DBG("tty %p", tty); + /* FIXME: This btw is bogus, nothing requires the old ldisc to clear + the pointer */ if (hu) return -EEXIST; + /* Error if the tty has no write op instead of leaving an exploitable + hole */ + if (tty->ops->write == NULL) + return -EOPNOTSUPP; + if (!(hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL))) { BT_ERR("Can't allocate control structure"); return -ENFILE; diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 3aa7b2a54b6..67c180c2c1e 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -282,6 +282,7 @@ static const struct file_operations vhci_fops = { .poll = vhci_poll, .open = vhci_open, .release = vhci_release, + .llseek = no_llseek, }; static struct miscdevice vhci_miscdev= { diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 
261107d1457..3af6516919b 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -34,7 +34,7 @@ #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/device.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/platform_device.h> @@ -81,6 +81,7 @@ #define GDROM_DEFAULT_TIMEOUT (HZ * 7) +static DEFINE_MUTEX(gdrom_mutex); static const struct { int sense_key; const char * const text; @@ -494,17 +495,17 @@ static struct cdrom_device_ops gdrom_ops = { static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&gdrom_mutex); ret = cdrom_open(gd.cd_info, bdev, mode); - unlock_kernel(); + mutex_unlock(&gdrom_mutex); return ret; } static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode) { - lock_kernel(); + mutex_lock(&gdrom_mutex); cdrom_release(gd.cd_info, mode); - unlock_kernel(); + mutex_unlock(&gdrom_mutex); return 0; } @@ -518,9 +519,9 @@ static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&gdrom_mutex); ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg); - unlock_kernel(); + mutex_unlock(&gdrom_mutex); return ret; } diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 56bf9f44700..be73a9b493a 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -42,7 +42,7 @@ #include <linux/module.h> #include <linux/completion.h> #include <linux/proc_fs.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/seq_file.h> #include <linux/scatterlist.h> @@ -61,6 +61,7 @@ */ #define VIOCD_MAX_CD HVMAXARCHITECTEDVIRTUALCDROMS +static DEFINE_MUTEX(viocd_mutex); static const struct vio_error_entry viocd_err_table[] = { {0x0201, EINVAL, "Invalid Range"}, {0x0202, EINVAL, "Invalid Token"}, @@ -156,9 +157,9 @@ static int viocd_blk_open(struct block_device *bdev, fmode_t mode) struct disk_info *di = bdev->bd_disk->private_data; int ret; - lock_kernel(); + mutex_lock(&viocd_mutex); ret = cdrom_open(&di->viocd_info, bdev, mode); - unlock_kernel(); + mutex_unlock(&viocd_mutex); return ret; } @@ -166,9 +167,9 @@ static int viocd_blk_open(struct block_device *bdev, fmode_t mode) static int viocd_blk_release(struct gendisk *disk, fmode_t mode) { struct disk_info *di = disk->private_data; - lock_kernel(); + mutex_lock(&viocd_mutex); cdrom_release(&di->viocd_info, mode); - unlock_kernel(); + mutex_unlock(&viocd_mutex); return 0; } @@ -178,9 +179,9 @@ static int viocd_blk_ioctl(struct block_device *bdev, fmode_t mode, struct disk_info *di = bdev->bd_disk->private_data; int ret; - lock_kernel(); + mutex_lock(&viocd_mutex); ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg); - unlock_kernel(); + mutex_unlock(&viocd_mutex); return ret; } diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index 4b66c69eaf5..5ddf67e76f8 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -57,7 +57,7 @@ config AGP_AMD config AGP_AMD64 tristate "AMD Opteron/Athlon64 on-CPU GART support" - depends on AGP && X86 && K8_NB + depends on AGP && X86 && AMD_NB help This option gives you AGP support for the GLX component of X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. 
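The gdrom and viocd hunks above follow the same lock_kernel()/unlock_kernel()-to-mutex pushdown used by the other block and char driver conversions in this series. A minimal sketch of that pattern (illustrative only, not part of the patch; the driver name "foo" and the foo_do_ioctl() helper are hypothetical):

/*
 * Sketch of the BKL pushdown pattern: a per-driver mutex replaces the
 * big kernel lock around the driver's ioctl path.  Names are hypothetical.
 */
#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);		/* replaces lock_kernel()/unlock_kernel() */

static long foo_do_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	return 0;			/* the real per-command work would live here */
}

static long foo_unlocked_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	long ret;

	mutex_lock(&foo_mutex);		/* was: lock_kernel() */
	ret = foo_do_ioctl(file, cmd, arg);
	mutex_unlock(&foo_mutex);	/* was: unlock_kernel() */
	return ret;
}

The per-driver mutex keeps the serialization the BKL used to provide, but only within one driver, which is why each converted file gains its own static DEFINE_MUTEX.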
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 70312da4c96..42396df5555 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -15,7 +15,7 @@ #include <linux/mmzone.h> #include <asm/page.h> /* PAGE_SIZE */ #include <asm/e820.h> -#include <asm/k8.h> +#include <asm/amd_nb.h> #include <asm/gart.h> #include "agp.h" @@ -124,7 +124,7 @@ static int amd64_fetch_size(void) u32 temp; struct aper_size_info_32 *values; - dev = k8_northbridges[0]; + dev = k8_northbridges.nb_misc[0]; if (dev==NULL) return 0; @@ -181,10 +181,14 @@ static int amd_8151_configure(void) unsigned long gatt_bus = virt_to_phys(agp_bridge->gatt_table_real); int i; + if (!k8_northbridges.gart_supported) + return 0; + /* Configure AGP regs in each x86-64 host bridge. */ - for (i = 0; i < num_k8_northbridges; i++) { + for (i = 0; i < k8_northbridges.num; i++) { agp_bridge->gart_bus_addr = - amd64_configure(k8_northbridges[i], gatt_bus); + amd64_configure(k8_northbridges.nb_misc[i], + gatt_bus); } k8_flush_garts(); return 0; @@ -195,11 +199,15 @@ static void amd64_cleanup(void) { u32 tmp; int i; - for (i = 0; i < num_k8_northbridges; i++) { - struct pci_dev *dev = k8_northbridges[i]; + + if (!k8_northbridges.gart_supported) + return; + + for (i = 0; i < k8_northbridges.num; i++) { + struct pci_dev *dev = k8_northbridges.nb_misc[i]; /* disable gart translation */ pci_read_config_dword(dev, AMD64_GARTAPERTURECTL, &tmp); - tmp &= ~AMD64_GARTEN; + tmp &= ~GARTEN; pci_write_config_dword(dev, AMD64_GARTAPERTURECTL, tmp); } } @@ -313,22 +321,25 @@ static __devinit int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, if (order < 0 || !agp_aperture_valid(aper, (32*1024*1024)<<order)) return -1; - pci_write_config_dword(nb, AMD64_GARTAPERTURECTL, order << 1); + gart_set_size_and_enable(nb, order); pci_write_config_dword(nb, AMD64_GARTAPERTUREBASE, aper >> 25); return 0; } -static __devinit int cache_nbs (struct pci_dev *pdev, u32 cap_ptr) +static __devinit int cache_nbs(struct pci_dev *pdev, u32 cap_ptr) { int i; if (cache_k8_northbridges() < 0) return -ENODEV; + if (!k8_northbridges.gart_supported) + return -ENODEV; + i = 0; - for (i = 0; i < num_k8_northbridges; i++) { - struct pci_dev *dev = k8_northbridges[i]; + for (i = 0; i < k8_northbridges.num; i++) { + struct pci_dev *dev = k8_northbridges.nb_misc[i]; if (fix_northbridge(dev, pdev, cap_ptr) < 0) { dev_err(&dev->dev, "no usable aperture found\n"); #ifdef __x86_64__ @@ -405,7 +416,8 @@ static int __devinit uli_agp_init(struct pci_dev *pdev) } /* shadow x86-64 registers into ULi registers */ - pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &httfea); + pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, + &httfea); /* if x86-64 aperture base is beyond 4G, exit here */ if ((httfea & 0x7fff) >> (32 - 25)) { @@ -472,7 +484,8 @@ static int nforce3_agp_init(struct pci_dev *pdev) pci_write_config_dword(dev1, NVIDIA_X86_64_1_APSIZE, tmp); /* shadow x86-64 registers into NVIDIA registers */ - pci_read_config_dword (k8_northbridges[0], AMD64_GARTAPERTUREBASE, &apbase); + pci_read_config_dword (k8_northbridges.nb_misc[0], AMD64_GARTAPERTUREBASE, + &apbase); /* if x86-64 aperture base is beyond 4G, exit here */ if ( (apbase & 0x7fff) >> (32 - 25) ) { diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c index d2abf514398..64255cef8a7 100644 --- a/drivers/char/agp/generic.c +++ b/drivers/char/agp/generic.c @@ -984,7 +984,9 @@ int agp_generic_create_gatt_table(struct 
agp_bridge_data *bridge) bridge->driver->cache_flush(); #ifdef CONFIG_X86 - set_memory_uc((unsigned long)table, 1 << page_order); + if (set_memory_uc((unsigned long)table, 1 << page_order)) + printk(KERN_WARNING "Could not set GATT table memory to UC!"); + bridge->gatt_table = (void *)table; #else bridge->gatt_table = ioremap_nocache(virt_to_phys(table), diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index eab58db5f91..cd18493c952 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -806,6 +806,8 @@ static const struct intel_driver_description { "G45/G43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, "B43", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG, + "B43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, "G41", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index ee189c74d34..d09b1ab7e8a 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -186,6 +186,8 @@ #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 #define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 #define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 +#define PCI_DEVICE_ID_INTEL_B43_1_HB 0x2E90 +#define PCI_DEVICE_ID_INTEL_B43_1_IG 0x2E92 #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 #define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB 0x2E00 diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index 033e1505fca..3022801669b 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c @@ -13,7 +13,7 @@ #include <linux/module.h> #include <linux/poll.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> @@ -126,6 +126,7 @@ struct apm_user { /* * Local variables */ +static DEFINE_MUTEX(apm_mutex); static atomic_t suspend_acks_pending = ATOMIC_INIT(0); static atomic_t userspace_notification_inhibit = ATOMIC_INIT(0); static int apm_disabled; @@ -274,7 +275,7 @@ apm_ioctl(struct file *filp, u_int cmd, u_long arg) if (!as->suser || !as->writer) return -EPERM; - lock_kernel(); + mutex_lock(&apm_mutex); switch (cmd) { case APM_IOC_SUSPEND: mutex_lock(&state_lock); @@ -335,7 +336,7 @@ apm_ioctl(struct file *filp, u_int cmd, u_long arg) mutex_unlock(&state_lock); break; } - unlock_kernel(); + mutex_unlock(&apm_mutex); return err; } @@ -370,7 +371,7 @@ static int apm_open(struct inode * inode, struct file * filp) { struct apm_user *as; - lock_kernel(); + mutex_lock(&apm_mutex); as = kzalloc(sizeof(*as), GFP_KERNEL); if (as) { /* @@ -390,7 +391,7 @@ static int apm_open(struct inode * inode, struct file * filp) filp->private_data = as; } - unlock_kernel(); + mutex_unlock(&apm_mutex); return as ? 
0 : -ENOMEM; } @@ -402,6 +403,7 @@ static const struct file_operations apm_bios_fops = { .unlocked_ioctl = apm_ioctl, .open = apm_open, .release = apm_release, + .llseek = noop_llseek, }; static struct miscdevice apm_device = { diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index f4ae0e0fb63..e7ba774beda 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c @@ -26,7 +26,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/miscdevice.h> #include <linux/pci.h> #include <linux/wait.h> @@ -60,6 +60,7 @@ #define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 #endif +static DEFINE_MUTEX(ac_mutex); static char *applicom_pci_devnames[] = { "PCI board", "PCI2000IBS / PCI2000CAN", @@ -707,7 +708,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (IS_ERR(adgl)) return PTR_ERR(adgl); - lock_kernel(); + mutex_lock(&ac_mutex); IndexCard = adgl->num_card-1; if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) { @@ -717,7 +718,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) warncount--; } kfree(adgl); - unlock_kernel(); + mutex_unlock(&ac_mutex); return -EINVAL; } @@ -835,7 +836,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } Dummy = readb(apbs[IndexCard].RamIO + VERS); kfree(adgl); - unlock_kernel(); + mutex_unlock(&ac_mutex); return 0; } diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c index 836d4f0a876..44660f1c484 100644 --- a/drivers/char/bfin-otp.c +++ b/drivers/char/bfin-otp.c @@ -222,6 +222,7 @@ static const struct file_operations bfin_otp_fops = { .unlocked_ioctl = bfin_otp_ioctl, .read = bfin_otp_read, .write = bfin_otp_write, + .llseek = default_llseek, }; static struct miscdevice bfin_otp_misc_device = { diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c index d5fa113afe3..f6718f05dad 100644 --- a/drivers/char/briq_panel.c +++ b/drivers/char/briq_panel.c @@ -186,6 +186,7 @@ static const struct file_operations briq_panel_fops = { .write = briq_panel_write, .open = briq_panel_open, .release = briq_panel_release, + .llseek = noop_llseek, }; static struct miscdevice briq_panel_miscdev = { diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c index 91917133ae0..a4a6c2f044b 100644 --- a/drivers/char/bsr.c +++ b/drivers/char/bsr.c @@ -155,6 +155,7 @@ static const struct file_operations bsr_fops = { .owner = THIS_MODULE, .mmap = bsr_mmap, .open = bsr_open, + .llseek = noop_llseek, }; static void bsr_cleanup_devs(void) diff --git a/drivers/char/cs5535_gpio.c b/drivers/char/cs5535_gpio.c index 4d830dc482e..0cf1e5fad9a 100644 --- a/drivers/char/cs5535_gpio.c +++ b/drivers/char/cs5535_gpio.c @@ -169,7 +169,8 @@ static const struct file_operations cs5535_gpio_fops = { .owner = THIS_MODULE, .write = cs5535_gpio_write, .read = cs5535_gpio_read, - .open = cs5535_gpio_open + .open = cs5535_gpio_open, + .llseek = no_llseek, }; static int __init cs5535_gpio_init(void) diff --git a/drivers/char/ds1302.c b/drivers/char/ds1302.c index 170693c93c7..ed8303f9890 100644 --- a/drivers/char/ds1302.c +++ b/drivers/char/ds1302.c @@ -20,7 +20,7 @@ #include <linux/miscdevice.h> #include <linux/delay.h> #include <linux/bcd.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/io.h> @@ -32,6 +32,7 @@ #define RTC_MAJOR_NR 121 /* local major, change later */ +static DEFINE_MUTEX(rtc_mutex); static const char 
ds1302_name[] = "ds1302"; /* Send 8 bits. */ @@ -164,9 +165,9 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) struct rtc_time rtc_tm; memset(&rtc_tm, 0, sizeof (struct rtc_time)); - lock_kernel(); + mutex_lock(&rtc_mutex); get_rtc_time(&rtc_tm); - unlock_kernel(); + mutex_unlock(&rtc_mutex); if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time))) return -EFAULT; return 0; @@ -218,7 +219,7 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) mon = bin2bcd(mon); yrs = bin2bcd(yrs); - lock_kernel(); + mutex_lock(&rtc_mutex); local_irq_save(flags); CMOS_WRITE(yrs, RTC_YEAR); CMOS_WRITE(mon, RTC_MONTH); @@ -227,7 +228,7 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) CMOS_WRITE(min, RTC_MINUTES); CMOS_WRITE(sec, RTC_SECONDS); local_irq_restore(flags); - unlock_kernel(); + mutex_unlock(&rtc_mutex); /* Notice that at this point, the RTC is updated but * the kernel is still running with the old time. @@ -247,10 +248,10 @@ static long rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if(copy_from_user(&tcs_val, (int*)arg, sizeof(int))) return -EFAULT; - lock_kernel(); + mutex_lock(&rtc_mutex); tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F); ds1302_writereg(RTC_TRICKLECHARGER, tcs_val); - unlock_kernel(); + mutex_unlock(&rtc_mutex); return 0; } default: @@ -288,6 +289,7 @@ get_rtc_status(char *buf) static const struct file_operations rtc_fops = { .owner = THIS_MODULE, .unlocked_ioctl = rtc_ioctl, + .llseek = noop_llseek, }; /* Probe for the chip by writing something to its RAM and try reading it back. */ diff --git a/drivers/char/ds1620.c b/drivers/char/ds1620.c index dbee8688f75..aab9605f0b4 100644 --- a/drivers/char/ds1620.c +++ b/drivers/char/ds1620.c @@ -8,7 +8,7 @@ #include <linux/proc_fs.h> #include <linux/capability.h> #include <linux/init.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <mach/hardware.h> #include <asm/mach-types.h> @@ -34,6 +34,7 @@ #define CFG_CPU 2 #define CFG_1SHOT 1 +static DEFINE_MUTEX(ds1620_mutex); static const char *fan_state[] = { "off", "on", "on (hardwired)" }; /* @@ -210,7 +211,6 @@ static void ds1620_read_state(struct therm *therm) static int ds1620_open(struct inode *inode, struct file *file) { - cycle_kernel_lock(); return nonseekable_open(inode, file); } @@ -321,9 +321,9 @@ ds1620_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&ds1620_mutex); ret = ds1620_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&ds1620_mutex); return ret; } @@ -357,6 +357,7 @@ static const struct file_operations ds1620_fops = { .open = ds1620_open, .read = ds1620_read, .unlocked_ioctl = ds1620_unlocked_ioctl, + .llseek = no_llseek, }; static struct miscdevice ds1620_miscdev = { diff --git a/drivers/char/dsp56k.c b/drivers/char/dsp56k.c index 8a1b28a10ef..052797b32bd 100644 --- a/drivers/char/dsp56k.c +++ b/drivers/char/dsp56k.c @@ -32,7 +32,7 @@ #include <linux/mm.h> #include <linux/init.h> #include <linux/device.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/firmware.h> #include <linux/platform_device.h> #include <linux/uaccess.h> /* For put_user and get_user */ @@ -94,6 +94,7 @@ } \ } +static DEFINE_MUTEX(dsp56k_mutex); static struct dsp56k_device { unsigned long in_use; long maxio, timeout; @@ -330,9 +331,9 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd, if (len > DSP56K_MAX_BINARY_LENGTH) { return -EINVAL; } - 
lock_kernel(); + mutex_lock(&dsp56k_mutex); r = dsp56k_upload(bin, len); - unlock_kernel(); + mutex_unlock(&dsp56k_mutex); if (r < 0) { return r; } @@ -342,16 +343,16 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd, case DSP56K_SET_TX_WSIZE: if (arg > 4 || arg < 1) return -EINVAL; - lock_kernel(); + mutex_lock(&dsp56k_mutex); dsp56k.tx_wsize = (int) arg; - unlock_kernel(); + mutex_unlock(&dsp56k_mutex); break; case DSP56K_SET_RX_WSIZE: if (arg > 4 || arg < 1) return -EINVAL; - lock_kernel(); + mutex_lock(&dsp56k_mutex); dsp56k.rx_wsize = (int) arg; - unlock_kernel(); + mutex_unlock(&dsp56k_mutex); break; case DSP56K_HOST_FLAGS: { @@ -363,7 +364,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd, if(get_user(out, &hf->out) < 0) return -EFAULT; - lock_kernel(); + mutex_lock(&dsp56k_mutex); if ((dir & 0x1) && (out & 0x1)) dsp56k_host_interface.icr |= DSP56K_ICR_HF0; else if (dir & 0x1) @@ -378,16 +379,16 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd, if (dsp56k_host_interface.icr & DSP56K_ICR_HF1) status |= 0x2; if (dsp56k_host_interface.isr & DSP56K_ISR_HF2) status |= 0x4; if (dsp56k_host_interface.isr & DSP56K_ISR_HF3) status |= 0x8; - unlock_kernel(); + mutex_unlock(&dsp56k_mutex); return put_user(status, &hf->status); } case DSP56K_HOST_CMD: if (arg > 31 || arg < 0) return -EINVAL; - lock_kernel(); + mutex_lock(&dsp56k_mutex); dsp56k_host_interface.cvr = (u_char)((arg & DSP56K_CVR_HV_MASK) | DSP56K_CVR_HC); - unlock_kernel(); + mutex_unlock(&dsp56k_mutex); break; default: return -EINVAL; @@ -427,7 +428,7 @@ static int dsp56k_open(struct inode *inode, struct file *file) int dev = iminor(inode) & 0x0f; int ret = 0; - lock_kernel(); + mutex_lock(&dsp56k_mutex); switch(dev) { case DSP56K_DEV_56001: @@ -454,7 +455,7 @@ static int dsp56k_open(struct inode *inode, struct file *file) ret = -ENODEV; } out: - unlock_kernel(); + mutex_unlock(&dsp56k_mutex); return ret; } @@ -482,6 +483,7 @@ static const struct file_operations dsp56k_fops = { .unlocked_ioctl = dsp56k_ioctl, .open = dsp56k_open, .release = dsp56k_release, + .llseek = noop_llseek, }; diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index e3859d4eaea..85156dd0cae 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -57,7 +57,7 @@ #include <linux/ioport.h> /* for request_region */ #include <linux/delay.h> /* for loops_per_jiffy */ #include <linux/sched.h> -#include <linux/smp_lock.h> /* cycle_kernel_lock() */ +#include <linux/mutex.h> #include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ #include <asm/uaccess.h> /* for get_user, etc. 
*/ #include <linux/wait.h> /* for wait_queue */ @@ -73,6 +73,7 @@ #define TRACE_RET ((void) 0) #endif /* TRACING */ +static DEFINE_MUTEX(dtlk_mutex); static void dtlk_timer_tick(unsigned long data); static int dtlk_major; @@ -105,6 +106,7 @@ static const struct file_operations dtlk_fops = .unlocked_ioctl = dtlk_ioctl, .open = dtlk_open, .release = dtlk_release, + .llseek = no_llseek, }; /* local prototypes */ @@ -275,9 +277,9 @@ static long dtlk_ioctl(struct file *file, switch (cmd) { case DTLK_INTERROGATE: - lock_kernel(); + mutex_lock(&dtlk_mutex); sp = dtlk_interrogate(); - unlock_kernel(); + mutex_unlock(&dtlk_mutex); if (copy_to_user(argp, sp, sizeof(struct dtlk_settings))) return -EINVAL; return 0; @@ -296,7 +298,6 @@ static int dtlk_open(struct inode *inode, struct file *file) { TRACE_TEXT("(dtlk_open"); - cycle_kernel_lock(); nonseekable_open(inode, file); switch (iminor(inode)) { case DTLK_MINOR: diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c index 82b5a88a82d..0e941b57482 100644 --- a/drivers/char/generic_nvram.c +++ b/drivers/char/generic_nvram.c @@ -19,7 +19,7 @@ #include <linux/miscdevice.h> #include <linux/fcntl.h> #include <linux/init.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/nvram.h> #ifdef CONFIG_PPC_PMAC @@ -28,6 +28,7 @@ #define NVRAM_SIZE 8192 +static DEFINE_MUTEX(nvram_mutex); static ssize_t nvram_len; static loff_t nvram_llseek(struct file *file, loff_t offset, int origin) @@ -120,9 +121,9 @@ static long nvram_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned l { int ret; - lock_kernel(); + mutex_lock(&nvram_mutex); ret = nvram_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&nvram_mutex); return ret; } diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c index b6c2cc167c1..f773a9dd14f 100644 --- a/drivers/char/genrtc.c +++ b/drivers/char/genrtc.c @@ -52,7 +52,7 @@ #include <linux/init.h> #include <linux/poll.h> #include <linux/proc_fs.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/workqueue.h> #include <asm/uaccess.h> @@ -66,6 +66,7 @@ * ioctls. 
*/ +static DEFINE_MUTEX(gen_rtc_mutex); static DECLARE_WAIT_QUEUE_HEAD(gen_rtc_wait); /* @@ -337,9 +338,9 @@ static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd, { int ret; - lock_kernel(); + mutex_lock(&gen_rtc_mutex); ret = gen_rtc_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&gen_rtc_mutex); return ret; } @@ -352,16 +353,16 @@ static long gen_rtc_unlocked_ioctl(struct file *file, unsigned int cmd, static int gen_rtc_open(struct inode *inode, struct file *file) { - lock_kernel(); + mutex_lock(&gen_rtc_mutex); if (gen_rtc_status & RTC_IS_OPEN) { - unlock_kernel(); + mutex_unlock(&gen_rtc_mutex); return -EBUSY; } gen_rtc_status |= RTC_IS_OPEN; gen_rtc_irq_data = 0; irq_active = 0; - unlock_kernel(); + mutex_unlock(&gen_rtc_mutex); return 0; } @@ -497,6 +498,7 @@ static const struct file_operations gen_rtc_fops = { .unlocked_ioctl = gen_rtc_unlocked_ioctl, .open = gen_rtc_open, .release = gen_rtc_release, + .llseek = noop_llseek, }; static struct miscdevice rtc_gen_dev = diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index a0a1829d319..a4eee324eb1 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -30,6 +30,7 @@ #include <linux/bcd.h> #include <linux/seq_file.h> #include <linux/bitops.h> +#include <linux/compat.h> #include <linux/clocksource.h> #include <linux/slab.h> @@ -67,6 +68,7 @@ #define read_counter(MC) readl(MC) #endif +static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */ static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; /* This clocksource driver currently only works on ia64 */ @@ -250,7 +252,7 @@ static int hpet_open(struct inode *inode, struct file *file) if (file->f_mode & FMODE_WRITE) return -EINVAL; - lock_kernel(); + mutex_lock(&hpet_mutex); spin_lock_irq(&hpet_lock); for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next) @@ -264,7 +266,7 @@ static int hpet_open(struct inode *inode, struct file *file) if (!devp) { spin_unlock_irq(&hpet_lock); - unlock_kernel(); + mutex_unlock(&hpet_mutex); return -EBUSY; } @@ -272,7 +274,7 @@ static int hpet_open(struct inode *inode, struct file *file) devp->hd_irqdata = 0; devp->hd_flags |= HPET_OPEN; spin_unlock_irq(&hpet_lock); - unlock_kernel(); + mutex_unlock(&hpet_mutex); hpet_timer_set_irq(devp); @@ -429,22 +431,6 @@ static int hpet_release(struct inode *inode, struct file *file) return 0; } -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); - -static long hpet_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - struct hpet_dev *devp; - int ret; - - devp = file->private_data; - lock_kernel(); - ret = hpet_ioctl_common(devp, cmd, arg, 0); - unlock_kernel(); - - return ret; -} - static int hpet_ioctl_ieon(struct hpet_dev *devp) { struct hpet_timer __iomem *timer; @@ -553,7 +539,8 @@ static inline unsigned long hpet_time_div(struct hpets *hpets, } static int -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) +hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, + struct hpet_info *info) { struct hpet_timer __iomem *timer; struct hpet __iomem *hpet; @@ -594,23 +581,15 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) break; case HPET_INFO: { - struct hpet_info info; - if (devp->hd_ireqfreq) - info.hi_ireqfreq = + info->hi_ireqfreq = hpet_time_div(hpetp, devp->hd_ireqfreq); else - info.hi_ireqfreq = 0; - info.hi_flags = + info->hi_ireqfreq = 0; + info->hi_flags = readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK; - info.hi_hpet = hpetp->hp_which; - 
info.hi_timer = devp - hpetp->hp_dev; - if (kernel) - memcpy((void *)arg, &info, sizeof(info)); - else - if (copy_to_user((void __user *)arg, &info, - sizeof(info))) - err = -EFAULT; + info->hi_hpet = hpetp->hp_which; + info->hi_timer = devp - hpetp->hp_dev; break; } case HPET_EPI: @@ -636,7 +615,7 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) devp->hd_flags &= ~HPET_PERIODIC; break; case HPET_IRQFREQ: - if (!kernel && (arg > hpet_max_freq) && + if ((arg > hpet_max_freq) && !capable(CAP_SYS_RESOURCE)) { err = -EACCES; break; @@ -653,12 +632,63 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) return err; } +static long +hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hpet_info info; + int err; + + mutex_lock(&hpet_mutex); + err = hpet_ioctl_common(file->private_data, cmd, arg, &info); + mutex_unlock(&hpet_mutex); + + if ((cmd == HPET_INFO) && !err && + (copy_to_user((void __user *)arg, &info, sizeof(info)))) + err = -EFAULT; + + return err; +} + +#ifdef CONFIG_COMPAT +struct compat_hpet_info { + compat_ulong_t hi_ireqfreq; /* Hz */ + compat_ulong_t hi_flags; /* information */ + unsigned short hi_hpet; + unsigned short hi_timer; +}; + +static long +hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct hpet_info info; + int err; + + mutex_lock(&hpet_mutex); + err = hpet_ioctl_common(file->private_data, cmd, arg, &info); + mutex_unlock(&hpet_mutex); + + if ((cmd == HPET_INFO) && !err) { + struct compat_hpet_info __user *u = compat_ptr(arg); + if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) || + put_user(info.hi_flags, &u->hi_flags) || + put_user(info.hi_hpet, &u->hi_hpet) || + put_user(info.hi_timer, &u->hi_timer)) + err = -EFAULT; + } + + return err; +} +#endif + static const struct file_operations hpet_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = hpet_read, .poll = hpet_poll, .unlocked_ioctl = hpet_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = hpet_compat_ioctl, +#endif .open = hpet_open, .release = hpet_release, .fasync = hpet_fasync, diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 3d9c61e5acb..788da05190c 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -170,6 +170,7 @@ static const struct file_operations rng_chrdev_ops = { .owner = THIS_MODULE, .open = rng_dev_open, .read = rng_dev_read, + .llseek = noop_llseek, }; static struct miscdevice rng_miscdev = { diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index 4cd8b227c11..3bc0eef8871 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -23,7 +23,7 @@ #include <linux/seq_file.h> #include <linux/dmi.h> #include <linux/capability.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -56,6 +56,7 @@ #define I8K_TEMPERATURE_BUG 1 +static DEFINE_MUTEX(i8k_mutex); static char bios_version[4]; MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)"); @@ -399,9 +400,9 @@ static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&i8k_mutex); ret = i8k_ioctl_unlocked(fp, cmd, arg); - unlock_kernel(); + mutex_unlock(&i8k_mutex); return ret; } diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index d4b71e8d0d2..64a439ce2f8 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c @@ -98,7 +98,7 @@ #include <linux/major.h> #include <linux/wait.h> #include <linux/device.h> -#include 
<linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/firmware.h> #include <linux/platform_device.h> @@ -138,6 +138,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> +static DEFINE_MUTEX(ip2_mutex); static const struct file_operations ip2mem_proc_fops; static const struct file_operations ip2_proc_fops; @@ -236,6 +237,7 @@ static const struct file_operations ip2_ipl = { .write = ip2_ipl_write, .unlocked_ioctl = ip2_ipl_ioctl, .open = ip2_ipl_open, + .llseek = noop_llseek, }; static unsigned long irq_counter; @@ -2897,7 +2899,7 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg ) printk (KERN_DEBUG "IP2IPL: ioctl cmd %d, arg %ld\n", cmd, arg ); #endif - lock_kernel(); + mutex_lock(&ip2_mutex); switch ( iplminor ) { case 0: // IPL device @@ -2961,7 +2963,7 @@ ip2_ipl_ioctl (struct file *pFile, UINT cmd, ULONG arg ) rc = -ENODEV; break; } - unlock_kernel(); + mutex_unlock(&ip2_mutex); return rc; } @@ -2982,7 +2984,6 @@ ip2_ipl_open( struct inode *pInode, struct file *pFile ) #ifdef IP2DEBUG_IPL printk (KERN_DEBUG "IP2IPL: open\n" ); #endif - cycle_kernel_lock(); return 0; } diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index d8ec92a3898..1fc8876af1f 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -44,7 +44,6 @@ #include <linux/init.h> #include <linux/device.h> #include <linux/compat.h> -#include <linux/smp_lock.h> struct ipmi_file_private { @@ -59,6 +58,7 @@ struct ipmi_file_private unsigned int default_retry_time_ms; }; +static DEFINE_MUTEX(ipmi_mutex); static void file_receive_handler(struct ipmi_recv_msg *msg, void *handler_data) { @@ -102,9 +102,9 @@ static int ipmi_fasync(int fd, struct file *file, int on) struct ipmi_file_private *priv = file->private_data; int result; - lock_kernel(); /* could race against open() otherwise */ + mutex_lock(&ipmi_mutex); /* could race against open() otherwise */ result = fasync_helper(fd, file, on, &priv->fasync_queue); - unlock_kernel(); + mutex_unlock(&ipmi_mutex); return (result); } @@ -125,7 +125,7 @@ static int ipmi_open(struct inode *inode, struct file *file) if (!priv) return -ENOMEM; - lock_kernel(); + mutex_lock(&ipmi_mutex); priv->file = file; rv = ipmi_create_user(if_num, @@ -150,7 +150,7 @@ static int ipmi_open(struct inode *inode, struct file *file) priv->default_retry_time_ms = 0; out: - unlock_kernel(); + mutex_unlock(&ipmi_mutex); return rv; } @@ -639,9 +639,9 @@ static long ipmi_unlocked_ioctl(struct file *file, { int ret; - lock_kernel(); + mutex_lock(&ipmi_mutex); ret = ipmi_ioctl(file, cmd, data); - unlock_kernel(); + mutex_unlock(&ipmi_mutex); return ret; } @@ -850,6 +850,7 @@ static const struct file_operations ipmi_fops = { .release = ipmi_release, .fasync = ipmi_fasync, .poll = ipmi_poll, + .llseek = noop_llseek, }; #define DEVICE_NAME "ipmidev" diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 3822b4f49c8..7bd7c45b53e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -305,6 +305,9 @@ static int num_force_kipmid; #ifdef CONFIG_PCI static int pci_registered; #endif +#ifdef CONFIG_ACPI +static int pnp_registered; +#endif #ifdef CONFIG_PPC_OF static int of_registered; #endif @@ -2126,7 +2129,7 @@ static int __devinit ipmi_pnp_probe(struct pnp_dev *dev, { struct acpi_device *acpi_dev; struct smi_info *info; - struct resource *res; + struct resource *res, *res_second; acpi_handle handle; acpi_status status; unsigned long long tmp; @@ -2182,13 +2185,13 @@ static int 
__devinit ipmi_pnp_probe(struct pnp_dev *dev, info->io.addr_data = res->start; info->io.regspacing = DEFAULT_REGSPACING; - res = pnp_get_resource(dev, + res_second = pnp_get_resource(dev, (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? IORESOURCE_IO : IORESOURCE_MEM, 1); - if (res) { - if (res->start > info->io.addr_data) - info->io.regspacing = res->start - info->io.addr_data; + if (res_second) { + if (res_second->start > info->io.addr_data) + info->io.regspacing = res_second->start - info->io.addr_data; } info->io.regsize = DEFAULT_REGSPACING; info->io.regshift = 0; @@ -3359,6 +3362,7 @@ static __devinit int init_ipmi_si(void) #ifdef CONFIG_ACPI pnp_register_driver(&ipmi_pnp_driver); + pnp_registered = 1; #endif #ifdef CONFIG_DMI @@ -3526,7 +3530,8 @@ static __exit void cleanup_ipmi_si(void) pci_unregister_driver(&ipmi_pci_driver); #endif #ifdef CONFIG_ACPI - pnp_unregister_driver(&ipmi_pnp_driver); + if (pnp_registered) + pnp_unregister_driver(&ipmi_pnp_driver); #endif #ifdef CONFIG_PPC_OF diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 654d566ca57..f4d334f2536 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -35,7 +35,7 @@ #include <linux/moduleparam.h> #include <linux/ipmi.h> #include <linux/ipmi_smi.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/watchdog.h> #include <linux/miscdevice.h> #include <linux/init.h> @@ -149,6 +149,7 @@ #define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int) #endif +static DEFINE_MUTEX(ipmi_watchdog_mutex); static int nowayout = WATCHDOG_NOWAYOUT; static ipmi_user_t watchdog_user; @@ -748,9 +749,9 @@ static long ipmi_unlocked_ioctl(struct file *file, { int ret; - lock_kernel(); + mutex_lock(&ipmi_watchdog_mutex); ret = ipmi_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&ipmi_watchdog_mutex); return ret; } @@ -844,7 +845,6 @@ static int ipmi_open(struct inode *ino, struct file *filep) if (test_and_set_bit(0, &ipmi_wdog_open)) return -EBUSY; - cycle_kernel_lock(); /* * Don't start the timer now, let it start on the @@ -909,6 +909,7 @@ static const struct file_operations ipmi_wdog_fops = { .open = ipmi_open, .release = ipmi_close, .fasync = ipmi_fasync, + .llseek = no_llseek, }; static struct miscdevice ipmi_wdog_miscdev = { diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index be28391adb7..667abd23ad6 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c @@ -704,6 +704,7 @@ static const struct file_operations stli_fsiomem = { .read = stli_memread, .write = stli_memwrite, .unlocked_ioctl = stli_memioctl, + .llseek = default_llseek, }; /*****************************************************************************/ diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 938a3a27388..97c3edb95ae 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -126,7 +126,7 @@ #include <linux/device.h> #include <linux/wait.h> #include <linux/jiffies.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/compat.h> #include <linux/parport.h> @@ -140,6 +140,7 @@ /* if you have more than 8 printers, remember to increase LP_NO */ #define LP_NO 8 +static DEFINE_MUTEX(lp_mutex); static struct lp_struct lp_table[LP_NO]; static unsigned int lp_count = 0; @@ -493,7 +494,7 @@ static int lp_open(struct inode * inode, struct file * file) unsigned int minor = iminor(inode); int ret = 0; - lock_kernel(); + mutex_lock(&lp_mutex); if (minor >= LP_NO) { ret = -ENXIO; goto out; @@ -554,7 +555,7 @@ static int 
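/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * pnp_registered flag added to ipmi_si above is the usual "only tear down
 * what was actually set up" idiom, so module cleanup cannot unregister a
 * PNP driver that init never registered. The foo_* names are invented.
 */
#include <linux/init.h>
#include <linux/module.h>

static bool foo_registered;

static int foo_register(void) { return 0; }     /* placeholder */
static void foo_unregister(void) { }            /* placeholder */

static int __init foo_init(void)
{
        if (foo_register() == 0)
                foo_registered = true;
        return 0;
}

static void __exit foo_exit(void)
{
        if (foo_registered)
                foo_unregister();
}

module_init(foo_init);
module_exit(foo_exit);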
lp_open(struct inode * inode, struct file * file) lp_release_parport (&lp_table[minor]); lp_table[minor].current_mode = IEEE1284_MODE_COMPAT; out: - unlock_kernel(); + mutex_unlock(&lp_mutex); return ret; } @@ -680,7 +681,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd, int ret; minor = iminor(file->f_path.dentry->d_inode); - lock_kernel(); + mutex_lock(&lp_mutex); switch (cmd) { case LPSETTIMEOUT: if (copy_from_user(&par_timeout, (void __user *)arg, @@ -694,7 +695,7 @@ static long lp_ioctl(struct file *file, unsigned int cmd, ret = lp_do_ioctl(minor, cmd, arg, (void __user *)arg); break; } - unlock_kernel(); + mutex_unlock(&lp_mutex); return ret; } @@ -709,7 +710,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd, int ret; minor = iminor(file->f_path.dentry->d_inode); - lock_kernel(); + mutex_lock(&lp_mutex); switch (cmd) { case LPSETTIMEOUT: tc = compat_ptr(arg); @@ -730,7 +731,7 @@ static long lp_compat_ioctl(struct file *file, unsigned int cmd, ret = lp_do_ioctl(minor, cmd, arg, compat_ptr(arg)); break; } - unlock_kernel(); + mutex_unlock(&lp_mutex); return ret; } @@ -748,6 +749,7 @@ static const struct file_operations lp_fops = { #ifdef CONFIG_PARPORT_1284 .read = lp_read, #endif + .llseek = noop_llseek, }; /* --- support for console on the line printer ----------------- */ diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c index 83bef4efe37..1aeaaba680d 100644 --- a/drivers/char/mbcs.c +++ b/drivers/char/mbcs.c @@ -25,7 +25,6 @@ #include <linux/mm.h> #include <linux/uio.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/uaccess.h> @@ -42,6 +41,7 @@ #else #define DBG(fmt...) #endif +static DEFINE_MUTEX(mbcs_mutex); static int mbcs_major; static LIST_HEAD(soft_list); @@ -385,19 +385,19 @@ static int mbcs_open(struct inode *ip, struct file *fp) struct mbcs_soft *soft; int minor; - lock_kernel(); + mutex_lock(&mbcs_mutex); minor = iminor(ip); /* Nothing protects access to this list... 
*/ list_for_each_entry(soft, &soft_list, list) { if (soft->nasid == minor) { fp->private_data = soft->cxdev; - unlock_kernel(); + mutex_unlock(&mbcs_mutex); return 0; } } - unlock_kernel(); + mutex_unlock(&mbcs_mutex); return -ENODEV; } diff --git a/drivers/char/mem.c b/drivers/char/mem.c index a398ecdbd75..e985b1c2730 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -788,10 +788,11 @@ static const struct file_operations zero_fops = { /* * capabilities for /dev/zero * - permits private mappings, "copies" are taken of the source of zeros + * - no writeback happens */ static struct backing_dev_info zero_bdi = { .name = "char/mem", - .capabilities = BDI_CAP_MAP_COPY, + .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK, }; static const struct file_operations full_fops = { @@ -804,6 +805,7 @@ static const struct file_operations full_fops = { static const struct file_operations oldmem_fops = { .read = read_oldmem, .open = open_oldmem, + .llseek = default_llseek, }; #endif @@ -830,6 +832,7 @@ static ssize_t kmsg_write(struct file *file, const char __user *buf, static const struct file_operations kmsg_fops = { .write = kmsg_write, + .llseek = noop_llseek, }; static const struct memdev { @@ -881,6 +884,7 @@ static int memory_open(struct inode *inode, struct file *filp) static const struct file_operations memory_fops = { .open = memory_open, + .llseek = noop_llseek, }; static char *mem_devnode(struct device *dev, mode_t *mode) diff --git a/drivers/char/misc.c b/drivers/char/misc.c index abdafd48898..778273c9324 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c @@ -162,6 +162,7 @@ static struct class *misc_class; static const struct file_operations misc_fops = { .owner = THIS_MODULE, .open = misc_open, + .llseek = noop_llseek, }; /** diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c index ea7c99fa978..c070b53984e 100644 --- a/drivers/char/mmtimer.c +++ b/drivers/char/mmtimer.c @@ -32,7 +32,7 @@ #include <linux/interrupt.h> #include <linux/time.h> #include <linux/math64.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <asm/uaccess.h> @@ -59,6 +59,7 @@ extern unsigned long sn_rtc_cycles_per_second; #define rtc_time() (*RTC_COUNTER_ADDR) +static DEFINE_MUTEX(mmtimer_mutex); static long mmtimer_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma); @@ -72,6 +73,7 @@ static const struct file_operations mmtimer_fops = { .owner = THIS_MODULE, .mmap = mmtimer_mmap, .unlocked_ioctl = mmtimer_ioctl, + .llseek = noop_llseek, }; /* @@ -371,7 +373,7 @@ static long mmtimer_ioctl(struct file *file, unsigned int cmd, { int ret = 0; - lock_kernel(); + mutex_lock(&mmtimer_mutex); switch (cmd) { case MMTIMER_GETOFFSET: /* offset of the counter */ @@ -414,7 +416,7 @@ static long mmtimer_ioctl(struct file *file, unsigned int cmd, ret = -ENOTTY; break; } - unlock_kernel(); + mutex_unlock(&mmtimer_mutex); return ret; } diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index ecb89d798e3..966a95bc974 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c @@ -316,7 +316,8 @@ uncached_mmap(struct file *file, struct vm_area_struct *vma) static const struct file_operations fetchop_fops = { .owner = THIS_MODULE, - .mmap = fetchop_mmap + .mmap = fetchop_mmap, + .llseek = noop_llseek, }; static struct miscdevice fetchop_miscdev = { @@ -327,7 +328,8 @@ static struct miscdevice fetchop_miscdev = { static const struct file_operations cached_fops = { .owner 
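/*
 * Editor's note (illustrative sketch, not part of the patch): the .llseek
 * additions in these hunks make each driver's seek behaviour explicit now
 * that the implicit default no longer hides behind the BKL. Roughly:
 * noop_llseek "succeeds" without moving the file position, no_llseek fails
 * with -ESPIPE, and default_llseek performs the traditional f_pos seek.
 * The foo_* names below are invented.
 */
#include <linux/fs.h>
#include <linux/module.h>

static const struct file_operations foo_fops = {
        .owner  = THIS_MODULE,
        .llseek = noop_llseek,  /* or no_llseek / default_llseek, as above */
};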
= THIS_MODULE, - .mmap = cached_mmap + .mmap = cached_mmap, + .llseek = noop_llseek, }; static struct miscdevice cached_miscdev = { @@ -338,7 +340,8 @@ static struct miscdevice cached_miscdev = { static const struct file_operations uncached_fops = { .owner = THIS_MODULE, - .mmap = uncached_mmap + .mmap = uncached_mmap, + .llseek = noop_llseek, }; static struct miscdevice uncached_miscdev = { diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c index a4ec50c9507..1d82d5838f0 100644 --- a/drivers/char/mwave/mwavedd.c +++ b/drivers/char/mwave/mwavedd.c @@ -56,7 +56,7 @@ #include <linux/serial.h> #include <linux/sched.h> #include <linux/spinlock.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/delay.h> #include <linux/serial_8250.h> #include "smapi.h" @@ -73,6 +73,7 @@ MODULE_LICENSE("GPL"); * checks are made against other devices (ie. superio) for conflicts. * We'll depend on users using the tpctl utility to do that for now */ +static DEFINE_MUTEX(mwave_mutex); int mwave_debug = 0; int mwave_3780i_irq = 0; int mwave_3780i_io = 0; @@ -101,7 +102,6 @@ static int mwave_open(struct inode *inode, struct file *file) PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_open, exit return retval %x\n", retval); - cycle_kernel_lock(); return retval; } @@ -136,9 +136,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RESET" " calling tp3780I_ResetDSP\n"); - lock_kernel(); + mutex_lock(&mwave_mutex); retval = tp3780I_ResetDSP(&pDrvData->rBDData); - unlock_kernel(); + mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RESET" " retval %x from tp3780I_ResetDSP\n", @@ -149,9 +149,9 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, PRINTK_1(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RUN" " calling tp3780I_StartDSP\n"); - lock_kernel(); + mutex_lock(&mwave_mutex); retval = tp3780I_StartDSP(&pDrvData->rBDData); - unlock_kernel(); + mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_RUN" " retval %x from tp3780I_StartDSP\n", @@ -165,10 +165,10 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, "mwavedd::mwave_ioctl," " IOCTL_MW_DSP_ABILITIES calling" " tp3780I_QueryAbilities\n"); - lock_kernel(); + mutex_lock(&mwave_mutex); retval = tp3780I_QueryAbilities(&pDrvData->rBDData, &rAbilities); - unlock_kernel(); + mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl, IOCTL_MW_DSP_ABILITIES" " retval %x from tp3780I_QueryAbilities\n", @@ -199,13 +199,13 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, "mwavedd::mwave_ioctl IOCTL_MW_READ_DATA," " size %lx, ioarg %lx pusBuffer %p\n", rReadData.ulDataLength, ioarg, pusBuffer); - lock_kernel(); + mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rReadData.ulDataLength, rReadData.usDspAddress); - unlock_kernel(); + mutex_unlock(&mwave_mutex); } break; @@ -223,12 +223,12 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " size %lx, ioarg %lx pusBuffer %p\n", rReadData.ulDataLength / 2, ioarg, pusBuffer); - lock_kernel(); + mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rReadData.ulDataLength / 2, rReadData.usDspAddress); - unlock_kernel(); + mutex_unlock(&mwave_mutex); } break; @@ -246,12 +246,12 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, 
ioarg, pusBuffer); - lock_kernel(); + mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspDStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); - unlock_kernel(); + mutex_unlock(&mwave_mutex); } break; @@ -269,12 +269,12 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " size %lx, ioarg %lx pusBuffer %p\n", rWriteData.ulDataLength, ioarg, pusBuffer); - lock_kernel(); + mutex_lock(&mwave_mutex); retval = tp3780I_ReadWriteDspIStore(&pDrvData->rBDData, iocmd, pusBuffer, rWriteData.ulDataLength, rWriteData.usDspAddress); - unlock_kernel(); + mutex_unlock(&mwave_mutex); } break; @@ -295,10 +295,10 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum, pDrvData->IPCs[ipcnum].usIntCount); - lock_kernel(); + mutex_lock(&mwave_mutex); pDrvData->IPCs[ipcnum].bIsHere = FALSE; pDrvData->IPCs[ipcnum].bIsEnabled = TRUE; - unlock_kernel(); + mutex_unlock(&mwave_mutex); PRINTK_2(TRACE_MWAVE, "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC" @@ -323,7 +323,7 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum, pDrvData->IPCs[ipcnum].usIntCount); - lock_kernel(); + mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { DECLARE_WAITQUEUE(wait, current); @@ -364,7 +364,7 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, " processing\n", ipcnum); } - unlock_kernel(); + mutex_unlock(&mwave_mutex); } break; @@ -383,14 +383,14 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd, ipcnum); return -EINVAL; } - lock_kernel(); + mutex_lock(&mwave_mutex); if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) { pDrvData->IPCs[ipcnum].bIsEnabled = FALSE; if (pDrvData->IPCs[ipcnum].bIsHere == TRUE) { wake_up_interruptible(&pDrvData->IPCs[ipcnum].ipc_wait_queue); } } - unlock_kernel(); + mutex_unlock(&mwave_mutex); } break; @@ -479,7 +479,8 @@ static const struct file_operations mwave_fops = { .write = mwave_write, .unlocked_ioctl = mwave_ioctl, .open = mwave_open, - .release = mwave_close + .release = mwave_close, + .llseek = default_llseek, }; diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 66d2917b003..166f1e7aaa7 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -109,10 +109,11 @@ #include <linux/spinlock.h> #include <linux/io.h> #include <linux/uaccess.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/system.h> +static DEFINE_MUTEX(nvram_mutex); static DEFINE_SPINLOCK(nvram_state_lock); static int nvram_open_cnt; /* #times opened */ static int nvram_open_mode; /* special open modes */ @@ -308,7 +309,7 @@ static long nvram_ioctl(struct file *file, unsigned int cmd, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - lock_kernel(); + mutex_lock(&nvram_mutex); spin_lock_irq(&rtc_lock); for (i = 0; i < NVRAM_BYTES; ++i) @@ -316,7 +317,7 @@ static long nvram_ioctl(struct file *file, unsigned int cmd, __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); - unlock_kernel(); + mutex_unlock(&nvram_mutex); return 0; case NVRAM_SETCKS: @@ -325,11 +326,11 @@ static long nvram_ioctl(struct file *file, unsigned int cmd, if (!capable(CAP_SYS_ADMIN)) return -EACCES; - lock_kernel(); + mutex_lock(&nvram_mutex); spin_lock_irq(&rtc_lock); __nvram_set_checksum(); spin_unlock_irq(&rtc_lock); - unlock_kernel(); + mutex_unlock(&nvram_mutex); return 0; default: diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index 2604246501e..8994ce32e6c 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c @@ -182,6 +182,7 @@ static int button_read 
(struct file *filp, char __user *buffer, static const struct file_operations button_fops = { .owner = THIS_MODULE, .read = button_read, + .llseek = noop_llseek, }; /* diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c index 043a1c7b86b..a12f52400db 100644 --- a/drivers/char/nwflash.c +++ b/drivers/char/nwflash.c @@ -25,7 +25,6 @@ #include <linux/spinlock.h> #include <linux/rwsem.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/jiffies.h> @@ -41,6 +40,7 @@ #define NWFLASH_VERSION "6.4" +static DEFINE_MUTEX(flash_mutex); static void kick_open(void); static int get_flash_id(void); static int erase_block(int nBlock); @@ -96,7 +96,7 @@ static int get_flash_id(void) static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { - lock_kernel(); + mutex_lock(&flash_mutex); switch (cmd) { case CMD_WRITE_DISABLE: gbWriteBase64Enable = 0; @@ -114,10 +114,10 @@ static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) default: gbWriteBase64Enable = 0; gbWriteEnable = 0; - unlock_kernel(); + mutex_unlock(&flash_mutex); return -EINVAL; } - unlock_kernel(); + mutex_unlock(&flash_mutex); return 0; } @@ -282,7 +282,7 @@ static loff_t flash_llseek(struct file *file, loff_t offset, int orig) { loff_t ret; - lock_kernel(); + mutex_lock(&flash_mutex); if (flashdebug) printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n", (unsigned int) offset, orig); @@ -317,7 +317,7 @@ static loff_t flash_llseek(struct file *file, loff_t offset, int orig) default: ret = -EINVAL; } - unlock_kernel(); + mutex_unlock(&flash_mutex); return ret; } diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c index 8ecbcc174c1..b304ec05250 100644 --- a/drivers/char/pc8736x_gpio.c +++ b/drivers/char/pc8736x_gpio.c @@ -234,6 +234,7 @@ static const struct file_operations pc8736x_gpio_fileops = { .open = pc8736x_gpio_open, .write = nsc_gpio_write, .read = nsc_gpio_read, + .llseek = no_llseek, }; static void __init pc8736x_init_shadow(void) diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index ec73d9f6d9e..6835c23e9a5 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -30,11 +30,10 @@ #include <linux/fs.h> #include <linux/delay.h> #include <linux/bitrev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/io.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> @@ -55,7 +54,7 @@ __func__ , ## args); \ } while (0) -static char *version = "cm4000_cs.c v2.4.0gm6 - All bugs added by Harald Welte"; +static DEFINE_MUTEX(cmm_mutex); #define T_1SEC (HZ) #define T_10MSEC msecs_to_jiffies(10) @@ -1418,7 +1417,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) iminor(inode), ioctl_names[_IOC_NR(cmd)]); #endif - lock_kernel(); + mutex_lock(&cmm_mutex); rc = -ENODEV; link = dev_table[iminor(inode)]; if (!pcmcia_dev_present(link)) { @@ -1626,7 +1625,7 @@ static long cmm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) rc = -ENOTTY; } out: - unlock_kernel(); + mutex_unlock(&cmm_mutex); return rc; } @@ -1640,7 +1639,7 @@ static int cmm_open(struct inode *inode, struct file *filp) if (minor >= CM4000_MAX_DEV) return -ENODEV; - lock_kernel(); + mutex_lock(&cmm_mutex); link = dev_table[minor]; if (link == NULL || !pcmcia_dev_present(link)) { ret = -ENODEV; @@ -1685,7 +1684,7 @@ static int cmm_open(struct inode *inode, struct file 
*filp) DEBUGP(2, dev, "<- cmm_open\n"); ret = nonseekable_open(inode, filp); out: - unlock_kernel(); + mutex_unlock(&cmm_mutex); return ret; } @@ -1742,20 +1741,8 @@ static void cmm_cm4000_release(struct pcmcia_device * link) /*==== Interface to PCMCIA Layer =======================================*/ -static int cm4000_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int cm4000_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if (!cfg->io.nwin) - return -ENODEV; - - p_dev->resource[0]->start = cfg->io.win[0].base; - p_dev->resource[0]->end = cfg->io.win[0].len; - p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags); - p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK; - return pcmcia_request_io(p_dev); } @@ -1763,13 +1750,13 @@ static int cm4000_config(struct pcmcia_device * link, int devno) { struct cm4000_dev *dev; + link->config_flags |= CONF_AUTO_SET_IO; + /* read the config-tuples */ if (pcmcia_loop_config(link, cm4000_config_check, NULL)) goto cs_release; - link->conf.IntType = 00000002; - - if (pcmcia_request_configuration(link, &link->conf)) + if (pcmcia_enable_device(link)) goto cs_release; dev = link->priv; @@ -1829,7 +1816,6 @@ static int cm4000_probe(struct pcmcia_device *link) dev->p_dev = link; link->priv = dev; - link->conf.IntType = INT_MEMORY_AND_IO; dev_table[i] = link; init_waitqueue_head(&dev->devq); @@ -1880,6 +1866,7 @@ static const struct file_operations cm4000_fops = { .unlocked_ioctl = cmm_ioctl, .open = cmm_open, .release= cmm_close, + .llseek = no_llseek, }; static struct pcmcia_device_id cm4000_ids[] = { @@ -1891,9 +1878,7 @@ MODULE_DEVICE_TABLE(pcmcia, cm4000_ids); static struct pcmcia_driver cm4000_driver = { .owner = THIS_MODULE, - .drv = { - .name = "cm4000_cs", - }, + .name = "cm4000_cs", .probe = cm4000_probe, .remove = cm4000_detach, .suspend = cm4000_suspend, @@ -1905,8 +1890,6 @@ static int __init cmm_init(void) { int rc; - printk(KERN_INFO "%s\n", version); - cmm_class = class_create(THIS_MODULE, "cardman_4000"); if (IS_ERR(cmm_class)) return PTR_ERR(cmm_class); @@ -1931,7 +1914,6 @@ static int __init cmm_init(void) static void __exit cmm_exit(void) { - printk(KERN_INFO MODULE_NAME ": unloading\n"); pcmcia_unregister_driver(&cm4000_driver); unregister_chrdev(major, DEVICE_NAME); class_destroy(cmm_class); diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index 815cde1d057..5d8d59e865f 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c @@ -24,12 +24,11 @@ #include <linux/fs.h> #include <linux/delay.h> #include <linux/poll.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/wait.h> #include <asm/uaccess.h> #include <asm/io.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> @@ -49,8 +48,7 @@ __func__ , ## args); \ } while (0) -static char *version = -"OMNIKEY CardMan 4040 v1.1.0gm5 - All bugs added by Harald Welte"; +static DEFINE_MUTEX(cm4040_mutex); #define CCID_DRIVER_BULK_DEFAULT_TIMEOUT (150*HZ) #define CCID_DRIVER_ASYNC_POWERUP_TIMEOUT (35*HZ) @@ -444,7 +442,7 @@ static int cm4040_open(struct inode *inode, struct file *filp) if (minor >= CM_MAX_DEV) return -ENODEV; - lock_kernel(); + mutex_lock(&cm4040_mutex); link = dev_table[minor]; if (link == NULL || !pcmcia_dev_present(link)) { ret = -ENODEV; @@ -473,7 +471,7 @@ static int cm4040_open(struct inode *inode, struct file 
*filp) DEBUGP(2, dev, "<- cm4040_open (successfully)\n"); ret = nonseekable_open(inode, filp); out: - unlock_kernel(); + mutex_unlock(&cm4040_mutex); return ret; } @@ -516,26 +514,9 @@ static void cm4040_reader_release(struct pcmcia_device *link) return; } -static int cm4040_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int cm4040_config_check(struct pcmcia_device *p_dev, void *priv_data) { - int rc; - if (!cfg->io.nwin) - return -ENODEV; - - /* Get the IOaddr */ - p_dev->resource[0]->start = cfg->io.win[0].base; - p_dev->resource[0]->end = cfg->io.win[0].len; - p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags); - p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK; - rc = pcmcia_request_io(p_dev); - - dev_printk(KERN_INFO, &p_dev->dev, - "pcmcia_request_io returned 0x%x\n", rc); - return rc; + return pcmcia_request_io(p_dev); } @@ -544,15 +525,15 @@ static int reader_config(struct pcmcia_device *link, int devno) struct reader_dev *dev; int fail_rc; + link->config_flags |= CONF_AUTO_SET_IO; + if (pcmcia_loop_config(link, cm4040_config_check, NULL)) goto cs_release; - link->conf.IntType = 00000002; - - fail_rc = pcmcia_request_configuration(link, &link->conf); + fail_rc = pcmcia_enable_device(link); if (fail_rc != 0) { dev_printk(KERN_INFO, &link->dev, - "pcmcia_request_configuration failed 0x%x\n", + "pcmcia_enable_device failed 0x%x\n", fail_rc); goto cs_release; } @@ -599,7 +580,6 @@ static int reader_probe(struct pcmcia_device *link) link->priv = dev; dev->p_dev = link; - link->conf.IntType = INT_MEMORY_AND_IO; dev_table[i] = link; init_waitqueue_head(&dev->devq); @@ -650,6 +630,7 @@ static const struct file_operations reader_fops = { .open = cm4040_open, .release = cm4040_close, .poll = cm4040_poll, + .llseek = no_llseek, }; static struct pcmcia_device_id cm4040_ids[] = { @@ -662,9 +643,7 @@ MODULE_DEVICE_TABLE(pcmcia, cm4040_ids); static struct pcmcia_driver reader_driver = { .owner = THIS_MODULE, - .drv = { - .name = "cm4040_cs", - }, + .name = "cm4040_cs", .probe = reader_probe, .remove = reader_detach, .id_table = cm4040_ids, @@ -674,7 +653,6 @@ static int __init cm4040_init(void) { int rc; - printk(KERN_INFO "%s\n", version); cmx_class = class_create(THIS_MODULE, "cardman_4040"); if (IS_ERR(cmx_class)) return PTR_ERR(cmx_class); @@ -699,7 +677,6 @@ static int __init cm4040_init(void) static void __exit cm4040_exit(void) { - printk(KERN_INFO MODULE_NAME ": unloading\n"); pcmcia_unregister_driver(&reader_driver); unregister_chrdev(major, DEVICE_NAME); class_destroy(cmx_class); diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c index 67bdb05798b..94b8eb4d691 100644 --- a/drivers/char/pcmcia/ipwireless/main.c +++ b/drivers/char/pcmcia/ipwireless/main.c @@ -32,7 +32,6 @@ #include <pcmcia/device_id.h> #include <pcmcia/ss.h> #include <pcmcia/ds.h> -#include <pcmcia/cs.h> static struct pcmcia_device_id ipw_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x02f2, 0x0100), @@ -76,23 +75,18 @@ static void signalled_reboot_callback(void *callback_data) schedule_work(&ipw->work_reboot); } -static int ipwireless_probe(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) { struct ipw_dev *ipw = priv_data; struct resource *io_resource; int ret; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - p_dev->resource[0]->start = cfg->io.win[0].base; - p_dev->resource[0]->end = cfg->io.win[0].len; /* 0x40 causes it to generate level mode interrupts. */ /* 0x04 enables IREQ pin. */ - p_dev->conf.ConfigIndex = cfg->index | 0x44; + p_dev->config_index |= 0x44; p_dev->io_lines = 16; ret = pcmcia_request_io(p_dev); if (ret) @@ -102,65 +96,49 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, resource_size(p_dev->resource[0]), IPWIRELESS_PCCARD_NAME); - if (cfg->mem.nwin == 0) - return 0; - - ipw->request_common_memory.Attributes = + p_dev->resource[2]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; - ipw->request_common_memory.Base = cfg->mem.win[0].host_addr; - ipw->request_common_memory.Size = cfg->mem.win[0].len; - if (ipw->request_common_memory.Size < 0x1000) - ipw->request_common_memory.Size = 0x1000; - ipw->request_common_memory.AccessSpeed = 0; - - ret = pcmcia_request_window(p_dev, &ipw->request_common_memory, - &ipw->handle_common_memory); + ret = pcmcia_request_window(p_dev, p_dev->resource[2], 0); if (ret != 0) goto exit1; - ret = pcmcia_map_mem_page(p_dev, ipw->handle_common_memory, - cfg->mem.win[0].card_addr); - + ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); if (ret != 0) goto exit2; - ipw->is_v2_card = cfg->mem.win[0].len == 0x100; + ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; - ipw->common_memory = ioremap(ipw->request_common_memory.Base, - ipw->request_common_memory.Size); - request_mem_region(ipw->request_common_memory.Base, - ipw->request_common_memory.Size, + ipw->attr_memory = ioremap(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); + request_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2]), IPWIRELESS_PCCARD_NAME); - ipw->request_attr_memory.Attributes = - WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | WIN_ENABLE; - ipw->request_attr_memory.Base = 0; - ipw->request_attr_memory.Size = 0; /* this used to be 0x1000 */ - ipw->request_attr_memory.AccessSpeed = 0; - - ret = pcmcia_request_window(p_dev, &ipw->request_attr_memory, - &ipw->handle_attr_memory); - + p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | + WIN_ENABLE; + p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ + ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); if (ret != 0) goto exit2; - ret = pcmcia_map_mem_page(p_dev, ipw->handle_attr_memory, 0); + ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); if (ret != 0) goto exit3; - ipw->attr_memory = ioremap(ipw->request_attr_memory.Base, - ipw->request_attr_memory.Size); - request_mem_region(ipw->request_attr_memory.Base, - ipw->request_attr_memory.Size, IPWIRELESS_PCCARD_NAME); + ipw->attr_memory = ioremap(p_dev->resource[3]->start, + resource_size(p_dev->resource[3])); + request_mem_region(p_dev->resource[3]->start, + resource_size(p_dev->resource[3]), + IPWIRELESS_PCCARD_NAME); return 0; exit3: exit2: if (ipw->common_memory) { - release_mem_region(ipw->request_common_memory.Base, - ipw->request_common_memory.Size); + release_mem_region(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); iounmap(ipw->common_memory); } exit1: @@ -175,14 +153,13 @@ static int config_ipwireless(struct ipw_dev *ipw) int ret = 0; ipw->is_v2_card = 0; + link->config_flags |= CONF_AUTO_SET_IO | CONF_AUTO_SET_IOMEM | + CONF_ENABLE_IRQ; ret = pcmcia_loop_config(link, ipwireless_probe, ipw); if (ret != 0) return ret; - link->conf.Attributes = CONF_ENABLE_IRQ; - 
link->conf.IntType = INT_MEMORY_AND_IO; - INIT_WORK(&ipw->work_reboot, signalled_reboot_work); ipwireless_init_hardware_v1(ipw->hardware, link->resource[0]->start, @@ -201,13 +178,9 @@ static int config_ipwireless(struct ipw_dev *ipw) (unsigned int) link->irq); if (ipw->attr_memory && ipw->common_memory) printk(KERN_INFO IPWIRELESS_PCCARD_NAME - ": attr memory 0x%08lx-0x%08lx, common memory 0x%08lx-0x%08lx\n", - ipw->request_attr_memory.Base, - ipw->request_attr_memory.Base - + ipw->request_attr_memory.Size - 1, - ipw->request_common_memory.Base, - ipw->request_common_memory.Base - + ipw->request_common_memory.Size - 1); + ": attr memory %pR, common memory %pR\n", + link->resource[3], + link->resource[2]); ipw->network = ipwireless_network_create(ipw->hardware); if (!ipw->network) @@ -223,25 +196,23 @@ static int config_ipwireless(struct ipw_dev *ipw) * Do the RequestConfiguration last, because it enables interrupts. * Then we don't get any interrupts before we're ready for them. */ - ret = pcmcia_request_configuration(link, &link->conf); - + ret = pcmcia_enable_device(link); if (ret != 0) goto exit; return 0; exit: - if (ipw->attr_memory) { - release_mem_region(ipw->request_attr_memory.Base, - ipw->request_attr_memory.Size); - iounmap(ipw->attr_memory); - - } if (ipw->common_memory) { - release_mem_region(ipw->request_common_memory.Base, - ipw->request_common_memory.Size); + release_mem_region(link->resource[2]->start, + resource_size(link->resource[2])); iounmap(ipw->common_memory); } + if (ipw->attr_memory) { + release_mem_region(link->resource[3]->start, + resource_size(link->resource[3])); + iounmap(ipw->attr_memory); + } pcmcia_disable_device(link); return -1; } @@ -249,13 +220,13 @@ exit: static void release_ipwireless(struct ipw_dev *ipw) { if (ipw->common_memory) { - release_mem_region(ipw->request_common_memory.Base, - ipw->request_common_memory.Size); + release_mem_region(ipw->link->resource[2]->start, + resource_size(ipw->link->resource[2])); iounmap(ipw->common_memory); } if (ipw->attr_memory) { - release_mem_region(ipw->request_attr_memory.Base, - ipw->request_attr_memory.Size); + release_mem_region(ipw->link->resource[3]->start, + resource_size(ipw->link->resource[3])); iounmap(ipw->attr_memory); } pcmcia_disable_device(ipw->link); @@ -324,7 +295,7 @@ static struct pcmcia_driver me = { .owner = THIS_MODULE, .probe = ipwireless_attach, .remove = ipwireless_detach, - .drv = { .name = IPWIRELESS_PCCARD_NAME }, + .name = IPWIRELESS_PCCARD_NAME, .id_table = ipw_ids }; @@ -336,9 +307,6 @@ static int __init init_ipwireless(void) { int ret; - printk(KERN_INFO IPWIRELESS_PCCARD_NAME " " - IPWIRELESS_PCMCIA_VERSION " by " IPWIRELESS_PCMCIA_AUTHOR "\n"); - ret = ipwireless_tty_init(); if (ret != 0) return ret; @@ -355,9 +323,6 @@ static int __init init_ipwireless(void) */ static void __exit exit_ipwireless(void) { - printk(KERN_INFO IPWIRELESS_PCCARD_NAME " " - IPWIRELESS_PCMCIA_VERSION " removed\n"); - pcmcia_unregister_driver(&me); ipwireless_tty_release(); } diff --git a/drivers/char/pcmcia/ipwireless/main.h b/drivers/char/pcmcia/ipwireless/main.h index c207be87b59..f2cbb116bcc 100644 --- a/drivers/char/pcmcia/ipwireless/main.h +++ b/drivers/char/pcmcia/ipwireless/main.h @@ -21,7 +21,6 @@ #include <linux/sched.h> #include <linux/types.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -45,13 +44,9 @@ struct ipw_dev { struct pcmcia_device *link; int is_v2_card; - window_handle_t handle_attr_memory; void __iomem *attr_memory; - win_req_t 
request_attr_memory; - window_handle_t handle_common_memory; void __iomem *common_memory; - win_req_t request_common_memory; /* Reference to attribute memory, containing CIS data */ void *attribute_memory; diff --git a/drivers/char/pcmcia/ipwireless/tty.h b/drivers/char/pcmcia/ipwireless/tty.h index 3e163d4cab1..747b2d63786 100644 --- a/drivers/char/pcmcia/ipwireless/tty.h +++ b/drivers/char/pcmcia/ipwireless/tty.h @@ -21,7 +21,6 @@ #include <linux/types.h> #include <linux/sched.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 9ecd6bef5d3..be181005760 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -70,7 +70,6 @@ #include <linux/workqueue.h> #include <linux/hdlc.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -550,9 +549,6 @@ static int mgslpc_probe(struct pcmcia_device *link) /* Initialize the struct pcmcia_device structure */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - ret = mgslpc_config(link); if (ret) return ret; @@ -565,20 +561,8 @@ static int mgslpc_probe(struct pcmcia_device *link) /* Card has been inserted. */ -static int mgslpc_ioprobe(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { - if (!cfg->io.nwin) - return -ENODEV; - - p_dev->resource[0]->start = cfg->io.win[0].base; - p_dev->resource[0]->end = cfg->io.win[0].len; - p_dev->resource[0]->flags |= pcmcia_io_cfg_data_width(cfg->io.flags); - p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK; - return pcmcia_request_io(p_dev); } @@ -590,32 +574,24 @@ static int mgslpc_config(struct pcmcia_device *link) if (debug_level >= DEBUG_LEVEL_INFO) printk("mgslpc_config(0x%p)\n", link); + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL); if (ret != 0) goto failed; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.ConfigIndex = 8; - link->conf.Present = PRESENT_OPTION; + link->config_index = 8; + link->config_regs = PRESENT_OPTION; ret = pcmcia_request_irq(link, mgslpc_isr); if (ret) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; info->io_base = link->resource[0]->start; info->irq_level = link->irq; - - dev_info(&link->dev, "index 0x%02x:", - link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(", io %pR", link->resource[0]); - printk("\n"); return 0; failed: @@ -2797,9 +2773,7 @@ MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids); static struct pcmcia_driver mgslpc_driver = { .owner = THIS_MODULE, - .drv = { - .name = "synclink_cs", - }, + .name = "synclink_cs", .probe = mgslpc_probe, .remove = mgslpc_detach, .id_table = mgslpc_ids, @@ -2835,8 +2809,6 @@ static void synclink_cs_cleanup(void) { int rc; - printk("Unloading %s: version %s\n", driver_name, driver_version); - while(mgslpc_device_list) mgslpc_remove_device(mgslpc_device_list); @@ -2859,8 +2831,6 @@ static int __init synclink_cs_init(void) BREAKPOINT(); } - printk("%s %s\n", driver_name, driver_version); - if ((rc = pcmcia_register_driver(&mgslpc_driver)) < 0) return rc; @@ -4127,6 +4097,8 @@ static int 
hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) if (cmd != SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); + memset(&new_line, 0, size); + switch(ifr->ifr_settings.type) { case IF_GET_IFACE: /* return current sync_serial_settings */ diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 02abfddce45..723152d978a 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -67,7 +67,7 @@ #include <linux/slab.h> #include <linux/major.h> #include <linux/ppdev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/uaccess.h> #define PP_VERSION "ppdev: user-space parallel port driver" @@ -97,6 +97,7 @@ struct pp_struct { /* ROUND_UP macro from fs/select.c */ #define ROUND_UP(x,y) (((x)+(y)-1)/(y)) +static DEFINE_MUTEX(pp_do_mutex); static inline void pp_enable_irq (struct pp_struct *pp) { struct parport *port = pp->pdev->port; @@ -630,9 +631,9 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static long pp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&pp_do_mutex); ret = pp_do_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&pp_do_mutex); return ret; } @@ -641,7 +642,6 @@ static int pp_open (struct inode * inode, struct file * file) unsigned int minor = iminor(inode); struct pp_struct *pp; - cycle_kernel_lock(); if (minor >= PARPORT_MAX) return -ENXIO; diff --git a/drivers/char/random.c b/drivers/char/random.c index caef35a4689..5a1aa64f4e7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1165,6 +1165,7 @@ const struct file_operations random_fops = { .poll = random_poll, .unlocked_ioctl = random_ioctl, .fasync = random_fasync, + .llseek = noop_llseek, }; const struct file_operations urandom_fops = { @@ -1172,6 +1173,7 @@ const struct file_operations urandom_fops = { .write = random_write, .unlocked_ioctl = random_ioctl, .fasync = random_fasync, + .llseek = noop_llseek, }; /*************************************************************** diff --git a/drivers/char/raw.c b/drivers/char/raw.c index b38942f6bf3..bfe25ea9766 100644 --- a/drivers/char/raw.c +++ b/drivers/char/raw.c @@ -19,8 +19,8 @@ #include <linux/cdev.h> #include <linux/device.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/gfp.h> +#include <linux/compat.h> #include <asm/uaccess.h> @@ -55,7 +55,6 @@ static int raw_open(struct inode *inode, struct file *filp) return 0; } - lock_kernel(); mutex_lock(&raw_mutex); /* @@ -82,7 +81,6 @@ static int raw_open(struct inode *inode, struct file *filp) bdev->bd_inode->i_mapping; filp->private_data = bdev; mutex_unlock(&raw_mutex); - unlock_kernel(); return 0; out2: @@ -91,7 +89,6 @@ out1: blkdev_put(bdev, filp->f_mode); out: mutex_unlock(&raw_mutex); - unlock_kernel(); return err; } @@ -125,20 +122,84 @@ static long raw_ioctl(struct file *filp, unsigned int command, unsigned long arg) { struct block_device *bdev = filp->private_data; - int ret; + return blkdev_ioctl(bdev, 0, command, arg); +} + +static int bind_set(int number, u64 major, u64 minor) +{ + dev_t dev = MKDEV(major, minor); + struct raw_device_data *rawdev; + int err = 0; - lock_kernel(); - ret = blkdev_ioctl(bdev, 0, command, arg); - unlock_kernel(); + if (number <= 0 || number >= MAX_RAW_MINORS) + return -EINVAL; - return ret; + if (MAJOR(dev) != major || MINOR(dev) != minor) + return -EINVAL; + + rawdev = &raw_devices[number]; + + /* + * This is like making block devices, so demand the + * same capability + */ + if (!capable(CAP_SYS_ADMIN)) + 
return -EPERM; + + /* + * For now, we don't need to check that the underlying + * block device is present or not: we can do that when + * the raw device is opened. Just check that the + * major/minor numbers make sense. + */ + + if (MAJOR(dev) == 0 && dev != 0) + return -EINVAL; + + mutex_lock(&raw_mutex); + if (rawdev->inuse) { + mutex_unlock(&raw_mutex); + return -EBUSY; + } + if (rawdev->binding) { + bdput(rawdev->binding); + module_put(THIS_MODULE); + } + if (!dev) { + /* unbind */ + rawdev->binding = NULL; + device_destroy(raw_class, MKDEV(RAW_MAJOR, number)); + } else { + rawdev->binding = bdget(dev); + if (rawdev->binding == NULL) { + err = -ENOMEM; + } else { + dev_t raw = MKDEV(RAW_MAJOR, number); + __module_get(THIS_MODULE); + device_destroy(raw_class, raw); + device_create(raw_class, NULL, raw, NULL, + "raw%d", number); + } + } + mutex_unlock(&raw_mutex); + return err; } -static void bind_device(struct raw_config_request *rq) +static int bind_get(int number, dev_t *dev) { - device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor)); - device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor), NULL, - "raw%d", rq->raw_minor); + struct raw_device_data *rawdev; + struct block_device *bdev; + + if (number <= 0 || number >= MAX_RAW_MINORS) + return -EINVAL; + + rawdev = &raw_devices[number]; + + mutex_lock(&raw_mutex); + bdev = rawdev->binding; + *dev = bdev ? bdev->bd_dev : 0; + mutex_unlock(&raw_mutex); + return 0; } /* @@ -149,105 +210,78 @@ static long raw_ctl_ioctl(struct file *filp, unsigned int command, unsigned long arg) { struct raw_config_request rq; - struct raw_device_data *rawdev; - int err = 0; + dev_t dev; + int err; - lock_kernel(); switch (command) { case RAW_SETBIND: + if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) + return -EFAULT; + + return bind_set(rq.raw_minor, rq.block_major, rq.block_minor); + case RAW_GETBIND: + if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) + return -EFAULT; - /* First, find out which raw minor we want */ + err = bind_get(rq.raw_minor, &dev); + if (err) + return err; - if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) { - err = -EFAULT; - goto out; - } + rq.block_major = MAJOR(dev); + rq.block_minor = MINOR(dev); - if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) { - err = -EINVAL; - goto out; - } - rawdev = &raw_devices[rq.raw_minor]; - - if (command == RAW_SETBIND) { - dev_t dev; - - /* - * This is like making block devices, so demand the - * same capability - */ - if (!capable(CAP_SYS_ADMIN)) { - err = -EPERM; - goto out; - } - - /* - * For now, we don't need to check that the underlying - * block device is present or not: we can do that when - * the raw device is opened. Just check that the - * major/minor numbers make sense. 
- */ - - dev = MKDEV(rq.block_major, rq.block_minor); - if ((rq.block_major == 0 && rq.block_minor != 0) || - MAJOR(dev) != rq.block_major || - MINOR(dev) != rq.block_minor) { - err = -EINVAL; - goto out; - } - - mutex_lock(&raw_mutex); - if (rawdev->inuse) { - mutex_unlock(&raw_mutex); - err = -EBUSY; - goto out; - } - if (rawdev->binding) { - bdput(rawdev->binding); - module_put(THIS_MODULE); - } - if (rq.block_major == 0 && rq.block_minor == 0) { - /* unbind */ - rawdev->binding = NULL; - device_destroy(raw_class, - MKDEV(RAW_MAJOR, rq.raw_minor)); - } else { - rawdev->binding = bdget(dev); - if (rawdev->binding == NULL) - err = -ENOMEM; - else { - __module_get(THIS_MODULE); - bind_device(&rq); - } - } - mutex_unlock(&raw_mutex); - } else { - struct block_device *bdev; - - mutex_lock(&raw_mutex); - bdev = rawdev->binding; - if (bdev) { - rq.block_major = MAJOR(bdev->bd_dev); - rq.block_minor = MINOR(bdev->bd_dev); - } else { - rq.block_major = rq.block_minor = 0; - } - mutex_unlock(&raw_mutex); - if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) { - err = -EFAULT; - goto out; - } - } - break; - default: - err = -EINVAL; - break; + if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) + return -EFAULT; + + return 0; } -out: - unlock_kernel(); - return err; + + return -EINVAL; +} + +#ifdef CONFIG_COMPAT +struct raw32_config_request { + compat_int_t raw_minor; + compat_u64 block_major; + compat_u64 block_minor; +}; + +static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct raw32_config_request __user *user_req = compat_ptr(arg); + struct raw32_config_request rq; + dev_t dev; + int err = 0; + + switch (cmd) { + case RAW_SETBIND: + if (copy_from_user(&rq, user_req, sizeof(rq))) + return -EFAULT; + + return bind_set(rq.raw_minor, rq.block_major, rq.block_minor); + + case RAW_GETBIND: + if (copy_from_user(&rq, user_req, sizeof(rq))) + return -EFAULT; + + err = bind_get(rq.raw_minor, &dev); + if (err) + return err; + + rq.block_major = MAJOR(dev); + rq.block_minor = MINOR(dev); + + if (copy_to_user(user_req, &rq, sizeof(rq))) + return -EFAULT; + + return 0; + } + + return -EINVAL; } +#endif static const struct file_operations raw_fops = { .read = do_sync_read, @@ -258,13 +292,18 @@ static const struct file_operations raw_fops = { .open = raw_open, .release = raw_release, .unlocked_ioctl = raw_ioctl, + .llseek = default_llseek, .owner = THIS_MODULE, }; static const struct file_operations raw_ctl_fops = { .unlocked_ioctl = raw_ctl_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = raw_ctl_compat_ioctl, +#endif .open = raw_open, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct cdev raw_cdev; diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c index d58c2eb07f0..5e33293d24e 100644 --- a/drivers/char/rio/rio_linux.c +++ b/drivers/char/rio/rio_linux.c @@ -44,7 +44,7 @@ #include <linux/delay.h> #include <linux/pci.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/miscdevice.h> #include <linux/init.h> @@ -122,6 +122,7 @@ more than 512 ports.... 
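/*
 * Editor's note (illustrative userspace sketch, not part of the patch): the
 * bind_set()/bind_get() helpers refactored out of raw_ctl_ioctl() in
 * drivers/char/raw.c above still back the long-standing RAW_SETBIND and
 * RAW_GETBIND ioctls on the raw control device. Hypothetical usage below;
 * the control-device path may vary by distribution.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raw.h>

int main(void)
{
        /* bind /dev/raw/raw1 to block device 8:0 (typically /dev/sda) */
        struct raw_config_request rq = {
                .raw_minor   = 1,
                .block_major = 8,
                .block_minor = 0,
        };
        int fd = open("/dev/rawctl", O_RDWR);

        if (fd < 0 || ioctl(fd, RAW_SETBIND, &rq) < 0) {
                perror("RAW_SETBIND");
                return 1;
        }
        close(fd);
        return 0;
}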
*/ /* These constants are derived from SCO Source */ +static DEFINE_MUTEX(rio_fw_mutex); static struct Conf RIOConf = { /* locator */ "RIO Config here", @@ -241,6 +242,7 @@ static struct real_driver rio_real_driver = { static const struct file_operations rio_fw_fops = { .owner = THIS_MODULE, .unlocked_ioctl = rio_fw_ioctl, + .llseek = noop_llseek, }; static struct miscdevice rio_fw_device = { @@ -566,9 +568,9 @@ static long rio_fw_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) func_enter(); /* The "dev" argument isn't used. */ - lock_kernel(); + mutex_lock(&rio_fw_mutex); rc = riocontrol(p, 0, cmd, arg, capable(CAP_SYS_ADMIN)); - unlock_kernel(); + mutex_unlock(&rio_fw_mutex); func_exit(); return rc; diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c index 99e5272e3c5..0bc135b9b16 100644 --- a/drivers/char/scx200_gpio.c +++ b/drivers/char/scx200_gpio.c @@ -67,6 +67,7 @@ static const struct file_operations scx200_gpio_fileops = { .read = nsc_gpio_read, .open = scx200_gpio_open, .release = scx200_gpio_release, + .llseek = no_llseek, }; static struct cdev scx200_gpio_cdev; /* use 1 cdev for all pins */ diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index 32b74de18f5..5816b39ff5a 100644 --- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c @@ -21,7 +21,7 @@ #include <linux/poll.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/sn/io.h> #include <asm/sn/sn_sal.h> #include <asm/sn/module.h> @@ -34,6 +34,7 @@ #define SCDRV_BUFSZ 2048 #define SCDRV_TIMEOUT 1000 +static DEFINE_MUTEX(scdrv_mutex); static irqreturn_t scdrv_interrupt(int irq, void *subch_data) { @@ -105,7 +106,7 @@ scdrv_open(struct inode *inode, struct file *file) file->private_data = sd; /* hook this subchannel up to the system controller interrupt */ - lock_kernel(); + mutex_lock(&scdrv_mutex); rv = request_irq(SGI_UART_VECTOR, scdrv_interrupt, IRQF_SHARED | IRQF_DISABLED, SYSCTL_BASENAME, sd); @@ -113,10 +114,10 @@ scdrv_open(struct inode *inode, struct file *file) ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch); kfree(sd); printk("%s: irq request failed (%d)\n", __func__, rv); - unlock_kernel(); + mutex_unlock(&scdrv_mutex); return -EBUSY; } - unlock_kernel(); + mutex_unlock(&scdrv_mutex); return 0; } @@ -357,6 +358,7 @@ static const struct file_operations scdrv_fops = { .poll = scdrv_poll, .open = scdrv_open, .release = scdrv_release, + .llseek = noop_llseek, }; static struct class *snsc_class; diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index f2167f8e5aa..8ef16490810 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c @@ -608,6 +608,7 @@ static unsigned int sc26198_baudtable[] = { static const struct file_operations stl_fsiomem = { .owner = THIS_MODULE, .unlocked_ioctl = stl_memioctl, + .llseek = noop_llseek, }; static struct class *stallion_class; diff --git a/drivers/char/sx.c b/drivers/char/sx.c index 5b24db4ff7f..e53f1686539 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c @@ -397,6 +397,7 @@ static struct real_driver sx_real_driver = { static const struct file_operations sx_fw_fops = { .owner = THIS_MODULE, .unlocked_ioctl = sx_fw_ioctl, + .llseek = noop_llseek, }; static struct miscdevice sx_fw_device = { diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index ef31bb81e84..f3019f53e87 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -772,6 +772,7 @@ static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf, static const struct 
file_operations proc_sysrq_trigger_operations = { .write = write_sysrq_trigger, + .llseek = noop_llseek, }; static void sysrq_init_procfs(void) diff --git a/drivers/char/tb0219.c b/drivers/char/tb0219.c index cad4eb65f13..ad264185eb1 100644 --- a/drivers/char/tb0219.c +++ b/drivers/char/tb0219.c @@ -261,6 +261,7 @@ static const struct file_operations tb0219_fops = { .write = tanbac_tb0219_write, .open = tanbac_tb0219_open, .release = tanbac_tb0219_release, + .llseek = no_llseek, }; static void tb0219_restart(char *command) diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 80ea6bcfffd..0c964cdcc22 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -37,7 +37,7 @@ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/spinlock.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/timer.h> #include <linux/sysfs.h> #include <linux/device.h> @@ -206,7 +206,7 @@ static int tlclk_open(struct inode *inode, struct file *filp) { int result; - lock_kernel(); + mutex_lock(&tlclk_mutex); if (test_and_set_bit(0, &useflags)) { result = -EBUSY; /* this legacy device is always one per system and it doesn't @@ -229,7 +229,7 @@ static int tlclk_open(struct inode *inode, struct file *filp) inb(TLCLK_REG6); /* Clear interrupt events */ out: - unlock_kernel(); + mutex_unlock(&tlclk_mutex); return result; } @@ -267,6 +267,7 @@ static const struct file_operations tlclk_fops = { .read = tlclk_read, .open = tlclk_open, .release = tlclk_release, + .llseek = noop_llseek, }; diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index f8bc79f6de3..014c9d90d29 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c @@ -68,7 +68,7 @@ #include <linux/stat.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/toshiba.h> #define TOSH_MINOR_DEV 181 @@ -78,6 +78,7 @@ MODULE_AUTHOR("Jonathan Buzzard <jonathan@buzzard.org.uk>"); MODULE_DESCRIPTION("Toshiba laptop SMM driver"); MODULE_SUPPORTED_DEVICE("toshiba"); +static DEFINE_MUTEX(tosh_mutex); static int tosh_fn; module_param_named(fn, tosh_fn, int, 0); MODULE_PARM_DESC(fn, "User specified Fn key detection port"); @@ -95,6 +96,7 @@ static long tosh_ioctl(struct file *, unsigned int, static const struct file_operations tosh_fops = { .owner = THIS_MODULE, .unlocked_ioctl = tosh_ioctl, + .llseek = noop_llseek, }; static struct miscdevice tosh_device = { @@ -274,16 +276,16 @@ static long tosh_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) return -EINVAL; /* do we need to emulate the fan ? */ - lock_kernel(); + mutex_lock(&tosh_mutex); if (tosh_fan==1) { if (((ax==0xf300) || (ax==0xf400)) && (bx==0x0004)) { err = tosh_emulate_fan(®s); - unlock_kernel(); + mutex_unlock(&tosh_mutex); break; } } err = tosh_smm(®s); - unlock_kernel(); + mutex_unlock(&tosh_mutex); break; default: return -EINVAL; diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 05ad4a17a28..7c4133582db 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -47,6 +47,16 @@ enum tpm_duration { #define TPM_MAX_PROTECTED_ORDINAL 12 #define TPM_PROTECTED_ORDINAL_MASK 0xFF +/* + * Bug workaround - some TPM's don't flush the most + * recently changed pcr on suspend, so force the flush + * with an extend to the selected _unused_ non-volatile pcr. 
+ */ +static int tpm_suspend_pcr; +module_param_named(suspend_pcr, tpm_suspend_pcr, uint, 0644); +MODULE_PARM_DESC(suspend_pcr, + "PCR to use for dummy writes to faciltate flush on suspend."); + static LIST_HEAD(tpm_chip_list); static DEFINE_SPINLOCK(driver_lock); static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES); @@ -1077,18 +1087,6 @@ static struct tpm_input_header savestate_header = { .ordinal = TPM_ORD_SAVESTATE }; -/* Bug workaround - some TPM's don't flush the most - * recently changed pcr on suspend, so force the flush - * with an extend to the selected _unused_ non-volatile pcr. - */ -static int tpm_suspend_pcr; -static int __init tpm_suspend_setup(char *str) -{ - get_option(&str, &tpm_suspend_pcr); - return 1; -} -__setup("tpm_suspend_pcr=", tpm_suspend_setup); - /* * We are about to suspend. Save the TPM state * so that it can be restored. diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c index c7072ba14f4..493b47a0d51 100644 --- a/drivers/char/uv_mmtimer.c +++ b/drivers/char/uv_mmtimer.c @@ -52,6 +52,7 @@ static const struct file_operations uv_mmtimer_fops = { .owner = THIS_MODULE, .mmap = uv_mmtimer_mmap, .unlocked_ioctl = uv_mmtimer_ioctl, + .llseek = noop_llseek, }; /** diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c index 42f7fa442ff..ad6e64a2912 100644 --- a/drivers/char/viotape.c +++ b/drivers/char/viotape.c @@ -46,7 +46,7 @@ #include <linux/completion.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <asm/uaccess.h> @@ -64,6 +64,7 @@ #define VIOTAPE_KERN_WARN KERN_WARNING "viotape: " #define VIOTAPE_KERN_INFO KERN_INFO "viotape: " +static DEFINE_MUTEX(proc_viotape_mutex); static int viotape_numdev; /* @@ -684,9 +685,9 @@ static long viotap_unlocked_ioctl(struct file *file, { long rc; - lock_kernel(); + mutex_lock(&proc_viotape_mutex); rc = viotap_ioctl(file->f_path.dentry->d_inode, file, cmd, arg); - unlock_kernel(); + mutex_unlock(&proc_viotape_mutex); return rc; } @@ -700,7 +701,7 @@ static int viotap_open(struct inode *inode, struct file *file) if (op == NULL) return -ENOMEM; - lock_kernel(); + mutex_lock(&proc_viotape_mutex); get_dev_info(file->f_path.dentry->d_inode, &devi); /* Note: We currently only support one mode! */ @@ -731,7 +732,7 @@ static int viotap_open(struct inode *inode, struct file *file) free_op: free_op_struct(op); - unlock_kernel(); + mutex_unlock(&proc_viotape_mutex); return ret; } @@ -804,6 +805,7 @@ const struct file_operations viotap_fops = { .unlocked_ioctl = viotap_unlocked_ioctl, .open = viotap_open, .release = viotap_release, + .llseek = noop_llseek, }; /* Handle interrupt events for tape */ diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 942a9826bd2..6c1b676643a 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -48,6 +48,9 @@ struct ports_driver_data { /* Used for exporting per-port information to debugfs */ struct dentry *debugfs_dir; + /* List of all the devices we're handling */ + struct list_head portdevs; + /* Number of devices this driver is handling */ unsigned int index; @@ -108,6 +111,9 @@ struct port_buffer { * ports for that device (vdev->priv). 
*/ struct ports_device { + /* Next portdev in the list, head is in the pdrvdata struct */ + struct list_head list; + /* * Workqueue handlers where we process deferred work after * notification @@ -178,15 +184,21 @@ struct port { struct console cons; /* Each port associates with a separate char device */ - struct cdev cdev; + struct cdev *cdev; struct device *dev; + /* Reference-counting to handle port hot-unplugs and file operations */ + struct kref kref; + /* A waitqueue for poll() or blocking read operations */ wait_queue_head_t waitqueue; /* The 'name' of the port that we expose via sysfs properties */ char *name; + /* We can notify apps of host connect / disconnect events via SIGIO */ + struct fasync_struct *async_queue; + /* The 'id' to identify the port with the Host */ u32 id; @@ -221,6 +233,41 @@ out: return port; } +static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, + dev_t dev) +{ + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&portdev->ports_lock, flags); + list_for_each_entry(port, &portdev->ports, list) + if (port->cdev->dev == dev) + goto out; + port = NULL; +out: + spin_unlock_irqrestore(&portdev->ports_lock, flags); + + return port; +} + +static struct port *find_port_by_devt(dev_t dev) +{ + struct ports_device *portdev; + struct port *port; + unsigned long flags; + + spin_lock_irqsave(&pdrvdata_lock, flags); + list_for_each_entry(portdev, &pdrvdata.portdevs, list) { + port = find_port_by_devt_in_portdev(portdev, dev); + if (port) + goto out; + } + port = NULL; +out: + spin_unlock_irqrestore(&pdrvdata_lock, flags); + return port; +} + static struct port *find_port_by_id(struct ports_device *portdev, u32 id) { struct port *port; @@ -410,7 +457,10 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id, static ssize_t send_control_msg(struct port *port, unsigned int event, unsigned int value) { - return __send_control_msg(port->portdev, port->id, event, value); + /* Did the port get unplugged before userspace closed it? */ + if (port->portdev) + return __send_control_msg(port->portdev, port->id, event, value); + return 0; } /* Callers must take the port->outvq_lock */ @@ -459,9 +509,12 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, /* * Wait till the host acknowledges it pushed out the data we - * sent. This is done for ports in blocking mode or for data - * from the hvc_console; the tty operations are performed with - * spinlocks held so we can't sleep here. + * sent. This is done for data from the hvc_console; the tty + * operations are performed with spinlocks held so we can't + * sleep here. An alternative would be to copy the data to a + * buffer and relax the spinning requirement. The downside is + * we need to kmalloc a GFP_ATOMIC buffer each time the + * console driver writes something out. */ while (!virtqueue_get_buf(out_vq, &len)) cpu_relax(); @@ -522,6 +575,10 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count, /* The condition that must be true for polling to end */ static bool will_read_block(struct port *port) { + if (!port->guest_connected) { + /* Port got hot-unplugged. Let's exit. */ + return false; + } return !port_has_data(port) && port->host_connected; } @@ -572,6 +629,9 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, if (ret < 0) return ret; } + /* Port got hot-unplugged. 
*/ + if (!port->guest_connected) + return -ENODEV; /* * We could've received a disconnection message while we were * waiting for more data. @@ -596,6 +656,10 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, ssize_t ret; bool nonblock; + /* Userspace could be out to fool us */ + if (!count) + return 0; + port = filp->private_data; nonblock = filp->f_flags & O_NONBLOCK; @@ -609,6 +673,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, if (ret < 0) return ret; } + /* Port got hot-unplugged. */ + if (!port->guest_connected) + return -ENODEV; count = min((size_t)(32 * 1024), count); @@ -622,6 +689,14 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, goto free_buf; } + /* + * We now ask send_buf() to not spin for generic ports -- we + * can re-use the same code path that non-blocking file + * descriptors take for blocking file descriptors since the + * wait is already done and we're certain the write will go + * through to the host. + */ + nonblock = true; ret = send_buf(port, buf, count, nonblock); if (nonblock && ret > 0) @@ -641,8 +716,12 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) port = filp->private_data; poll_wait(filp, &port->waitqueue, wait); + if (!port->guest_connected) { + /* Port got unplugged */ + return POLLHUP; + } ret = 0; - if (port->inbuf) + if (!will_read_block(port)) ret |= POLLIN | POLLRDNORM; if (!will_write_block(port)) ret |= POLLOUT; @@ -652,6 +731,8 @@ static unsigned int port_fops_poll(struct file *filp, poll_table *wait) return ret; } +static void remove_port(struct kref *kref); + static int port_fops_release(struct inode *inode, struct file *filp) { struct port *port; @@ -672,6 +753,16 @@ static int port_fops_release(struct inode *inode, struct file *filp) reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); + /* + * Locks aren't necessary here as a port can't be opened after + * unplug, and if a port isn't unplugged, a kref would already + * exist for the port. Plus, taking ports_lock here would + * create a dependency on other locks taken by functions + * inside remove_port if we're the last holder of the port, + * creating many problems. 
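The comment above explains why port_fops_release() may drop its reference without taking ports_lock; the kref_put() that follows is the release-side half of that lifetime scheme. A minimal sketch of the same kref rule, assuming hypothetical my_port names (not the actual virtio_console code):

	#include <linux/kernel.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	struct my_port {
		struct kref kref;
		/* ... per-port state ... */
	};

	static void my_port_release(struct kref *kref)
	{
		struct my_port *p = container_of(kref, struct my_port, kref);

		kfree(p);			/* runs only once the last reference is gone */
	}

	static struct my_port *my_port_create(void)
	{
		struct my_port *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (p)
			kref_init(&p->kref);	/* initial reference, dropped at unplug */
		return p;
	}

	/* open takes a reference; close and unplug each drop one. Whichever
	 * happens last frees the object, so close needs no extra locking. */
	static void my_port_open(struct my_port *p)   { kref_get(&p->kref); }
	static void my_port_close(struct my_port *p)  { kref_put(&p->kref, my_port_release); }
	static void my_port_unplug(struct my_port *p) { kref_put(&p->kref, my_port_release); }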
+ */ + kref_put(&port->kref, remove_port); + return 0; } @@ -679,22 +770,31 @@ static int port_fops_open(struct inode *inode, struct file *filp) { struct cdev *cdev = inode->i_cdev; struct port *port; + int ret; - port = container_of(cdev, struct port, cdev); + port = find_port_by_devt(cdev->dev); filp->private_data = port; + /* Prevent against a port getting hot-unplugged at the same time */ + spin_lock_irq(&port->portdev->ports_lock); + kref_get(&port->kref); + spin_unlock_irq(&port->portdev->ports_lock); + /* * Don't allow opening of console port devices -- that's done * via /dev/hvc */ - if (is_console_port(port)) - return -ENXIO; + if (is_console_port(port)) { + ret = -ENXIO; + goto out; + } /* Allow only one process to open a particular port at a time */ spin_lock_irq(&port->inbuf_lock); if (port->guest_connected) { spin_unlock_irq(&port->inbuf_lock); - return -EMFILE; + ret = -EMFILE; + goto out; } port->guest_connected = true; @@ -709,10 +809,23 @@ static int port_fops_open(struct inode *inode, struct file *filp) reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); + nonseekable_open(inode, filp); + /* Notify host of port being opened */ send_control_msg(filp->private_data, VIRTIO_CONSOLE_PORT_OPEN, 1); return 0; +out: + kref_put(&port->kref, remove_port); + return ret; +} + +static int port_fops_fasync(int fd, struct file *filp, int mode) +{ + struct port *port; + + port = filp->private_data; + return fasync_helper(fd, filp, mode, &port->async_queue); } /* @@ -728,6 +841,8 @@ static const struct file_operations port_fops = { .write = port_fops_write, .poll = port_fops_poll, .release = port_fops_release, + .fasync = port_fops_fasync, + .llseek = no_llseek, }; /* @@ -986,6 +1101,12 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock) return nr_added_bufs; } +static void send_sigio_to_port(struct port *port) +{ + if (port->async_queue && port->guest_connected) + kill_fasync(&port->async_queue, SIGIO, POLL_OUT); +} + static int add_port(struct ports_device *portdev, u32 id) { char debugfs_name[16]; @@ -1000,6 +1121,7 @@ static int add_port(struct ports_device *portdev, u32 id) err = -ENOMEM; goto fail; } + kref_init(&port->kref); port->portdev = portdev; port->id = id; @@ -1007,6 +1129,7 @@ static int add_port(struct ports_device *portdev, u32 id) port->name = NULL; port->inbuf = NULL; port->cons.hvc = NULL; + port->async_queue = NULL; port->cons.ws.ws_row = port->cons.ws.ws_col = 0; @@ -1017,14 +1140,20 @@ static int add_port(struct ports_device *portdev, u32 id) port->in_vq = portdev->in_vqs[port->id]; port->out_vq = portdev->out_vqs[port->id]; - cdev_init(&port->cdev, &port_fops); + port->cdev = cdev_alloc(); + if (!port->cdev) { + dev_err(&port->portdev->vdev->dev, "Error allocating cdev\n"); + err = -ENOMEM; + goto free_port; + } + port->cdev->ops = &port_fops; devt = MKDEV(portdev->chr_major, id); - err = cdev_add(&port->cdev, devt, 1); + err = cdev_add(port->cdev, devt, 1); if (err < 0) { dev_err(&port->portdev->vdev->dev, "Error %d adding cdev for port %u\n", err, id); - goto free_port; + goto free_cdev; } port->dev = device_create(pdrvdata.class, &port->portdev->vdev->dev, devt, port, "vport%up%u", @@ -1089,7 +1218,7 @@ free_inbufs: free_device: device_destroy(pdrvdata.class, port->dev->devt); free_cdev: - cdev_del(&port->cdev); + cdev_del(port->cdev); free_port: kfree(port); fail: @@ -1098,21 +1227,45 @@ fail: return err; } -/* Remove all port-specific data. 
*/ -static int remove_port(struct port *port) +/* No users remain, remove all port-specific data. */ +static void remove_port(struct kref *kref) +{ + struct port *port; + + port = container_of(kref, struct port, kref); + + sysfs_remove_group(&port->dev->kobj, &port_attribute_group); + device_destroy(pdrvdata.class, port->dev->devt); + cdev_del(port->cdev); + + kfree(port->name); + + debugfs_remove(port->debugfs_file); + + kfree(port); +} + +/* + * Port got unplugged. Remove port from portdev's list and drop the + * kref reference. If no userspace has this port opened, it will + * result in immediate removal the port. + */ +static void unplug_port(struct port *port) { struct port_buffer *buf; + spin_lock_irq(&port->portdev->ports_lock); + list_del(&port->list); + spin_unlock_irq(&port->portdev->ports_lock); + if (port->guest_connected) { port->guest_connected = false; port->host_connected = false; wake_up_interruptible(&port->waitqueue); - send_control_msg(port, VIRTIO_CONSOLE_PORT_OPEN, 0); - } - spin_lock_irq(&port->portdev->ports_lock); - list_del(&port->list); - spin_unlock_irq(&port->portdev->ports_lock); + /* Let the app know the port is going down. */ + send_sigio_to_port(port); + } if (is_console_port(port)) { spin_lock_irq(&pdrvdata_lock); @@ -1131,9 +1284,6 @@ static int remove_port(struct port *port) hvc_remove(port->cons.hvc); #endif } - sysfs_remove_group(&port->dev->kobj, &port_attribute_group); - device_destroy(pdrvdata.class, port->dev->devt); - cdev_del(&port->cdev); /* Remove unused data this port might have received. */ discard_port_data(port); @@ -1144,12 +1294,19 @@ static int remove_port(struct port *port) while ((buf = virtqueue_detach_unused_buf(port->in_vq))) free_buf(buf); - kfree(port->name); - - debugfs_remove(port->debugfs_file); + /* + * We should just assume the device itself has gone off -- + * else a close on an open port later will try to send out a + * control message. + */ + port->portdev = NULL; - kfree(port); - return 0; + /* + * Locks around here are not necessary - a port can't be + * opened after we removed the port struct from ports_list + * above. + */ + kref_put(&port->kref, remove_port); } /* Any private messages that the Host and Guest want to share */ @@ -1188,7 +1345,7 @@ static void handle_control_message(struct ports_device *portdev, add_port(portdev, cpkt->id); break; case VIRTIO_CONSOLE_PORT_REMOVE: - remove_port(port); + unplug_port(port); break; case VIRTIO_CONSOLE_CONSOLE_PORT: if (!cpkt->value) @@ -1230,6 +1387,12 @@ static void handle_control_message(struct ports_device *portdev, spin_lock_irq(&port->outvq_lock); reclaim_consumed_buffers(port); spin_unlock_irq(&port->outvq_lock); + + /* + * If the guest is connected, it'll be interested in + * knowing the host connection state changed. 
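send_sigio_to_port(), used just below and again on new input in in_intr(), depends on the standard fasync plumbing wired into the port file operations via port_fops_fasync(). A short sketch of that generic pattern, with hypothetical demo_* names:

	#include <linux/fs.h>
	#include <linux/signal.h>

	struct demo_dev {
		struct fasync_struct *async_queue;
	};

	/* installed as .fasync; registers/unregisters this open file for SIGIO */
	static int demo_fasync(int fd, struct file *filp, int on)
	{
		struct demo_dev *dev = filp->private_data;

		return fasync_helper(fd, filp, on, &dev->async_queue);
	}

	/* called when new data arrives or the connection state changes */
	static void demo_notify(struct demo_dev *dev)
	{
		if (dev->async_queue)
			kill_fasync(&dev->async_queue, SIGIO, POLL_IN);
	}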
+ */ + send_sigio_to_port(port); break; case VIRTIO_CONSOLE_PORT_NAME: /* @@ -1326,6 +1489,9 @@ static void in_intr(struct virtqueue *vq) wake_up_interruptible(&port->waitqueue); + /* Send a SIGIO indicating new data in case the process asked for it */ + send_sigio_to_port(port); + if (is_console_port(port) && hvc_poll(port->cons.hvc)) hvc_kick(); } @@ -1562,6 +1728,10 @@ static int __devinit virtcons_probe(struct virtio_device *vdev) add_port(portdev, 0); } + spin_lock_irq(&pdrvdata_lock); + list_add_tail(&portdev->list, &pdrvdata.portdevs); + spin_unlock_irq(&pdrvdata_lock); + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID, VIRTIO_CONSOLE_DEVICE_READY, 1); return 0; @@ -1585,23 +1755,41 @@ static void virtcons_remove(struct virtio_device *vdev) { struct ports_device *portdev; struct port *port, *port2; - struct port_buffer *buf; - unsigned int len; portdev = vdev->priv; + spin_lock_irq(&pdrvdata_lock); + list_del(&portdev->list); + spin_unlock_irq(&pdrvdata_lock); + + /* Disable interrupts for vqs */ + vdev->config->reset(vdev); + /* Finish up work that's lined up */ cancel_work_sync(&portdev->control_work); list_for_each_entry_safe(port, port2, &portdev->ports, list) - remove_port(port); + unplug_port(port); unregister_chrdev(portdev->chr_major, "virtio-portsdev"); - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) - free_buf(buf); + /* + * When yanking out a device, we immediately lose the + * (device-side) queues. So there's no point in keeping the + * guest side around till we drop our final reference. This + * also means that any ports which are in an open state will + * have to just stop using the port, as the vqs are going + * away. + */ + if (use_multiport(portdev)) { + struct port_buffer *buf; + unsigned int len; - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) - free_buf(buf); + while ((buf = virtqueue_get_buf(portdev->c_ivq, &len))) + free_buf(buf); + + while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq))) + free_buf(buf); + } vdev->config->del_vqs(vdev); kfree(portdev->in_vqs); @@ -1648,6 +1836,7 @@ static int __init init(void) PTR_ERR(pdrvdata.debugfs_dir)); } INIT_LIST_HEAD(&pdrvdata.consoles); + INIT_LIST_HEAD(&pdrvdata.portdevs); return register_virtio_driver(&virtio_console); } diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index b663d573aad..9f2272e6de1 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -81,7 +81,6 @@ #include <linux/poll.h> #include <linux/proc_fs.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/sysctl.h> #include <linux/fs.h> #include <linux/cdev.h> @@ -112,6 +111,7 @@ #define HWICAP_DEVICES 1 /* An array, which is set to true when the device is registered. 
*/ +static DEFINE_MUTEX(hwicap_mutex); static bool probed_devices[HWICAP_DEVICES]; static struct mutex icap_sem; @@ -502,7 +502,7 @@ static int hwicap_open(struct inode *inode, struct file *file) struct hwicap_drvdata *drvdata; int status; - lock_kernel(); + mutex_lock(&hwicap_mutex); drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev); status = mutex_lock_interruptible(&drvdata->sem); @@ -528,7 +528,7 @@ static int hwicap_open(struct inode *inode, struct file *file) error: mutex_unlock(&drvdata->sem); out: - unlock_kernel(); + mutex_unlock(&hwicap_mutex); return status; } @@ -567,6 +567,7 @@ static const struct file_operations hwicap_fops = { .read = hwicap_read, .open = hwicap_open, .release = hwicap_release, + .llseek = noop_llseek, }; static int __devinit hwicap_setup(struct device *dev, int id, diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index c2408bbe9c2..f508690eb95 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -80,7 +80,7 @@ * Limiting Performance Impact * --------------------------- * C states, especially those with large exit latencies, can have a real - * noticable impact on workloads, which is not acceptable for most sysadmins, + * noticeable impact on workloads, which is not acceptable for most sysadmins, * and in addition, less performance has a power price of its own. * * As a general rule of thumb, menu assumes that the following heuristic diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c index 8661c84a105..b98c67664ae 100644 --- a/drivers/dca/dca-core.c +++ b/drivers/dca/dca-core.c @@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock); static LIST_HEAD(dca_domains); +static BLOCKING_NOTIFIER_HEAD(dca_provider_chain); + +static int dca_providers_blocked; + static struct pci_bus *dca_pci_rc_from_dev(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); @@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain) kfree(domain); } +static int dca_provider_ioat_ver_3_0(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return ((pdev->vendor == PCI_VENDOR_ID_INTEL) && + ((pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG0) || + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG1) || + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG2) || + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG3) || + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG4) || + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG5) || + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG6) || + (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_TBG7))); +} + +static void unregister_dca_providers(void) +{ + struct dca_provider *dca, *_dca; + struct list_head unregistered_providers; + struct dca_domain *domain; + unsigned long flags; + + blocking_notifier_call_chain(&dca_provider_chain, + DCA_PROVIDER_REMOVE, NULL); + + INIT_LIST_HEAD(&unregistered_providers); + + spin_lock_irqsave(&dca_lock, flags); + + if (list_empty(&dca_domains)) { + spin_unlock_irqrestore(&dca_lock, flags); + return; + } + + /* at this point only one domain in the list is expected */ + domain = list_first_entry(&dca_domains, struct dca_domain, node); + if (!domain) + return; + + list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) { + list_del(&dca->node); + list_add(&dca->node, &unregistered_providers); + } + + dca_free_domain(domain); + + spin_unlock_irqrestore(&dca_lock, flags); + + list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) { + dca_sysfs_remove_provider(dca); + list_del(&dca->node); + } +} + static 
struct dca_domain *dca_find_domain(struct pci_bus *rc) { struct dca_domain *domain; @@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev) domain = dca_find_domain(rc); if (!domain) { - domain = dca_allocate_domain(rc); - if (domain) - list_add(&domain->node, &dca_domains); + if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) { + dca_providers_blocked = 1; + } else { + domain = dca_allocate_domain(rc); + if (domain) + list_add(&domain->node, &dca_domains); + } } return domain; @@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca) } EXPORT_SYMBOL_GPL(free_dca_provider); -static BLOCKING_NOTIFIER_HEAD(dca_provider_chain); - /** * register_dca_provider - register a dca provider * @dca - struct created by alloc_dca_provider() @@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) unsigned long flags; struct dca_domain *domain; + spin_lock_irqsave(&dca_lock, flags); + if (dca_providers_blocked) { + spin_unlock_irqrestore(&dca_lock, flags); + return -ENODEV; + } + spin_unlock_irqrestore(&dca_lock, flags); + err = dca_sysfs_add_provider(dca, dev); if (err) return err; @@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev) spin_lock_irqsave(&dca_lock, flags); domain = dca_get_domain(dev); if (!domain) { - spin_unlock_irqrestore(&dca_lock, flags); + if (dca_providers_blocked) { + spin_unlock_irqrestore(&dca_lock, flags); + dca_sysfs_remove_provider(dca); + unregister_dca_providers(); + } else { + spin_unlock_irqrestore(&dca_lock, flags); + } return -ENODEV; } list_add(&dca->node, &domain->dca_providers); diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 557e2272e5b..ae2b8714d19 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -157,6 +157,7 @@ static const struct file_operations coh901318_debugfs_status_operations = { .owner = THIS_MODULE, .open = coh901318_debugfs_open, .read = coh901318_debugfs_read, + .llseek = default_llseek, }; diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 216f9d383b5..effd140fc04 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -879,7 +879,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_tx_status = ioat_tx_status; + dma->device_tx_status = ioat_dma_tx_status; err = ioat_probe(device); if (err) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 86c5ae9fde3..411d5bf50fc 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -162,7 +162,7 @@ static int mv_is_err_intr(u32 intr_cause) static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) { - u32 val = (1 << (1 + (chan->idx * 16))); + u32 val = ~(1 << (chan->idx * 16)); dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); __raw_writel(val, XOR_INTR_CAUSE(chan)); } diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index fb64cf36ba6..eb6b54dbb80 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -580,7 +580,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( sh_chan = to_sh_chan(chan); param = chan->private; - slave_addr = param->config->addr; /* Someone calling slave DMA on a public channel? 
*/ if (!param || !sg_len) { @@ -589,6 +588,8 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( return NULL; } + slave_addr = param->config->addr; + /* * if (param != NULL), this is a successfully requested slave channel, * therefore param->config != NULL too. diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 70bb350de99..9dbb28b9559 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -39,7 +39,7 @@ config EDAC_DEBUG there're four debug levels (x=0,1,2,3 from low to high). Usually you should select 'N'. - config EDAC_DECODE_MCE +config EDAC_DECODE_MCE tristate "Decode MCEs in human-readable form (only on AMD for now)" depends on CPU_SUP_AMD && X86_MCE default y @@ -51,6 +51,16 @@ config EDAC_DEBUG which occur really early upon boot, before the module infrastructure has been initialized. +config EDAC_MCE_INJ + tristate "Simple MCE injection interface over /sysfs" + depends on EDAC_DECODE_MCE + default n + help + This is a simple interface to inject MCEs over /sysfs and test + the MCE decoding code in EDAC. + + This is currently AMD-only. + config EDAC_MM_EDAC tristate "Main Memory EDAC (Error Detection And Correction) reporting" help @@ -66,13 +76,13 @@ config EDAC_MCE config EDAC_AMD64 tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h" - depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE + depends on EDAC_MM_EDAC && AMD_NB && X86_64 && PCI && EDAC_DECODE_MCE help Support for error detection and correction on the AMD 64 Families of Memory Controllers (K8, F10h and F11h) config EDAC_AMD64_ERROR_INJECTION - bool "Sysfs Error Injection facilities" + bool "Sysfs HW Error injection facilities" depends on EDAC_AMD64 help Recent Opterons (Family 10h and later) provide for Memory Error diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index ca6b1bb24cc..32c7bc93c52 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -17,6 +17,9 @@ ifdef CONFIG_PCI edac_core-objs += edac_pci.o edac_pci_sysfs.o endif +obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o + +edac_mce_amd-objs := mce_amd.o obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index e7d5d6b5dcf..8521401bbd7 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1,5 +1,5 @@ #include "amd64_edac.h" -#include <asm/k8.h> +#include <asm/amd_nb.h> static struct edac_pci_ctl_info *amd64_ctl_pci; @@ -2073,11 +2073,18 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci, amd64_handle_ue(mci, info); } -void amd64_decode_bus_error(int node_id, struct err_regs *regs) +void amd64_decode_bus_error(int node_id, struct mce *m, u32 nbcfg) { struct mem_ctl_info *mci = mci_lookup[node_id]; + struct err_regs regs; - __amd64_decode_bus_error(mci, regs); + regs.nbsl = (u32) m->status; + regs.nbsh = (u32)(m->status >> 32); + regs.nbeal = (u32) m->addr; + regs.nbeah = (u32)(m->addr >> 32); + regs.nbcfg = nbcfg; + + __amd64_decode_bus_error(mci, ®s); /* * Check the UE bit of the NB status high register, if set generate some @@ -2086,7 +2093,7 @@ void amd64_decode_bus_error(int node_id, struct err_regs *regs) * * FIXME: this should go somewhere else, if at all. */ - if (regs->nbsh & K8_NBSH_UC_ERR && !report_gart_errors) + if (regs.nbsh & K8_NBSH_UC_ERR && !report_gart_errors) edac_mc_handle_ue_no_info(mci, "UE bit is set"); } @@ -2927,7 +2934,7 @@ static int __init amd64_edac_init(void) * to finish initialization of the MC instances. 
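In the amd64_edac hunk above, amd64_decode_bus_error() now takes a struct mce and repacks its 64-bit status/address values into the legacy high/low register pairs before decoding. A compact sketch of that repacking, using a hypothetical err_regs_demo type rather than the driver's own struct:

	#include <linux/types.h>

	struct err_regs_demo {
		u32 nbsh, nbsl;		/* MC4_STATUS, high and low halves */
		u32 nbeah, nbeal;	/* MC4_ADDR, high and low halves   */
	};

	static void fill_err_regs(struct err_regs_demo *r, u64 status, u64 addr)
	{
		r->nbsl  = (u32)status;
		r->nbsh  = (u32)(status >> 32);
		r->nbeal = (u32)addr;
		r->nbeah = (u32)(addr >> 32);
	}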
*/ err = -ENODEV; - for (nb = 0; nb < num_k8_northbridges; nb++) { + for (nb = 0; nb < k8_northbridges.num; nb++) { if (!pvt_lookup[nb]) continue; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 613b9381e71..044aee4f944 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -72,7 +72,7 @@ #include <linux/edac.h> #include <asm/msr.h> #include "edac_core.h" -#include "edac_mce_amd.h" +#include "mce_amd.h" #define amd64_printk(level, fmt, arg...) \ edac_printk(level, "amd64", fmt, ##arg) @@ -482,11 +482,10 @@ extern const char *rrrr_msgs[16]; extern const char *to_msgs[2]; extern const char *pp_msgs[4]; extern const char *ii_msgs[4]; -extern const char *ext_msgs[32]; extern const char *htlink_msgs[8]; #ifdef CONFIG_EDAC_DEBUG -#define NUM_DBG_ATTRS 9 +#define NUM_DBG_ATTRS 5 #else #define NUM_DBG_ATTRS 0 #endif diff --git a/drivers/edac/amd64_edac_dbg.c b/drivers/edac/amd64_edac_dbg.c index 59cf2cf6e11..e3562288f4c 100644 --- a/drivers/edac/amd64_edac_dbg.c +++ b/drivers/edac/amd64_edac_dbg.c @@ -1,167 +1,16 @@ #include "amd64_edac.h" -/* - * accept a hex value and store it into the virtual error register file, field: - * nbeal and nbeah. Assume virtual error values have already been set for: NBSL, - * NBSH and NBCFG. Then proceed to map the error values to a MC, CSROW and - * CHANNEL - */ -static ssize_t amd64_nbea_store(struct mem_ctl_info *mci, const char *data, - size_t count) -{ - struct amd64_pvt *pvt = mci->pvt_info; - unsigned long long value; - int ret = 0; - - ret = strict_strtoull(data, 16, &value); - if (ret != -EINVAL) { - debugf0("received NBEA= 0x%llx\n", value); - - /* place the value into the virtual error packet */ - pvt->ctl_error_info.nbeal = (u32) value; - value >>= 32; - pvt->ctl_error_info.nbeah = (u32) value; - - /* Process the Mapping request */ - /* TODO: Add race prevention */ - amd_decode_nb_mce(pvt->mc_node_id, &pvt->ctl_error_info, 1); - - return count; - } - return ret; -} - -/* display back what the last NBEA (MCA NB Address (MC4_ADDR)) was written */ -static ssize_t amd64_nbea_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - u64 value; - - value = pvt->ctl_error_info.nbeah; - value <<= 32; - value |= pvt->ctl_error_info.nbeal; - - return sprintf(data, "%llx\n", value); -} - -/* store the NBSL (MCA NB Status Low (MC4_STATUS)) value user desires */ -static ssize_t amd64_nbsl_store(struct mem_ctl_info *mci, const char *data, - size_t count) -{ - struct amd64_pvt *pvt = mci->pvt_info; - unsigned long value; - int ret = 0; - - ret = strict_strtoul(data, 16, &value); - if (ret != -EINVAL) { - debugf0("received NBSL= 0x%lx\n", value); - - pvt->ctl_error_info.nbsl = (u32) value; - - return count; - } - return ret; -} - -/* display back what the last NBSL value written */ -static ssize_t amd64_nbsl_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - u32 value; - - value = pvt->ctl_error_info.nbsl; - - return sprintf(data, "%x\n", value); -} - -/* store the NBSH (MCA NB Status High) value user desires */ -static ssize_t amd64_nbsh_store(struct mem_ctl_info *mci, const char *data, - size_t count) -{ - struct amd64_pvt *pvt = mci->pvt_info; - unsigned long value; - int ret = 0; - - ret = strict_strtoul(data, 16, &value); - if (ret != -EINVAL) { - debugf0("received NBSH= 0x%lx\n", value); - - pvt->ctl_error_info.nbsh = (u32) value; - - return count; - } - return ret; -} - -/* display back what the last NBSH value written */ -static ssize_t 
amd64_nbsh_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - u32 value; - - value = pvt->ctl_error_info.nbsh; - - return sprintf(data, "%x\n", value); +#define EDAC_DCT_ATTR_SHOW(reg) \ +static ssize_t amd64_##reg##_show(struct mem_ctl_info *mci, char *data) \ +{ \ + struct amd64_pvt *pvt = mci->pvt_info; \ + return sprintf(data, "0x%016llx\n", (u64)pvt->reg); \ } -/* accept and store the NBCFG (MCA NB Configuration) value user desires */ -static ssize_t amd64_nbcfg_store(struct mem_ctl_info *mci, - const char *data, size_t count) -{ - struct amd64_pvt *pvt = mci->pvt_info; - unsigned long value; - int ret = 0; - - ret = strict_strtoul(data, 16, &value); - if (ret != -EINVAL) { - debugf0("received NBCFG= 0x%lx\n", value); - - pvt->ctl_error_info.nbcfg = (u32) value; - - return count; - } - return ret; -} - -/* various show routines for the controls of a MCI */ -static ssize_t amd64_nbcfg_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - - return sprintf(data, "%x\n", pvt->ctl_error_info.nbcfg); -} - - -static ssize_t amd64_dhar_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - - return sprintf(data, "%x\n", pvt->dhar); -} - - -static ssize_t amd64_dbam_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - - return sprintf(data, "%x\n", pvt->dbam0); -} - - -static ssize_t amd64_topmem_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - - return sprintf(data, "%llx\n", pvt->top_mem); -} - - -static ssize_t amd64_topmem2_show(struct mem_ctl_info *mci, char *data) -{ - struct amd64_pvt *pvt = mci->pvt_info; - - return sprintf(data, "%llx\n", pvt->top_mem2); -} +EDAC_DCT_ATTR_SHOW(dhar); +EDAC_DCT_ATTR_SHOW(dbam0); +EDAC_DCT_ATTR_SHOW(top_mem); +EDAC_DCT_ATTR_SHOW(top_mem2); static ssize_t amd64_hole_show(struct mem_ctl_info *mci, char *data) { @@ -182,38 +31,6 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { { .attr = { - .name = "nbea_ctl", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = amd64_nbea_show, - .store = amd64_nbea_store, - }, - { - .attr = { - .name = "nbsl_ctl", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = amd64_nbsl_show, - .store = amd64_nbsl_store, - }, - { - .attr = { - .name = "nbsh_ctl", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = amd64_nbsh_show, - .store = amd64_nbsh_store, - }, - { - .attr = { - .name = "nbcfg_ctl", - .mode = (S_IRUGO | S_IWUSR) - }, - .show = amd64_nbcfg_show, - .store = amd64_nbcfg_store, - }, - { - .attr = { .name = "dhar", .mode = (S_IRUGO) }, @@ -225,7 +42,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { .name = "dbam", .mode = (S_IRUGO) }, - .show = amd64_dbam_show, + .show = amd64_dbam0_show, .store = NULL, }, { @@ -233,7 +50,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { .name = "topmem", .mode = (S_IRUGO) }, - .show = amd64_topmem_show, + .show = amd64_top_mem_show, .store = NULL, }, { @@ -241,7 +58,7 @@ struct mcidev_sysfs_attribute amd64_dbg_attrs[] = { .name = "topmem2", .mode = (S_IRUGO) }, - .show = amd64_topmem2_show, + .show = amd64_top_mem2_show, .store = NULL, }, { diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c index 070968178a2..2941dca91aa 100644 --- a/drivers/edac/edac_device_sysfs.c +++ b/drivers/edac/edac_device_sysfs.c @@ -13,6 +13,7 @@ #include <linux/ctype.h> #include <linux/module.h> #include <linux/slab.h> +#include <linux/edac.h> #include "edac_core.h" #include "edac_module.h" @@ 
-235,7 +236,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) debugf1("%s()\n", __func__); /* get the /sys/devices/system/edac reference */ - edac_class = edac_get_edac_class(); + edac_class = edac_get_sysfs_class(); if (edac_class == NULL) { debugf1("%s() no edac_class error\n", __func__); err = -ENODEV; @@ -255,7 +256,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) if (!try_module_get(edac_dev->owner)) { err = -ENODEV; - goto err_out; + goto err_mod_get; } /* register */ @@ -282,6 +283,9 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev) err_kobj_reg: module_put(edac_dev->owner); +err_mod_get: + edac_put_sysfs_class(); + err_out: return err; } @@ -290,12 +294,11 @@ err_out: * edac_device_unregister_sysfs_main_kobj: * the '..../edac/<name>' kobject */ -void edac_device_unregister_sysfs_main_kobj( - struct edac_device_ctl_info *edac_dev) +void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev) { debugf0("%s()\n", __func__); debugf4("%s() name of kobject is: %s\n", - __func__, kobject_name(&edac_dev->kobj)); + __func__, kobject_name(&dev->kobj)); /* * Unregister the edac device's kobject and @@ -304,7 +307,8 @@ void edac_device_unregister_sysfs_main_kobj( * a) module_put() this module * b) 'kfree' the memory */ - kobject_put(&edac_dev->kobj); + kobject_put(&dev->kobj); + edac_put_sysfs_class(); } /* edac_dev -> instance information */ diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 3630308e7b8..6b21e25f7a8 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -339,6 +339,9 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) { int status; + if (mci->op_state != OP_RUNNING_POLL) + return; + status = cancel_delayed_work(&mci->work); if (status == 0) { debugf0("%s() not canceled, flush the queue\n", diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c index 8aad94d10c0..a4135860149 100644 --- a/drivers/edac/edac_mc_sysfs.c +++ b/drivers/edac/edac_mc_sysfs.c @@ -11,6 +11,7 @@ #include <linux/ctype.h> #include <linux/slab.h> +#include <linux/edac.h> #include <linux/bug.h> #include "edac_core.h" @@ -1011,13 +1012,13 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) */ int edac_sysfs_setup_mc_kset(void) { - int err = 0; + int err = -EINVAL; struct sysdev_class *edac_class; debugf1("%s()\n", __func__); /* get the /sys/devices/system/edac class reference */ - edac_class = edac_get_edac_class(); + edac_class = edac_get_sysfs_class(); if (edac_class == NULL) { debugf1("%s() no edac_class error=%d\n", __func__, err); goto fail_out; @@ -1028,15 +1029,16 @@ int edac_sysfs_setup_mc_kset(void) if (!mc_kset) { err = -ENOMEM; debugf1("%s() Failed to register '.../edac/mc'\n", __func__); - goto fail_out; + goto fail_kset; } debugf1("%s() Registered '.../edac/mc' kobject\n", __func__); return 0; +fail_kset: + edac_put_sysfs_class(); - /* error unwind stack */ fail_out: return err; } @@ -1049,5 +1051,6 @@ fail_out: void edac_sysfs_teardown_mc_kset(void) { kset_unregister(mc_kset); + edac_put_sysfs_class(); } diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c deleted file mode 100644 index 9014df6f605..00000000000 --- a/drivers/edac/edac_mce_amd.c +++ /dev/null @@ -1,452 +0,0 @@ -#include <linux/module.h> -#include "edac_mce_amd.h" - -static bool report_gart_errors; -static void (*nb_bus_decoder)(int node_id, struct err_regs *regs); - -void amd_report_gart_errors(bool v) -{ - 
report_gart_errors = v; -} -EXPORT_SYMBOL_GPL(amd_report_gart_errors); - -void amd_register_ecc_decoder(void (*f)(int, struct err_regs *)) -{ - nb_bus_decoder = f; -} -EXPORT_SYMBOL_GPL(amd_register_ecc_decoder); - -void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *)) -{ - if (nb_bus_decoder) { - WARN_ON(nb_bus_decoder != f); - - nb_bus_decoder = NULL; - } -} -EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder); - -/* - * string representation for the different MCA reported error types, see F3x48 - * or MSR0000_0411. - */ -const char *tt_msgs[] = { /* transaction type */ - "instruction", - "data", - "generic", - "reserved" -}; -EXPORT_SYMBOL_GPL(tt_msgs); - -const char *ll_msgs[] = { /* cache level */ - "L0", - "L1", - "L2", - "L3/generic" -}; -EXPORT_SYMBOL_GPL(ll_msgs); - -const char *rrrr_msgs[] = { - "generic", - "generic read", - "generic write", - "data read", - "data write", - "inst fetch", - "prefetch", - "evict", - "snoop", - "reserved RRRR= 9", - "reserved RRRR= 10", - "reserved RRRR= 11", - "reserved RRRR= 12", - "reserved RRRR= 13", - "reserved RRRR= 14", - "reserved RRRR= 15" -}; -EXPORT_SYMBOL_GPL(rrrr_msgs); - -const char *pp_msgs[] = { /* participating processor */ - "local node originated (SRC)", - "local node responded to request (RES)", - "local node observed as 3rd party (OBS)", - "generic" -}; -EXPORT_SYMBOL_GPL(pp_msgs); - -const char *to_msgs[] = { - "no timeout", - "timed out" -}; -EXPORT_SYMBOL_GPL(to_msgs); - -const char *ii_msgs[] = { /* memory or i/o */ - "mem access", - "reserved", - "i/o access", - "generic" -}; -EXPORT_SYMBOL_GPL(ii_msgs); - -/* - * Map the 4 or 5 (family-specific) bits of Extended Error code to the - * string table. - */ -const char *ext_msgs[] = { - "K8 ECC error", /* 0_0000b */ - "CRC error on link", /* 0_0001b */ - "Sync error packets on link", /* 0_0010b */ - "Master Abort during link operation", /* 0_0011b */ - "Target Abort during link operation", /* 0_0100b */ - "Invalid GART PTE entry during table walk", /* 0_0101b */ - "Unsupported atomic RMW command received", /* 0_0110b */ - "WDT error: NB transaction timeout", /* 0_0111b */ - "ECC/ChipKill ECC error", /* 0_1000b */ - "SVM DEV Error", /* 0_1001b */ - "Link Data error", /* 0_1010b */ - "Link/L3/Probe Filter Protocol error", /* 0_1011b */ - "NB Internal Arrays Parity error", /* 0_1100b */ - "DRAM Address/Control Parity error", /* 0_1101b */ - "Link Transmission error", /* 0_1110b */ - "GART/DEV Table Walk Data error" /* 0_1111b */ - "Res 0x100 error", /* 1_0000b */ - "Res 0x101 error", /* 1_0001b */ - "Res 0x102 error", /* 1_0010b */ - "Res 0x103 error", /* 1_0011b */ - "Res 0x104 error", /* 1_0100b */ - "Res 0x105 error", /* 1_0101b */ - "Res 0x106 error", /* 1_0110b */ - "Res 0x107 error", /* 1_0111b */ - "Res 0x108 error", /* 1_1000b */ - "Res 0x109 error", /* 1_1001b */ - "Res 0x10A error", /* 1_1010b */ - "Res 0x10B error", /* 1_1011b */ - "ECC error in L3 Cache Data", /* 1_1100b */ - "L3 Cache Tag error", /* 1_1101b */ - "L3 Cache LRU Parity error", /* 1_1110b */ - "Probe Filter error" /* 1_1111b */ -}; -EXPORT_SYMBOL_GPL(ext_msgs); - -static void amd_decode_dc_mce(u64 mc0_status) -{ - u32 ec = mc0_status & 0xffff; - u32 xec = (mc0_status >> 16) & 0xf; - - pr_emerg("Data Cache Error"); - - if (xec == 1 && TLB_ERROR(ec)) - pr_cont(": %s TLB multimatch.\n", LL_MSG(ec)); - else if (xec == 0) { - if (mc0_status & (1ULL << 40)) - pr_cont(" during Data Scrub.\n"); - else if (TLB_ERROR(ec)) - pr_cont(": %s TLB parity error.\n", LL_MSG(ec)); - else if (MEM_ERROR(ec)) { - u8 ll 
= ec & 0x3; - u8 tt = (ec >> 2) & 0x3; - u8 rrrr = (ec >> 4) & 0xf; - - /* see F10h BKDG (31116), Table 92. */ - if (ll == 0x1) { - if (tt != 0x1) - goto wrong_dc_mce; - - pr_cont(": Data/Tag %s error.\n", RRRR_MSG(ec)); - - } else if (ll == 0x2 && rrrr == 0x3) - pr_cont(" during L1 linefill from L2.\n"); - else - goto wrong_dc_mce; - } else if (BUS_ERROR(ec) && boot_cpu_data.x86 == 0xf) - pr_cont(" during system linefill.\n"); - else - goto wrong_dc_mce; - } else - goto wrong_dc_mce; - - return; - -wrong_dc_mce: - pr_warning("Corrupted DC MCE info?\n"); -} - -static void amd_decode_ic_mce(u64 mc1_status) -{ - u32 ec = mc1_status & 0xffff; - u32 xec = (mc1_status >> 16) & 0xf; - - pr_emerg("Instruction Cache Error"); - - if (xec == 1 && TLB_ERROR(ec)) - pr_cont(": %s TLB multimatch.\n", LL_MSG(ec)); - else if (xec == 0) { - if (TLB_ERROR(ec)) - pr_cont(": %s TLB Parity error.\n", LL_MSG(ec)); - else if (BUS_ERROR(ec)) { - if (boot_cpu_data.x86 == 0xf && - (mc1_status & (1ULL << 58))) - pr_cont(" during system linefill.\n"); - else - pr_cont(" during attempted NB data read.\n"); - } else if (MEM_ERROR(ec)) { - u8 ll = ec & 0x3; - u8 rrrr = (ec >> 4) & 0xf; - - if (ll == 0x2) - pr_cont(" during a linefill from L2.\n"); - else if (ll == 0x1) { - - switch (rrrr) { - case 0x5: - pr_cont(": Parity error during " - "data load.\n"); - break; - - case 0x7: - pr_cont(": Copyback Parity/Victim" - " error.\n"); - break; - - case 0x8: - pr_cont(": Tag Snoop error.\n"); - break; - - default: - goto wrong_ic_mce; - break; - } - } - } else - goto wrong_ic_mce; - } else - goto wrong_ic_mce; - - return; - -wrong_ic_mce: - pr_warning("Corrupted IC MCE info?\n"); -} - -static void amd_decode_bu_mce(u64 mc2_status) -{ - u32 ec = mc2_status & 0xffff; - u32 xec = (mc2_status >> 16) & 0xf; - - pr_emerg("Bus Unit Error"); - - if (xec == 0x1) - pr_cont(" in the write data buffers.\n"); - else if (xec == 0x3) - pr_cont(" in the victim data buffers.\n"); - else if (xec == 0x2 && MEM_ERROR(ec)) - pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec)); - else if (xec == 0x0) { - if (TLB_ERROR(ec)) - pr_cont(": %s error in a Page Descriptor Cache or " - "Guest TLB.\n", TT_MSG(ec)); - else if (BUS_ERROR(ec)) - pr_cont(": %s/ECC error in data read from NB: %s.\n", - RRRR_MSG(ec), PP_MSG(ec)); - else if (MEM_ERROR(ec)) { - u8 rrrr = (ec >> 4) & 0xf; - - if (rrrr >= 0x7) - pr_cont(": %s error during data copyback.\n", - RRRR_MSG(ec)); - else if (rrrr <= 0x1) - pr_cont(": %s parity/ECC error during data " - "access from L2.\n", RRRR_MSG(ec)); - else - goto wrong_bu_mce; - } else - goto wrong_bu_mce; - } else - goto wrong_bu_mce; - - return; - -wrong_bu_mce: - pr_warning("Corrupted BU MCE info?\n"); -} - -static void amd_decode_ls_mce(u64 mc3_status) -{ - u32 ec = mc3_status & 0xffff; - u32 xec = (mc3_status >> 16) & 0xf; - - pr_emerg("Load Store Error"); - - if (xec == 0x0) { - u8 rrrr = (ec >> 4) & 0xf; - - if (!BUS_ERROR(ec) || (rrrr != 0x3 && rrrr != 0x4)) - goto wrong_ls_mce; - - pr_cont(" during %s.\n", RRRR_MSG(ec)); - } - return; - -wrong_ls_mce: - pr_warning("Corrupted LS MCE info?\n"); -} - -void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors) -{ - u32 ec = ERROR_CODE(regs->nbsl); - - if (!handle_errors) - return; - - /* - * GART TLB error reporting is disabled by default. Bail out early. 
- */ - if (TLB_ERROR(ec) && !report_gart_errors) - return; - - pr_emerg("Northbridge Error, node %d", node_id); - - /* - * F10h, revD can disable ErrCpu[3:0] so check that first and also the - * value encoding has changed so interpret those differently - */ - if ((boot_cpu_data.x86 == 0x10) && - (boot_cpu_data.x86_model > 7)) { - if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) - pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); - } else { - u8 assoc_cpus = regs->nbsh & 0xf; - - if (assoc_cpus > 0) - pr_cont(", core: %d", fls(assoc_cpus) - 1); - - pr_cont("\n"); - } - - pr_emerg("%s.\n", EXT_ERR_MSG(regs->nbsl)); - - if (BUS_ERROR(ec) && nb_bus_decoder) - nb_bus_decoder(node_id, regs); -} -EXPORT_SYMBOL_GPL(amd_decode_nb_mce); - -static void amd_decode_fr_mce(u64 mc5_status) -{ - /* we have only one error signature so match all fields at once. */ - if ((mc5_status & 0xffff) == 0x0f0f) - pr_emerg(" FR Error: CPU Watchdog timer expire.\n"); - else - pr_warning("Corrupted FR MCE info?\n"); -} - -static inline void amd_decode_err_code(unsigned int ec) -{ - if (TLB_ERROR(ec)) { - pr_emerg("Transaction: %s, Cache Level %s\n", - TT_MSG(ec), LL_MSG(ec)); - } else if (MEM_ERROR(ec)) { - pr_emerg("Transaction: %s, Type: %s, Cache Level: %s", - RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec)); - } else if (BUS_ERROR(ec)) { - pr_emerg("Transaction type: %s(%s), %s, Cache Level: %s, " - "Participating Processor: %s\n", - RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec), - PP_MSG(ec)); - } else - pr_warning("Huh? Unknown MCE error 0x%x\n", ec); -} - -static int amd_decode_mce(struct notifier_block *nb, unsigned long val, - void *data) -{ - struct mce *m = (struct mce *)data; - struct err_regs regs; - int node, ecc; - - pr_emerg("MC%d_STATUS: ", m->bank); - - pr_cont("%sorrected error, other errors lost: %s, " - "CPU context corrupt: %s", - ((m->status & MCI_STATUS_UC) ? "Unc" : "C"), - ((m->status & MCI_STATUS_OVER) ? "yes" : "no"), - ((m->status & MCI_STATUS_PCC) ? "yes" : "no")); - - /* do the two bits[14:13] together */ - ecc = (m->status >> 45) & 0x3; - if (ecc) - pr_cont(", %sECC Error", ((ecc == 2) ? 
"C" : "U")); - - pr_cont("\n"); - - switch (m->bank) { - case 0: - amd_decode_dc_mce(m->status); - break; - - case 1: - amd_decode_ic_mce(m->status); - break; - - case 2: - amd_decode_bu_mce(m->status); - break; - - case 3: - amd_decode_ls_mce(m->status); - break; - - case 4: - regs.nbsl = (u32) m->status; - regs.nbsh = (u32)(m->status >> 32); - regs.nbeal = (u32) m->addr; - regs.nbeah = (u32)(m->addr >> 32); - node = amd_get_nb_id(m->extcpu); - - amd_decode_nb_mce(node, ®s, 1); - break; - - case 5: - amd_decode_fr_mce(m->status); - break; - - default: - break; - } - - amd_decode_err_code(m->status & 0xffff); - - return NOTIFY_STOP; -} - -static struct notifier_block amd_mce_dec_nb = { - .notifier_call = amd_decode_mce, -}; - -static int __init mce_amd_init(void) -{ - /* - * We can decode MCEs for K8, F10h and F11h CPUs: - */ - if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) - return 0; - - if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) - return 0; - - atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); - - return 0; -} -early_initcall(mce_amd_init); - -#ifdef MODULE -static void __exit mce_amd_exit(void) -{ - atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); -} - -MODULE_DESCRIPTION("AMD MCE decoder"); -MODULE_ALIAS("edac-mce-amd"); -MODULE_LICENSE("GPL"); -module_exit(mce_amd_exit); -#endif diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index 7e1374afd96..be4b075c309 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c @@ -27,15 +27,6 @@ EXPORT_SYMBOL_GPL(edac_debug_level); struct workqueue_struct *edac_workqueue; /* - * sysfs object: /sys/devices/system/edac - * need to export to other files in this modules - */ -static struct sysdev_class edac_class = { - .name = "edac", -}; -static int edac_class_valid; - -/* * edac_op_state_to_string() */ char *edac_op_state_to_string(int opstate) @@ -55,60 +46,6 @@ char *edac_op_state_to_string(int opstate) } /* - * edac_get_edac_class() - * - * return pointer to the edac class of 'edac' - */ -struct sysdev_class *edac_get_edac_class(void) -{ - struct sysdev_class *classptr = NULL; - - if (edac_class_valid) - classptr = &edac_class; - - return classptr; -} - -/* - * edac_register_sysfs_edac_name() - * - * register the 'edac' into /sys/devices/system - * - * return: - * 0 success - * !0 error - */ -static int edac_register_sysfs_edac_name(void) -{ - int err; - - /* create the /sys/devices/system/edac directory */ - err = sysdev_class_register(&edac_class); - - if (err) { - debugf1("%s() error=%d\n", __func__, err); - return err; - } - - edac_class_valid = 1; - return 0; -} - -/* - * sysdev_class_unregister() - * - * unregister the 'edac' from /sys/devices/system - */ -static void edac_unregister_sysfs_edac_name(void) -{ - /* only if currently registered, then unregister it */ - if (edac_class_valid) - sysdev_class_unregister(&edac_class); - - edac_class_valid = 0; -} - -/* * edac_workqueue_setup * initialize the edac work queue for polling operations */ @@ -154,21 +91,11 @@ static int __init edac_init(void) edac_pci_clear_parity_errors(); /* - * perform the registration of the /sys/devices/system/edac class object - */ - if (edac_register_sysfs_edac_name()) { - edac_printk(KERN_ERR, EDAC_MC, - "Error initializing 'edac' kobject\n"); - err = -ENODEV; - goto error; - } - - /* * now set up the mc_kset under the edac class object */ err = edac_sysfs_setup_mc_kset(); if (err) - goto sysfs_setup_fail; + goto error; /* Setup/Initialize the workq for this core 
*/ err = edac_workqueue_setup(); @@ -183,9 +110,6 @@ static int __init edac_init(void) workq_fail: edac_sysfs_teardown_mc_kset(); -sysfs_setup_fail: - edac_unregister_sysfs_edac_name(); - error: return err; } @@ -201,7 +125,6 @@ static void __exit edac_exit(void) /* tear down the various subsystems */ edac_workqueue_teardown(); edac_sysfs_teardown_mc_kset(); - edac_unregister_sysfs_edac_name(); } /* diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index 233d4798c3a..17aabb7b90e 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h @@ -42,7 +42,6 @@ extern void edac_device_unregister_sysfs_main_kobj( struct edac_device_ctl_info *edac_dev); extern int edac_device_create_sysfs(struct edac_device_ctl_info *edac_dev); extern void edac_device_remove_sysfs(struct edac_device_ctl_info *edac_dev); -extern struct sysdev_class *edac_get_edac_class(void); /* edac core workqueue: single CPU mode */ extern struct workqueue_struct *edac_workqueue; diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index c39697df9cb..023b01cb517 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c @@ -7,7 +7,7 @@ * */ #include <linux/module.h> -#include <linux/sysdev.h> +#include <linux/edac.h> #include <linux/slab.h> #include <linux/ctype.h> @@ -354,7 +354,7 @@ static int edac_pci_main_kobj_setup(void) /* First time, so create the main kobject and its * controls and atributes */ - edac_class = edac_get_edac_class(); + edac_class = edac_get_sysfs_class(); if (edac_class == NULL) { debugf1("%s() no edac_class\n", __func__); err = -ENODEV; @@ -368,7 +368,7 @@ static int edac_pci_main_kobj_setup(void) if (!try_module_get(THIS_MODULE)) { debugf1("%s() try_module_get() failed\n", __func__); err = -ENODEV; - goto decrement_count_fail; + goto mod_get_fail; } edac_pci_top_main_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL); @@ -403,6 +403,9 @@ kobject_init_and_add_fail: kzalloc_fail: module_put(THIS_MODULE); +mod_get_fail: + edac_put_sysfs_class(); + decrement_count_fail: /* if are on this error exit, nothing to tear down */ atomic_dec(&edac_pci_sysfs_refcount); @@ -429,6 +432,7 @@ static void edac_pci_main_kobj_teardown(void) __func__); kobject_put(edac_pci_top_main_kobj); } + edac_put_sysfs_class(); } /* diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c index 20b428aa155..aab970760b7 100644 --- a/drivers/edac/edac_stub.c +++ b/drivers/edac/edac_stub.c @@ -3,10 +3,13 @@ * * Author: Dave Jiang <djiang@mvista.com> * - * 2007 (c) MontaVista Software, Inc. This file is licensed under - * the terms of the GNU General Public License version 2. This program - * is licensed "as is" without any warranty of any kind, whether express - * or implied. + * 2007 (c) MontaVista Software, Inc. + * 2010 (c) Advanced Micro Devices Inc. + * Borislav Petkov <borislav.petkov@amd.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
* */ #include <linux/module.h> @@ -23,6 +26,8 @@ EXPORT_SYMBOL_GPL(edac_handlers); int edac_err_assert = 0; EXPORT_SYMBOL_GPL(edac_err_assert); +static atomic_t edac_class_valid = ATOMIC_INIT(0); + /* * called to determine if there is an EDAC driver interested in * knowing an event (such as NMI) occurred @@ -44,3 +49,41 @@ void edac_atomic_assert_error(void) edac_err_assert++; } EXPORT_SYMBOL_GPL(edac_atomic_assert_error); + +/* + * sysfs object: /sys/devices/system/edac + * need to export to other files + */ +struct sysdev_class edac_class = { + .name = "edac", +}; +EXPORT_SYMBOL_GPL(edac_class); + +/* return pointer to the 'edac' node in sysfs */ +struct sysdev_class *edac_get_sysfs_class(void) +{ + int err = 0; + + if (atomic_read(&edac_class_valid)) + goto out; + + /* create the /sys/devices/system/edac directory */ + err = sysdev_class_register(&edac_class); + if (err) { + printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n"); + return NULL; + } + +out: + atomic_inc(&edac_class_valid); + return &edac_class; +} +EXPORT_SYMBOL_GPL(edac_get_sysfs_class); + +void edac_put_sysfs_class(void) +{ + /* last user unregisters it */ + if (atomic_dec_and_test(&edac_class_valid)) + sysdev_class_unregister(&edac_class); +} +EXPORT_SYMBOL_GPL(edac_put_sysfs_class); diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index e0187d16dd7..0fd5b85a0f7 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -1140,6 +1140,7 @@ static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = { ATTR_COUNTER(0), ATTR_COUNTER(1), ATTR_COUNTER(2), + { .attr = { .name = NULL } } }; static struct mcidev_sysfs_group i7core_udimm_counters = { diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c new file mode 100644 index 00000000000..c0181093b49 --- /dev/null +++ b/drivers/edac/mce_amd.c @@ -0,0 +1,680 @@ +#include <linux/module.h> +#include <linux/slab.h> + +#include "mce_amd.h" + +static struct amd_decoder_ops *fam_ops; + +static u8 nb_err_cpumask = 0xf; + +static bool report_gart_errors; +static void (*nb_bus_decoder)(int node_id, struct mce *m, u32 nbcfg); + +void amd_report_gart_errors(bool v) +{ + report_gart_errors = v; +} +EXPORT_SYMBOL_GPL(amd_report_gart_errors); + +void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32)) +{ + nb_bus_decoder = f; +} +EXPORT_SYMBOL_GPL(amd_register_ecc_decoder); + +void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32)) +{ + if (nb_bus_decoder) { + WARN_ON(nb_bus_decoder != f); + + nb_bus_decoder = NULL; + } +} +EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder); + +/* + * string representation for the different MCA reported error types, see F3x48 + * or MSR0000_0411. 
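The string tables that follow are indexed by bit fields carved out of the low word of MCi_STATUS, matching the unpacking done throughout this file. A small sketch of that unpacking; the helper name decode_mca_fields is hypothetical:

	#include <linux/types.h>
	#include <linux/printk.h>

	static void decode_mca_fields(u64 status)
	{
		u16 ec  = status & 0xffff;	/* MCA error code                        */
		u8  ll  = ec & 0x3;		/* cache level, indexes ll_msgs          */
		u8  tt  = (ec >> 2) & 0x3;	/* transaction type, indexes tt_msgs     */
		u8  r4  = (ec >> 4) & 0xf;	/* memory transaction, indexes rrrr_msgs */
		u8  xec = (status >> 16) & 0xf;	/* extended error code                   */

		pr_info("ec=0x%04x ll=%u tt=%u r4=%u xec=%u\n", ec, ll, tt, r4, xec);
	}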
+ */ + +/* transaction type */ +const char *tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" }; +EXPORT_SYMBOL_GPL(tt_msgs); + +/* cache level */ +const char *ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" }; +EXPORT_SYMBOL_GPL(ll_msgs); + +/* memory transaction type */ +const char *rrrr_msgs[] = { + "GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP" +}; +EXPORT_SYMBOL_GPL(rrrr_msgs); + +/* participating processor */ +const char *pp_msgs[] = { "SRC", "RES", "OBS", "GEN" }; +EXPORT_SYMBOL_GPL(pp_msgs); + +/* request timeout */ +const char *to_msgs[] = { "no timeout", "timed out" }; +EXPORT_SYMBOL_GPL(to_msgs); + +/* memory or i/o */ +const char *ii_msgs[] = { "MEM", "RESV", "IO", "GEN" }; +EXPORT_SYMBOL_GPL(ii_msgs); + +static const char *f10h_nb_mce_desc[] = { + "HT link data error", + "Protocol error (link, L3, probe filter, etc.)", + "Parity error in NB-internal arrays", + "Link Retry due to IO link transmission error", + "L3 ECC data cache error", + "ECC error in L3 cache tag", + "L3 LRU parity bits error", + "ECC Error in the Probe Filter directory" +}; + +static bool f12h_dc_mce(u16 ec) +{ + bool ret = false; + + if (MEM_ERROR(ec)) { + u8 ll = ec & 0x3; + ret = true; + + if (ll == LL_L2) + pr_cont("during L1 linefill from L2.\n"); + else if (ll == LL_L1) + pr_cont("Data/Tag %s error.\n", RRRR_MSG(ec)); + else + ret = false; + } + return ret; +} + +static bool f10h_dc_mce(u16 ec) +{ + u8 r4 = (ec >> 4) & 0xf; + u8 ll = ec & 0x3; + + if (r4 == R4_GEN && ll == LL_L1) { + pr_cont("during data scrub.\n"); + return true; + } + return f12h_dc_mce(ec); +} + +static bool k8_dc_mce(u16 ec) +{ + if (BUS_ERROR(ec)) { + pr_cont("during system linefill.\n"); + return true; + } + + return f10h_dc_mce(ec); +} + +static bool f14h_dc_mce(u16 ec) +{ + u8 r4 = (ec >> 4) & 0xf; + u8 ll = ec & 0x3; + u8 tt = (ec >> 2) & 0x3; + u8 ii = tt; + bool ret = true; + + if (MEM_ERROR(ec)) { + + if (tt != TT_DATA || ll != LL_L1) + return false; + + switch (r4) { + case R4_DRD: + case R4_DWR: + pr_cont("Data/Tag parity error due to %s.\n", + (r4 == R4_DRD ? "load/hw prf" : "store")); + break; + case R4_EVICT: + pr_cont("Copyback parity error on a tag miss.\n"); + break; + case R4_SNOOP: + pr_cont("Tag parity error during snoop.\n"); + break; + default: + ret = false; + } + } else if (BUS_ERROR(ec)) { + + if ((ii != II_MEM && ii != II_IO) || ll != LL_LG) + return false; + + pr_cont("System read data error on a "); + + switch (r4) { + case R4_RD: + pr_cont("TLB reload.\n"); + break; + case R4_DWR: + pr_cont("store.\n"); + break; + case R4_DRD: + pr_cont("load.\n"); + break; + default: + ret = false; + } + } else { + ret = false; + } + + return ret; +} + +static void amd_decode_dc_mce(struct mce *m) +{ + u16 ec = m->status & 0xffff; + u8 xec = (m->status >> 16) & 0xf; + + pr_emerg(HW_ERR "Data Cache Error: "); + + /* TLB error signatures are the same across families */ + if (TLB_ERROR(ec)) { + u8 tt = (ec >> 2) & 0x3; + + if (tt == TT_DATA) { + pr_cont("%s TLB %s.\n", LL_MSG(ec), + (xec ? 
"multimatch" : "parity error")); + return; + } + else + goto wrong_dc_mce; + } + + if (!fam_ops->dc_mce(ec)) + goto wrong_dc_mce; + + return; + +wrong_dc_mce: + pr_emerg(HW_ERR "Corrupted DC MCE info?\n"); +} + +static bool k8_ic_mce(u16 ec) +{ + u8 ll = ec & 0x3; + u8 r4 = (ec >> 4) & 0xf; + bool ret = true; + + if (!MEM_ERROR(ec)) + return false; + + if (ll == 0x2) + pr_cont("during a linefill from L2.\n"); + else if (ll == 0x1) { + switch (r4) { + case R4_IRD: + pr_cont("Parity error during data load.\n"); + break; + + case R4_EVICT: + pr_cont("Copyback Parity/Victim error.\n"); + break; + + case R4_SNOOP: + pr_cont("Tag Snoop error.\n"); + break; + + default: + ret = false; + break; + } + } else + ret = false; + + return ret; +} + +static bool f14h_ic_mce(u16 ec) +{ + u8 ll = ec & 0x3; + u8 tt = (ec >> 2) & 0x3; + u8 r4 = (ec >> 4) & 0xf; + bool ret = true; + + if (MEM_ERROR(ec)) { + if (tt != 0 || ll != 1) + ret = false; + + if (r4 == R4_IRD) + pr_cont("Data/tag array parity error for a tag hit.\n"); + else if (r4 == R4_SNOOP) + pr_cont("Tag error during snoop/victimization.\n"); + else + ret = false; + } + return ret; +} + +static void amd_decode_ic_mce(struct mce *m) +{ + u16 ec = m->status & 0xffff; + u8 xec = (m->status >> 16) & 0xf; + + pr_emerg(HW_ERR "Instruction Cache Error: "); + + if (TLB_ERROR(ec)) + pr_cont("%s TLB %s.\n", LL_MSG(ec), + (xec ? "multimatch" : "parity error")); + else if (BUS_ERROR(ec)) { + bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58))); + + pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read")); + } else if (fam_ops->ic_mce(ec)) + ; + else + pr_emerg(HW_ERR "Corrupted IC MCE info?\n"); +} + +static void amd_decode_bu_mce(struct mce *m) +{ + u32 ec = m->status & 0xffff; + u32 xec = (m->status >> 16) & 0xf; + + pr_emerg(HW_ERR "Bus Unit Error"); + + if (xec == 0x1) + pr_cont(" in the write data buffers.\n"); + else if (xec == 0x3) + pr_cont(" in the victim data buffers.\n"); + else if (xec == 0x2 && MEM_ERROR(ec)) + pr_cont(": %s error in the L2 cache tags.\n", RRRR_MSG(ec)); + else if (xec == 0x0) { + if (TLB_ERROR(ec)) + pr_cont(": %s error in a Page Descriptor Cache or " + "Guest TLB.\n", TT_MSG(ec)); + else if (BUS_ERROR(ec)) + pr_cont(": %s/ECC error in data read from NB: %s.\n", + RRRR_MSG(ec), PP_MSG(ec)); + else if (MEM_ERROR(ec)) { + u8 rrrr = (ec >> 4) & 0xf; + + if (rrrr >= 0x7) + pr_cont(": %s error during data copyback.\n", + RRRR_MSG(ec)); + else if (rrrr <= 0x1) + pr_cont(": %s parity/ECC error during data " + "access from L2.\n", RRRR_MSG(ec)); + else + goto wrong_bu_mce; + } else + goto wrong_bu_mce; + } else + goto wrong_bu_mce; + + return; + +wrong_bu_mce: + pr_emerg(HW_ERR "Corrupted BU MCE info?\n"); +} + +static void amd_decode_ls_mce(struct mce *m) +{ + u16 ec = m->status & 0xffff; + u8 xec = (m->status >> 16) & 0xf; + + if (boot_cpu_data.x86 == 0x14) { + pr_emerg("You shouldn't be seeing an LS MCE on this cpu family," + " please report on LKML.\n"); + return; + } + + pr_emerg(HW_ERR "Load Store Error"); + + if (xec == 0x0) { + u8 r4 = (ec >> 4) & 0xf; + + if (!BUS_ERROR(ec) || (r4 != R4_DRD && r4 != R4_DWR)) + goto wrong_ls_mce; + + pr_cont(" during %s.\n", RRRR_MSG(ec)); + } else + goto wrong_ls_mce; + + return; + +wrong_ls_mce: + pr_emerg(HW_ERR "Corrupted LS MCE info?\n"); +} + +static bool k8_nb_mce(u16 ec, u8 xec) +{ + bool ret = true; + + switch (xec) { + case 0x1: + pr_cont("CRC error detected on HT link.\n"); + break; + + case 0x5: + pr_cont("Invalid GART PTE entry during GART table walk.\n"); + 
break; + + case 0x6: + pr_cont("Unsupported atomic RMW received from an IO link.\n"); + break; + + case 0x0: + case 0x8: + if (boot_cpu_data.x86 == 0x11) + return false; + + pr_cont("DRAM ECC error detected on the NB.\n"); + break; + + case 0xd: + pr_cont("Parity error on the DRAM addr/ctl signals.\n"); + break; + + default: + ret = false; + break; + } + + return ret; +} + +static bool f10h_nb_mce(u16 ec, u8 xec) +{ + bool ret = true; + u8 offset = 0; + + if (k8_nb_mce(ec, xec)) + return true; + + switch(xec) { + case 0xa ... 0xc: + offset = 10; + break; + + case 0xe: + offset = 11; + break; + + case 0xf: + if (TLB_ERROR(ec)) + pr_cont("GART Table Walk data error.\n"); + else if (BUS_ERROR(ec)) + pr_cont("DMA Exclusion Vector Table Walk error.\n"); + else + ret = false; + + goto out; + break; + + case 0x1c ... 0x1f: + offset = 24; + break; + + default: + ret = false; + + goto out; + break; + } + + pr_cont("%s.\n", f10h_nb_mce_desc[xec - offset]); + +out: + return ret; +} + +static bool nb_noop_mce(u16 ec, u8 xec) +{ + return false; +} + +void amd_decode_nb_mce(int node_id, struct mce *m, u32 nbcfg) +{ + u8 xec = (m->status >> 16) & 0x1f; + u16 ec = m->status & 0xffff; + u32 nbsh = (u32)(m->status >> 32); + + pr_emerg(HW_ERR "Northbridge Error, node %d: ", node_id); + + /* + * F10h, revD can disable ErrCpu[3:0] so check that first and also the + * value encoding has changed so interpret those differently + */ + if ((boot_cpu_data.x86 == 0x10) && + (boot_cpu_data.x86_model > 7)) { + if (nbsh & K8_NBSH_ERR_CPU_VAL) + pr_cont(", core: %u", (u8)(nbsh & nb_err_cpumask)); + } else { + u8 assoc_cpus = nbsh & nb_err_cpumask; + + if (assoc_cpus > 0) + pr_cont(", core: %d", fls(assoc_cpus) - 1); + } + + switch (xec) { + case 0x2: + pr_cont("Sync error (sync packets on HT link detected).\n"); + return; + + case 0x3: + pr_cont("HT Master abort.\n"); + return; + + case 0x4: + pr_cont("HT Target abort.\n"); + return; + + case 0x7: + pr_cont("NB Watchdog timeout.\n"); + return; + + case 0x9: + pr_cont("SVM DMA Exclusion Vector error.\n"); + return; + + default: + break; + } + + if (!fam_ops->nb_mce(ec, xec)) + goto wrong_nb_mce; + + if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10) + if ((xec == 0x8 || xec == 0x0) && nb_bus_decoder) + nb_bus_decoder(node_id, m, nbcfg); + + return; + +wrong_nb_mce: + pr_emerg(HW_ERR "Corrupted NB MCE info?\n"); +} +EXPORT_SYMBOL_GPL(amd_decode_nb_mce); + +static void amd_decode_fr_mce(struct mce *m) +{ + if (boot_cpu_data.x86 == 0xf || + boot_cpu_data.x86 == 0x11) + goto wrong_fr_mce; + + /* we have only one error signature so match all fields at once. */ + if ((m->status & 0xffff) == 0x0f0f) { + pr_emerg(HW_ERR "FR Error: CPU Watchdog timer expire.\n"); + return; + } + +wrong_fr_mce: + pr_emerg(HW_ERR "Corrupted FR MCE info?\n"); +} + +static inline void amd_decode_err_code(u16 ec) +{ + if (TLB_ERROR(ec)) { + pr_emerg(HW_ERR "Transaction: %s, Cache Level: %s\n", + TT_MSG(ec), LL_MSG(ec)); + } else if (MEM_ERROR(ec)) { + pr_emerg(HW_ERR "Transaction: %s, Type: %s, Cache Level: %s\n", + RRRR_MSG(ec), TT_MSG(ec), LL_MSG(ec)); + } else if (BUS_ERROR(ec)) { + pr_emerg(HW_ERR "Transaction: %s (%s), %s, Cache Level: %s, " + "Participating Processor: %s\n", + RRRR_MSG(ec), II_MSG(ec), TO_MSG(ec), LL_MSG(ec), + PP_MSG(ec)); + } else + pr_emerg(HW_ERR "Huh? Unknown MCE error 0x%x\n", ec); +} + +/* + * Filter out unwanted MCE signatures here. 
+ */ +static bool amd_filter_mce(struct mce *m) +{ + u8 xec = (m->status >> 16) & 0x1f; + + /* + * NB GART TLB error reporting is disabled by default. + */ + if (m->bank == 4 && xec == 0x5 && !report_gart_errors) + return true; + + return false; +} + +int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) +{ + struct mce *m = (struct mce *)data; + int node, ecc; + + if (amd_filter_mce(m)) + return NOTIFY_STOP; + + pr_emerg(HW_ERR "MC%d_STATUS: ", m->bank); + + pr_cont("%sorrected error, other errors lost: %s, " + "CPU context corrupt: %s", + ((m->status & MCI_STATUS_UC) ? "Unc" : "C"), + ((m->status & MCI_STATUS_OVER) ? "yes" : "no"), + ((m->status & MCI_STATUS_PCC) ? "yes" : "no")); + + /* do the two bits[14:13] together */ + ecc = (m->status >> 45) & 0x3; + if (ecc) + pr_cont(", %sECC Error", ((ecc == 2) ? "C" : "U")); + + pr_cont("\n"); + + switch (m->bank) { + case 0: + amd_decode_dc_mce(m); + break; + + case 1: + amd_decode_ic_mce(m); + break; + + case 2: + amd_decode_bu_mce(m); + break; + + case 3: + amd_decode_ls_mce(m); + break; + + case 4: + node = amd_get_nb_id(m->extcpu); + amd_decode_nb_mce(node, m, 0); + break; + + case 5: + amd_decode_fr_mce(m); + break; + + default: + break; + } + + amd_decode_err_code(m->status & 0xffff); + + return NOTIFY_STOP; +} +EXPORT_SYMBOL_GPL(amd_decode_mce); + +static struct notifier_block amd_mce_dec_nb = { + .notifier_call = amd_decode_mce, +}; + +static int __init mce_amd_init(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) + return 0; + + if ((boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x12) && + (boot_cpu_data.x86 != 0x14 || boot_cpu_data.x86_model > 0xf)) + return 0; + + fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); + if (!fam_ops) + return -ENOMEM; + + switch (boot_cpu_data.x86) { + case 0xf: + fam_ops->dc_mce = k8_dc_mce; + fam_ops->ic_mce = k8_ic_mce; + fam_ops->nb_mce = k8_nb_mce; + break; + + case 0x10: + fam_ops->dc_mce = f10h_dc_mce; + fam_ops->ic_mce = k8_ic_mce; + fam_ops->nb_mce = f10h_nb_mce; + break; + + case 0x11: + fam_ops->dc_mce = k8_dc_mce; + fam_ops->ic_mce = k8_ic_mce; + fam_ops->nb_mce = f10h_nb_mce; + break; + + case 0x12: + fam_ops->dc_mce = f12h_dc_mce; + fam_ops->ic_mce = k8_ic_mce; + fam_ops->nb_mce = nb_noop_mce; + break; + + case 0x14: + nb_err_cpumask = 0x3; + fam_ops->dc_mce = f14h_dc_mce; + fam_ops->ic_mce = f14h_ic_mce; + fam_ops->nb_mce = nb_noop_mce; + break; + + default: + printk(KERN_WARNING "Huh? 
What family is that: %d?!\n", + boot_cpu_data.x86); + kfree(fam_ops); + return -EINVAL; + } + + pr_info("MCE: In-kernel MCE decoding enabled.\n"); + + atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb); + + return 0; +} +early_initcall(mce_amd_init); + +#ifdef MODULE +static void __exit mce_amd_exit(void) +{ + atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb); + kfree(fam_ops); +} + +MODULE_DESCRIPTION("AMD MCE decoder"); +MODULE_ALIAS("edac-mce-amd"); +MODULE_LICENSE("GPL"); +module_exit(mce_amd_exit); +#endif diff --git a/drivers/edac/edac_mce_amd.h b/drivers/edac/mce_amd.h index df23ee065f7..35f6e0e3b29 100644 --- a/drivers/edac/edac_mce_amd.h +++ b/drivers/edac/mce_amd.h @@ -1,11 +1,14 @@ #ifndef _EDAC_MCE_AMD_H #define _EDAC_MCE_AMD_H +#include <linux/notifier.h> + #include <asm/mce.h> +#define BIT_64(n) (U64_C(1) << (n)) + #define ERROR_CODE(x) ((x) & 0xffff) #define EXT_ERROR_CODE(x) (((x) >> 16) & 0x1f) -#define EXT_ERR_MSG(x) ext_msgs[EXT_ERROR_CODE(x)] #define LOW_SYNDROME(x) (((x) >> 15) & 0xff) #define HIGH_SYNDROME(x) (((x) >> 24) & 0xff) @@ -20,13 +23,14 @@ #define II_MSG(x) ii_msgs[II(x)] #define LL(x) (((x) >> 0) & 0x3) #define LL_MSG(x) ll_msgs[LL(x)] -#define RRRR(x) (((x) >> 4) & 0xf) -#define RRRR_MSG(x) rrrr_msgs[RRRR(x)] #define TO(x) (((x) >> 8) & 0x1) #define TO_MSG(x) to_msgs[TO(x)] #define PP(x) (((x) >> 9) & 0x3) #define PP_MSG(x) pp_msgs[PP(x)] +#define RRRR(x) (((x) >> 4) & 0xf) +#define RRRR_MSG(x) ((RRRR(x) < 9) ? rrrr_msgs[RRRR(x)] : "Wrong R4!") + #define K8_NBSH 0x4C #define K8_NBSH_VALID_BIT BIT(31) @@ -41,13 +45,45 @@ #define K8_NBSH_UECC BIT(13) #define K8_NBSH_ERR_SCRUBER BIT(8) +enum tt_ids { + TT_INSTR = 0, + TT_DATA, + TT_GEN, + TT_RESV, +}; + +enum ll_ids { + LL_RESV = 0, + LL_L1, + LL_L2, + LL_LG, +}; + +enum ii_ids { + II_MEM = 0, + II_RESV, + II_IO, + II_GEN, +}; + +enum rrrr_ids { + R4_GEN = 0, + R4_RD, + R4_WR, + R4_DRD, + R4_DWR, + R4_IRD, + R4_PREF, + R4_EVICT, + R4_SNOOP, +}; + extern const char *tt_msgs[]; extern const char *ll_msgs[]; extern const char *rrrr_msgs[]; extern const char *pp_msgs[]; extern const char *to_msgs[]; extern const char *ii_msgs[]; -extern const char *ext_msgs[]; /* * relevant NB regs @@ -60,10 +96,19 @@ struct err_regs { u32 nbeal; }; +/* + * per-family decoder ops + */ +struct amd_decoder_ops { + bool (*dc_mce)(u16); + bool (*ic_mce)(u16); + bool (*nb_mce)(u16, u8); +}; void amd_report_gart_errors(bool); -void amd_register_ecc_decoder(void (*f)(int, struct err_regs *)); -void amd_unregister_ecc_decoder(void (*f)(int, struct err_regs *)); -void amd_decode_nb_mce(int, struct err_regs *, int); +void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32)); +void amd_unregister_ecc_decoder(void (*f)(int, struct mce *, u32)); +void amd_decode_nb_mce(int, struct mce *, u32); +int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data); #endif /* _EDAC_MCE_AMD_H */ diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c new file mode 100644 index 00000000000..8d0688f36d4 --- /dev/null +++ b/drivers/edac/mce_amd_inj.c @@ -0,0 +1,171 @@ +/* + * A simple MCE injection facility for testing the MCE decoding code. This + * driver should be built as module so that it can be loaded on production + * kernels for testing purposes. + * + * This file may be distributed under the terms of the GNU General Public + * License version 2. + * + * Copyright (c) 2010: Borislav Petkov <borislav.petkov@amd.com> + * Advanced Micro Devices Inc. 
+ */ + +#include <linux/kobject.h> +#include <linux/sysdev.h> +#include <linux/edac.h> +#include <asm/mce.h> + +#include "mce_amd.h" + +struct edac_mce_attr { + struct attribute attr; + ssize_t (*show) (struct kobject *kobj, struct edac_mce_attr *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, struct edac_mce_attr *attr, + const char *buf, size_t count); +}; + +#define EDAC_MCE_ATTR(_name, _mode, _show, _store) \ +static struct edac_mce_attr mce_attr_##_name = __ATTR(_name, _mode, _show, _store) + +static struct kobject *mce_kobj; + +/* + * Collect all the MCi_XXX settings + */ +static struct mce i_mce; + +#define MCE_INJECT_STORE(reg) \ +static ssize_t edac_inject_##reg##_store(struct kobject *kobj, \ + struct edac_mce_attr *attr, \ + const char *data, size_t count)\ +{ \ + int ret = 0; \ + unsigned long value; \ + \ + ret = strict_strtoul(data, 16, &value); \ + if (ret < 0) \ + printk(KERN_ERR "Error writing MCE " #reg " field.\n"); \ + \ + i_mce.reg = value; \ + \ + return count; \ +} + +MCE_INJECT_STORE(status); +MCE_INJECT_STORE(misc); +MCE_INJECT_STORE(addr); + +#define MCE_INJECT_SHOW(reg) \ +static ssize_t edac_inject_##reg##_show(struct kobject *kobj, \ + struct edac_mce_attr *attr, \ + char *buf) \ +{ \ + return sprintf(buf, "0x%016llx\n", i_mce.reg); \ +} + +MCE_INJECT_SHOW(status); +MCE_INJECT_SHOW(misc); +MCE_INJECT_SHOW(addr); + +EDAC_MCE_ATTR(status, 0644, edac_inject_status_show, edac_inject_status_store); +EDAC_MCE_ATTR(misc, 0644, edac_inject_misc_show, edac_inject_misc_store); +EDAC_MCE_ATTR(addr, 0644, edac_inject_addr_show, edac_inject_addr_store); + +/* + * This denotes into which bank we're injecting and triggers + * the injection, at the same time. + */ +static ssize_t edac_inject_bank_store(struct kobject *kobj, + struct edac_mce_attr *attr, + const char *data, size_t count) +{ + int ret = 0; + unsigned long value; + + ret = strict_strtoul(data, 10, &value); + if (ret < 0) { + printk(KERN_ERR "Invalid bank value!\n"); + return -EINVAL; + } + + if (value > 5) { + printk(KERN_ERR "Non-existant MCE bank: %lu\n", value); + return -EINVAL; + } + + i_mce.bank = value; + + amd_decode_mce(NULL, 0, &i_mce); + + return count; +} + +static ssize_t edac_inject_bank_show(struct kobject *kobj, + struct edac_mce_attr *attr, char *buf) +{ + return sprintf(buf, "%d\n", i_mce.bank); +} + +EDAC_MCE_ATTR(bank, 0644, edac_inject_bank_show, edac_inject_bank_store); + +static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc, + &mce_attr_addr, &mce_attr_bank +}; + +static int __init edac_init_mce_inject(void) +{ + struct sysdev_class *edac_class = NULL; + int i, err = 0; + + edac_class = edac_get_sysfs_class(); + if (!edac_class) + return -EINVAL; + + mce_kobj = kobject_create_and_add("mce", &edac_class->kset.kobj); + if (!mce_kobj) { + printk(KERN_ERR "Error creating a mce kset.\n"); + err = -ENOMEM; + goto err_mce_kobj; + } + + for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) { + err = sysfs_create_file(mce_kobj, &sysfs_attrs[i]->attr); + if (err) { + printk(KERN_ERR "Error creating %s in sysfs.\n", + sysfs_attrs[i]->attr.name); + goto err_sysfs_create; + } + } + return 0; + +err_sysfs_create: + while (i-- >= 0) + sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); + + kobject_del(mce_kobj); + +err_mce_kobj: + edac_put_sysfs_class(); + + return err; +} + +static void __exit edac_exit_mce_inject(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sysfs_attrs); i++) + sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); + + kobject_del(mce_kobj); + + 
edac_put_sysfs_class(); +} + +module_init(edac_init_mce_inject); +module_exit(edac_exit_mce_inject); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>"); +MODULE_AUTHOR("AMD Inc."); +MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding"); diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c index 8528b10763e..bf184fb59a5 100644 --- a/drivers/firewire/nosy.c +++ b/drivers/firewire/nosy.c @@ -405,6 +405,7 @@ static const struct file_operations nosy_ops = { .poll = nosy_poll, .open = nosy_open, .release = nosy_release, + .llseek = noop_llseek, }; #define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */ diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 1b05896648b..9dcb17d51ae 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -2840,7 +2840,7 @@ static int __devinit pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fw_ohci *ohci; - u32 bus_options, max_receive, link_speed, version, link_enh; + u32 bus_options, max_receive, link_speed, version; u64 guid; int i, err, n_ir, n_it; size_t size; @@ -2894,23 +2894,6 @@ static int __devinit pci_probe(struct pci_dev *dev, if (param_quirks) ohci->quirks = param_quirks; - /* TI OHCI-Lynx and compatible: set recommended configuration bits. */ - if (dev->vendor == PCI_VENDOR_ID_TI) { - pci_read_config_dword(dev, PCI_CFG_TI_LinkEnh, &link_enh); - - /* adjust latency of ATx FIFO: use 1.7 KB threshold */ - link_enh &= ~TI_LinkEnh_atx_thresh_mask; - link_enh |= TI_LinkEnh_atx_thresh_1_7K; - - /* use priority arbitration for asynchronous responses */ - link_enh |= TI_LinkEnh_enab_unfair; - - /* required for aPhyEnhanceEnable to work */ - link_enh |= TI_LinkEnh_enab_accel; - - pci_write_config_dword(dev, PCI_CFG_TI_LinkEnh, link_enh); - } - ar_context_init(&ohci->ar_request_ctx, ohci, OHCI1394_AsReqRcvContextControlSet); diff --git a/drivers/firewire/ohci.h b/drivers/firewire/ohci.h index 0e6c5a46690..ef5e7336da6 100644 --- a/drivers/firewire/ohci.h +++ b/drivers/firewire/ohci.h @@ -155,12 +155,4 @@ #define OHCI1394_phy_tcode 0xe -/* TI extensions */ - -#define PCI_CFG_TI_LinkEnh 0xf4 -#define TI_LinkEnh_enab_accel 0x00000002 -#define TI_LinkEnh_enab_unfair 0x00000080 -#define TI_LinkEnh_atx_thresh_mask 0x00003000 -#define TI_LinkEnh_atx_thresh_1_7K 0x00001000 - #endif /* _FIREWIRE_OHCI_H */ diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 280c9b5ad9e..88a3ae6cd02 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -125,7 +125,7 @@ config ISCSI_IBFT_FIND config ISCSI_IBFT tristate "iSCSI Boot Firmware Table Attributes module" select ISCSI_BOOT_SYSFS - depends on ISCSI_IBFT_FIND && SCSI + depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL default n help This option enables support for detection and exposing of iSCSI diff --git a/drivers/gpio/tc35892-gpio.c b/drivers/gpio/tc35892-gpio.c index 1be6288780d..7e10c935a04 100644 --- a/drivers/gpio/tc35892-gpio.c +++ b/drivers/gpio/tc35892-gpio.c @@ -322,6 +322,9 @@ static int __devinit tc35892_gpio_probe(struct platform_device *pdev) goto out_freeirq; } + if (pdata->setup) + pdata->setup(tc35892, tc35892_gpio->chip.base); + platform_set_drvdata(pdev, tc35892_gpio); return 0; @@ -338,9 +341,14 @@ out_free: static int __devexit tc35892_gpio_remove(struct platform_device *pdev) { struct tc35892_gpio *tc35892_gpio = platform_get_drvdata(pdev); + struct tc35892 *tc35892 = tc35892_gpio->tc35892; + struct tc35892_gpio_platform_data *pdata = 
tc35892->pdata->gpio; int irq = platform_get_irq(pdev, 0); int ret; + if (pdata->remove) + pdata->remove(tc35892, tc35892_gpio->chip.base); + ret = gpiochip_remove(&tc35892_gpio->chip); if (ret < 0) { dev_err(tc35892_gpio->dev, diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 4cab0c6397e..7af44367262 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -73,7 +73,8 @@ source "drivers/gpu/drm/radeon/Kconfig" config DRM_I810 tristate "Intel I810" - depends on DRM && AGP && AGP_INTEL + # BKL usage in order to avoid AB-BA deadlocks, may become BROKEN_ON_SMP + depends on DRM && AGP && AGP_INTEL && BKL help Choose this option if you have an Intel I810 graphics card. If M is selected, the module will be called i810. AGP support is required @@ -86,6 +87,8 @@ choice config DRM_I830 tristate "i830 driver" + # BKL usage in order to avoid AB-BA deadlocks, i830 may get removed + depends on BKL help Choose this option if you have a system that has Intel 830M, 845G, 852GM, 855GM or 865G integrated graphics. If M is selected, the diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c index 55d03ed0500..529a0dbe9fc 100644 --- a/drivers/gpu/drm/drm_buffer.c +++ b/drivers/gpu/drm/drm_buffer.c @@ -98,8 +98,8 @@ EXPORT_SYMBOL(drm_buffer_alloc); * user_data: A pointer the data that is copied to the buffer. * size: The Number of bytes to copy. */ -extern int drm_buffer_copy_from_user(struct drm_buffer *buf, - void __user *user_data, int size) +int drm_buffer_copy_from_user(struct drm_buffer *buf, + void __user *user_data, int size) { int nr_pages = size / PAGE_SIZE + 1; int idx; @@ -163,7 +163,7 @@ void *drm_buffer_read_object(struct drm_buffer *buf, { int idx = drm_buffer_index(buf); int page = drm_buffer_page(buf); - void *obj = 0; + void *obj = NULL; if (idx + objsize <= PAGE_SIZE) { obj = &buf->data[page][idx]; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 84da748555b..ff6690f4fc8 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -284,7 +284,8 @@ EXPORT_SYMBOL(drm_exit); /** File operations structure */ static const struct file_operations drm_stub_fops = { .owner = THIS_MODULE, - .open = drm_stub_open + .open = drm_stub_open, + .llseek = noop_llseek, }; static int __init drm_core_init(void) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index bf92d07510d..5663d271906 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev, return -ENOMEM; kref_init(&obj->refcount); - kref_init(&obj->handlecount); + atomic_set(&obj->handle_count, 0); obj->size = size; atomic_inc(&dev->object_count); @@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_free); -/** - * Called after the last reference to the object has been lost. 
- * Must be called without holding struct_mutex - * - * Frees the object - */ -void -drm_gem_object_free_unlocked(struct kref *kref) -{ - struct drm_gem_object *obj = (struct drm_gem_object *) kref; - struct drm_device *dev = obj->dev; - - if (dev->driver->gem_free_object_unlocked != NULL) - dev->driver->gem_free_object_unlocked(obj); - else if (dev->driver->gem_free_object != NULL) { - mutex_lock(&dev->struct_mutex); - dev->driver->gem_free_object(obj); - mutex_unlock(&dev->struct_mutex); - } -} -EXPORT_SYMBOL(drm_gem_object_free_unlocked); - static void drm_gem_object_ref_bug(struct kref *list_kref) { BUG(); @@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref) * called before drm_gem_object_free or we'll be touching * freed memory */ -void -drm_gem_object_handle_free(struct kref *kref) +void drm_gem_object_handle_free(struct drm_gem_object *obj) { - struct drm_gem_object *obj = container_of(kref, - struct drm_gem_object, - handlecount); struct drm_device *dev = obj->dev; /* Remove any name for this object */ @@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma) struct drm_gem_object *obj = vma->vm_private_data; drm_gem_object_reference(obj); + + mutex_lock(&obj->dev->struct_mutex); + drm_vm_open_locked(vma); + mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_open); @@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; - drm_gem_object_unreference_unlocked(obj); + mutex_lock(&obj->dev->struct_mutex); + drm_vm_close_locked(vma); + drm_gem_object_unreference(obj); + mutex_unlock(&obj->dev->struct_mutex); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 2ef2c782724..974e970ce3f 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data) seq_printf(m, "%6d %8zd %7d %8d\n", obj->name, obj->size, - atomic_read(&obj->handlecount.refcount), + atomic_read(&obj->handle_count), atomic_read(&obj->refcount.refcount)); return 0; } diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index fda67468e60..5df450683aa 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma) mutex_unlock(&dev->struct_mutex); } -/** - * \c close method for all virtual memory types. - * - * \param vma virtual memory area. - * - * Search the \p vma private data entry in drm_device::vmalist, unlink it, and - * free it. - */ -static void drm_vm_close(struct vm_area_struct *vma) +void drm_vm_close_locked(struct vm_area_struct *vma) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; @@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma) vma->vm_start, vma->vm_end - vma->vm_start); atomic_dec(&dev->vma_count); - mutex_lock(&dev->struct_mutex); list_for_each_entry_safe(pt, temp, &dev->vmalist, head) { if (pt->vma == vma) { list_del(&pt->head); @@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma) break; } } +} + +/** + * \c close method for all virtual memory types. + * + * \param vma virtual memory area. + * + * Search the \p vma private data entry in drm_device::vmalist, unlink it, and + * free it. 
+ */ +static void drm_vm_close(struct vm_area_struct *vma) +{ + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->minor->dev; + + mutex_lock(&dev->struct_mutex); + drm_vm_close_locked(vma); mutex_unlock(&dev->struct_mutex); } diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 61b4caf220f..ff33e53bbbf 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -116,9 +116,10 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) static const struct file_operations i810_buffer_fops = { .open = drm_open, .release = drm_release, - .unlocked_ioctl = drm_ioctl, + .unlocked_ioctl = i810_ioctl, .mmap = i810_mmap_buffers, .fasync = drm_fasync, + .llseek = noop_llseek, }; static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index b4250b2cac1..fe69914ce50 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -63,6 +63,7 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .llseek = noop_llseek, }, .pci_driver = { diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 671aa18415a..ca6f31ff0ee 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c @@ -118,9 +118,10 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) static const struct file_operations i830_buffer_fops = { .open = drm_open, .release = drm_release, - .unlocked_ioctl = drm_ioctl, + .unlocked_ioctl = i830_ioctl, .mmap = i830_mmap_buffers, .fasync = drm_fasync, + .llseek = noop_llseek, }; static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c index a5c66aa82f0..5b6298b24e2 100644 --- a/drivers/gpu/drm/i830/i830_drv.c +++ b/drivers/gpu/drm/i830/i830_drv.c @@ -74,6 +74,7 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .llseek = noop_llseek, }, .pci_driver = { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5e43d707678..048149748fd 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -782,6 +782,7 @@ static const struct file_operations i915_wedged_fops = { .open = i915_wedged_open, .read = i915_wedged_read, .write = i915_wedged_write, + .llseek = default_llseek, }; /* As the drm_debugfs_init() routines are called before dev->dev_private is diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 9d67b485303..2dd2c93ebfa 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1787,9 +1787,9 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) } } - div_u64(diff, diff1); + diff = div_u64(diff, diff1); ret = ((m * diff) + c); - div_u64(ret, 10); + ret = div_u64(ret, 10); dev_priv->last_count1 = total_count; dev_priv->last_time1 = now; @@ -1858,7 +1858,7 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) /* More magic constants... 
*/ diff = diff * 1181; - div_u64(diff, diffms * 10); + diff = div_u64(diff, diffms * 10); dev_priv->gfx_power = diff; } @@ -2231,6 +2231,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev_priv->mchdev_lock = &mchdev_lock; spin_unlock(&mchdev_lock); + /* XXX Prevent module unload due to memory corruption bugs. */ + __module_get(THIS_MODULE); + return 0; out_workqueue_free: diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 216deb57978..895ab896e33 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -170,6 +170,7 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ + INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), @@ -547,6 +548,7 @@ static struct drm_driver driver = { #ifdef CONFIG_COMPAT .compat_ioctl = i915_compat_ioctl, #endif + .llseek = noop_llseek, }, .pci_driver = { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 16fca1d1799..90b1d6753b9 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(obj); if (ret) { - drm_gem_object_unreference_unlocked(obj); return ret; } - /* Sink the floating reference from kref_init(handlecount) */ - drm_gem_object_handle_unreference_unlocked(obj); - args->handle = handle; return 0; } @@ -471,14 +469,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -ENOENT; obj_priv = to_intel_bo(obj); - /* Bounds check source. - * - * XXX: This could use review for overflow issues... - */ - if (args->offset > obj->size || args->size > obj->size || - args->offset + args->size > obj->size) { - drm_gem_object_unreference_unlocked(obj); - return -EINVAL; + /* Bounds check source. */ + if (args->offset > obj->size || args->size > obj->size - args->offset) { + ret = -EINVAL; + goto err; + } + + if (!access_ok(VERIFY_WRITE, + (char __user *)(uintptr_t)args->data_ptr, + args->size)) { + ret = -EFAULT; + goto err; } if (i915_gem_object_needs_bit17_swizzle(obj)) { @@ -490,8 +491,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, file_priv); } +err: drm_gem_object_unreference_unlocked(obj); - return ret; } @@ -580,8 +581,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; - if (!access_ok(VERIFY_READ, user_data, remain)) - return -EFAULT; mutex_lock(&dev->struct_mutex); @@ -934,14 +933,17 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return -ENOENT; obj_priv = to_intel_bo(obj); - /* Bounds check destination. - * - * XXX: This could use review for overflow issues... - */ - if (args->offset > obj->size || args->size > obj->size || - args->offset + args->size > obj->size) { - drm_gem_object_unreference_unlocked(obj); - return -EINVAL; + /* Bounds check destination. 
*/ + if (args->offset > obj->size || args->size > obj->size - args->offset) { + ret = -EINVAL; + goto err; + } + + if (!access_ok(VERIFY_READ, + (char __user *)(uintptr_t)args->data_ptr, + args->size)) { + ret = -EFAULT; + goto err; } /* We can only do the GTT pwrite on untiled buffers, as otherwise @@ -975,8 +977,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, DRM_INFO("pwrite failed %d\n", ret); #endif +err: drm_gem_object_unreference_unlocked(obj); - return ret; } @@ -2351,14 +2353,21 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) reg->obj = obj; - if (IS_GEN6(dev)) + switch (INTEL_INFO(dev)->gen) { + case 6: sandybridge_write_fence_reg(reg); - else if (IS_I965G(dev)) + break; + case 5: + case 4: i965_write_fence_reg(reg); - else if (IS_I9XX(dev)) + break; + case 3: i915_write_fence_reg(reg); - else + break; + case 2: i830_write_fence_reg(reg); + break; + } trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, obj_priv->tiling_mode); @@ -2381,22 +2390,26 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj_priv->fence_reg]; + uint32_t fence_reg; - if (IS_GEN6(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 6: I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (obj_priv->fence_reg * 8), 0); - } else if (IS_I965G(dev)) { + break; + case 5: + case 4: I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); - } else { - uint32_t fence_reg; - - if (obj_priv->fence_reg < 8) - fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; + break; + case 3: + if (obj_priv->fence_reg >= 8) + fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; else - fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - - 8) * 4; + case 2: + fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; I915_WRITE(fence_reg, 0); + break; } reg->obj = NULL; @@ -3247,6 +3260,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, (int) reloc->offset, reloc->read_domains, reloc->write_domain); + drm_gem_object_unreference(target_obj); + i915_gem_object_unpin(obj); return -EINVAL; } if (reloc->write_domain & I915_GEM_DOMAIN_CPU || diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 72cae3cccad..5c428fa3e0b 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -79,6 +79,7 @@ mark_free(struct drm_i915_gem_object *obj_priv, struct list_head *unwind) { list_add(&obj_priv->evict_list, unwind); + drm_gem_object_reference(&obj_priv->base); return drm_mm_scan_add_block(obj_priv->gtt_space); } @@ -92,7 +93,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen { drm_i915_private_t *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; - struct drm_i915_gem_object *obj_priv, *tmp_obj_priv; + struct drm_i915_gem_object *obj_priv; struct list_head *render_iter, *bsd_iter; int ret = 0; @@ -165,6 +166,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen list_for_each_entry(obj_priv, &unwind_list, evict_list) { ret = drm_mm_scan_remove_block(obj_priv->gtt_space); BUG_ON(ret); + drm_gem_object_unreference(&obj_priv->base); } /* We expect the caller to unpin, evict all and try again, or give up. 
@@ -173,36 +175,34 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen return -ENOSPC; found: + /* drm_mm doesn't allow any other other operations while + * scanning, therefore store to be evicted objects on a + * temporary list. */ INIT_LIST_HEAD(&eviction_list); - list_for_each_entry_safe(obj_priv, tmp_obj_priv, - &unwind_list, evict_list) { + while (!list_empty(&unwind_list)) { + obj_priv = list_first_entry(&unwind_list, + struct drm_i915_gem_object, + evict_list); if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { - /* drm_mm doesn't allow any other other operations while - * scanning, therefore store to be evicted objects on a - * temporary list. */ list_move(&obj_priv->evict_list, &eviction_list); + continue; } + list_del(&obj_priv->evict_list); + drm_gem_object_unreference(&obj_priv->base); } /* Unbinding will emit any required flushes */ - list_for_each_entry_safe(obj_priv, tmp_obj_priv, - &eviction_list, evict_list) { -#if WATCH_LRU - DRM_INFO("%s: evicting %p\n", __func__, obj); -#endif - ret = i915_gem_object_unbind(&obj_priv->base); - if (ret) - return ret; + while (!list_empty(&eviction_list)) { + obj_priv = list_first_entry(&eviction_list, + struct drm_i915_gem_object, + evict_list); + if (ret == 0) + ret = i915_gem_object_unbind(&obj_priv->base); + list_del(&obj_priv->evict_list); + drm_gem_object_unreference(&obj_priv->base); } - /* The just created free hole should be on the top of the free stack - * maintained by drm_mm, so this BUG_ON actually executes in O(1). - * Furthermore all accessed data has just recently been used, so it - * should be really fast, too. */ - BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size, - alignment, 0)); - - return 0; + return ret; } int diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 2c6b98f2440..31f08581e93 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -789,16 +789,25 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); /* Fences */ - if (IS_I965G(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); + break; + case 5: + case 4: for (i = 0; i < 16; i++) dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); - } else { - for (i = 0; i < 8; i++) - dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); - + break; + case 3: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); + case 2: + for (i = 0; i < 8; i++) + dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); + break; + } return 0; @@ -815,15 +824,24 @@ int i915_restore_state(struct drm_device *dev) I915_WRITE(HWS_PGA, dev_priv->saveHWS); /* Fences */ - if (IS_I965G(dev)) { + switch (INTEL_INFO(dev)->gen) { + case 6: + for (i = 0; i < 16; i++) + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); + break; + case 5: + case 4: for (i = 0; i < 16; i++) I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); - } else { - for (i = 0; i < 8; i++) - I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); + break; + case 3: + case 2: if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) for (i = 0; i < 8; i++) I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); + for (i = 0; i < 8; i++) + I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); + 
break; } i915_restore_display(dev); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index a02a8df7372..197d4f32585 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -188,7 +188,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 1000, 1)) - DRM_ERROR("timed out waiting for FORCE_TRIGGER"); + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); if (turn_off_dac) { I915_WRITE(PCH_ADPA, temp); @@ -245,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) if (wait_for((I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT) == 0, 1000, 1)) - DRM_ERROR("timed out waiting for FORCE_DETECT to go off"); + DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); } stat = I915_READ(PORT_HOTPLUG_STAT); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 19daead5b52..97922859459 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1013,8 +1013,8 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) DRM_DEBUG_KMS("vblank wait timed out\n"); } -/** - * intel_wait_for_vblank_off - wait for vblank after disabling a pipe +/* + * intel_wait_for_pipe_off - wait for pipe to turn off * @dev: drm device * @pipe: pipe to wait for * @@ -1022,25 +1022,39 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe) * spinning on the vblank interrupt status bit, since we won't actually * see an interrupt when the pipe is disabled. * - * So this function waits for the display line value to settle (it - * usually ends up stopping at the start of the next frame). + * On Gen4 and above: + * wait for the pipe register state bit to turn off + * + * Otherwise: + * wait for the display line value to settle (it usually + * ends up stopping at the start of the next frame). + * */ -void intel_wait_for_vblank_off(struct drm_device *dev, int pipe) +static void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipedsl_reg = (pipe == 0 ? PIPEADSL : PIPEBDSL); - unsigned long timeout = jiffies + msecs_to_jiffies(100); - u32 last_line; - - /* Wait for the display line to settle */ - do { - last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; - mdelay(5); - } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && - time_after(timeout, jiffies)); - - if (time_after(jiffies, timeout)) - DRM_DEBUG_KMS("vblank wait timed out\n"); + + if (INTEL_INFO(dev)->gen >= 4) { + int pipeconf_reg = (pipe == 0 ? PIPEACONF : PIPEBCONF); + + /* Wait for the Pipe State to go off */ + if (wait_for((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) == 0, + 100, 0)) + DRM_DEBUG_KMS("pipe_off wait timed out\n"); + } else { + u32 last_line; + int pipedsl_reg = (pipe == 0 ? 
PIPEADSL : PIPEBDSL); + unsigned long timeout = jiffies + msecs_to_jiffies(100); + + /* Wait for the display line to settle */ + do { + last_line = I915_READ(pipedsl_reg) & DSL_LINEMASK; + mdelay(5); + } while (((I915_READ(pipedsl_reg) & DSL_LINEMASK) != last_line) && + time_after(timeout, jiffies)); + if (time_after(jiffies, timeout)) + DRM_DEBUG_KMS("pipe_off wait timed out\n"); + } } /* Parameters have changed, update FBC info */ @@ -2328,13 +2342,13 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(dspbase_reg); } - /* Wait for vblank for the disable to take effect */ - intel_wait_for_vblank_off(dev, pipe); - /* Don't disable pipe A or pipe A PLLs if needed */ if (pipeconf_reg == PIPEACONF && - (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + (dev_priv->quirks & QUIRK_PIPEA_FORCE)) { + /* Wait for vblank for the disable to take effect */ + intel_wait_for_vblank(dev, pipe); goto skip_pipe_off; + } /* Next, disable display pipes */ temp = I915_READ(pipeconf_reg); @@ -2343,8 +2357,8 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(pipeconf_reg); } - /* Wait for vblank for the disable to take effect. */ - intel_wait_for_vblank_off(dev, pipe); + /* Wait for the pipe to turn off */ + intel_wait_for_pipe_off(dev, pipe); temp = I915_READ(dpll_reg); if ((temp & DPLL_VCO_ENABLE) != 0) { @@ -2463,11 +2477,19 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; + if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) return false; } + + /* XXX some encoders set the crtcinfo, others don't. + * Obviously we need some form of conflict resolution here... + */ + if (adjusted_mode->crtc_htotal == 0) + drm_mode_set_crtcinfo(adjusted_mode, 0); + return true; } diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1a51ee07de3..9ab8708ac6b 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1138,18 +1138,14 @@ static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat, - uint8_t train_set[4], - bool first) + uint8_t train_set[4]) { struct drm_device *dev = intel_dp->base.enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); int ret; I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); - if (first) - intel_wait_for_vblank(dev, intel_crtc->pipe); intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, @@ -1174,10 +1170,15 @@ intel_dp_link_train(struct intel_dp *intel_dp) uint8_t voltage; bool clock_recovery = false; bool channel_eq = false; - bool first = true; int tries; u32 reg; uint32_t DP = intel_dp->DP; + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.enc.crtc); + + /* Enable output, wait for it to become active */ + I915_WRITE(intel_dp->output_reg, intel_dp->DP); + POSTING_READ(intel_dp->output_reg); + intel_wait_for_vblank(dev, intel_crtc->pipe); /* Write the link configuration data */ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, @@ -1210,9 +1211,8 @@ intel_dp_link_train(struct intel_dp *intel_dp) reg = DP | DP_LINK_TRAIN_PAT_1; if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_1, train_set, first)) + DP_TRAINING_PATTERN_1, train_set)) break; - first = false; /* Set training pattern 1 */ udelay(100); @@ -1266,8 +1266,7 @@ intel_dp_link_train(struct intel_dp 
*intel_dp) /* channel eq pattern */ if (!intel_dp_set_link_train(intel_dp, reg, - DP_TRAINING_PATTERN_2, train_set, - false)) + DP_TRAINING_PATTERN_2, train_set)) break; udelay(400); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index ad312ca6b3e..8828b3ac641 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -229,7 +229,6 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void intel_wait_for_vblank_off(struct drm_device *dev, int pipe); extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 7bdc96256bf..b61966c126d 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev, drm_fb_helper_fini(&ifbdev->helper); drm_framebuffer_cleanup(&ifb->base); - if (ifb->obj) + if (ifb->obj) { drm_gem_object_unreference(ifb->obj); + ifb->obj = NULL; + } return 0; } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e8e902d614e..ee73e428a84 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2170,8 +2170,7 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) return true; err: - intel_sdvo_destroy_enhance_property(connector); - kfree(intel_sdvo_connector); + intel_sdvo_destroy(connector); return false; } @@ -2243,8 +2242,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) return true; err: - intel_sdvo_destroy_enhance_property(connector); - kfree(intel_sdvo_connector); + intel_sdvo_destroy(connector); return false; } @@ -2522,11 +2520,10 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, uint16_t response; } enhancements; - if (!intel_sdvo_get_value(intel_sdvo, - SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, - &enhancements, sizeof(enhancements))) - return false; - + enhancements.response = 0; + intel_sdvo_get_value(intel_sdvo, + SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS, + &enhancements, sizeof(enhancements)); if (enhancements.response == 0) { DRM_DEBUG_KMS("No enhancement is supported\n"); return true; diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 26d0d8ced80..ac64f0b0392 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -75,6 +75,7 @@ static struct drm_driver driver = { #ifdef CONFIG_COMPAT .compat_ioctl = mga_compat_ioctl, #endif + .llseek = noop_llseek, }, .pci_driver = { .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 87186a4bbf0..fc737037f75 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -558,8 +558,10 @@ nouveau_connector_get_modes(struct drm_connector *connector) if (nv_encoder->dcb->type == OUTPUT_LVDS && (nv_encoder->dcb->lvdsconf.use_straps_for_mode || dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { - nv_connector->native_mode = drm_mode_create(dev); - nouveau_bios_fp_mode(dev, nv_connector->native_mode); + struct drm_display_mode mode; + + nouveau_bios_fp_mode(dev, 
&mode); + nv_connector->native_mode = drm_mode_duplicate(dev, &mode); } /* Find the native mode if this is a digital panel, if we didn't diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 1de5eb53e01..eb15345162a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -393,6 +393,7 @@ static struct drm_driver driver = { #if defined(CONFIG_COMPAT) .compat_ioctl = nouveau_compat_ioctl, #endif + .llseek = noop_llseek, }, .pci_driver = { .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index ead7b8fc53f..19620a6709f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, goto out; ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(nvbo->gem); out: - drm_gem_object_handle_unreference_unlocked(nvbo->gem); - - if (ret) - drm_gem_object_unreference_unlocked(nvbo->gem); return ret; } diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 1e2971f13aa..d42c76c2371 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -71,6 +71,7 @@ static struct drm_driver driver = { #ifdef CONFIG_COMPAT .compat_ioctl = r128_compat_ioctl, #endif + .llseek = noop_llseek, }, .pci_driver = { .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 1bc72c3190a..fe359a239df 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -4999,7 +4999,7 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS #define SW_I2C_CNTL_WRITE1BIT 6 //==============================VESA definition Portion=============================== -#define VESA_OEM_PRODUCT_REV '01.00' +#define VESA_OEM_PRODUCT_REV "01.00" #define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support #define VESA_MODE_WIN_ATTRIBUTE 7 #define VESA_WIN_SIZE 64 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 79082d4398a..2f93d46ae69 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1137,7 +1137,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) WREG32(RCU_IND_INDEX, 0x203); efuse_straps_3 = RREG32(RCU_IND_DATA); - efuse_box_bit_127_124 = (u8)(efuse_straps_3 & 0xF0000000) >> 28; + efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28); switch(efuse_box_bit_127_124) { case 0x0: @@ -1407,6 +1407,7 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; rdev->mc.visible_vram_size = rdev->mc.aper_size; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; r600_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -1520,7 +1521,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) { u32 tmp; - WREG32(CP_INT_CNTL, 0); + WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); WREG32(GRBM_INT_CNTL, 0); WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index e151f16a8f8..e59422320bb 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ 
b/drivers/gpu/drm/radeon/r100.c @@ -1030,6 +1030,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) return r; } rdev->cp.ready = true; + rdev->mc.active_vram_size = rdev->mc.real_vram_size; return 0; } @@ -1047,6 +1048,7 @@ void r100_cp_fini(struct radeon_device *rdev) void r100_cp_disable(struct radeon_device *rdev) { /* Disable ring */ + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; rdev->cp.ready = false; WREG32(RADEON_CP_CSQ_MODE, 0); WREG32(RADEON_CP_CSQ_CNTL, 0); @@ -2295,6 +2297,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev) /* FIXME we don't use the second aperture yet when we could use it */ if (rdev->mc.visible_vram_size > rdev->mc.aper_size) rdev->mc.visible_vram_size = rdev->mc.aper_size; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); if (rdev->flags & RADEON_IS_IGP) { uint32_t tom; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index afc18d87fdc..7b65e4efe8a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1248,6 +1248,7 @@ int r600_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; r600_vram_gtt_location(rdev, &rdev->mc); if (rdev->flags & RADEON_IS_IGP) { @@ -1917,6 +1918,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) */ void r600_cp_stop(struct radeon_device *rdev) { + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); } @@ -2729,7 +2731,7 @@ int r600_ib_test(struct radeon_device *rdev) if (i < rdev->usec_timeout) { DRM_INFO("ib test succeeded in %u usecs\n", i); } else { - DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", + DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); r = -EINVAL; } @@ -2910,7 +2912,7 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev) { u32 tmp; - WREG32(CP_INT_CNTL, 0); + WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); WREG32(GRBM_INT_CNTL, 0); WREG32(DxMODE_INT_MASK, 0); if (ASIC_IS_DCE3(rdev)) { @@ -3528,7 +3530,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) /* r7xx hw bug. 
write to HDP_DEBUG1 followed by fb read * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL */ - if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && + rdev->vram_scratch.ptr) { void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp; diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 9ceb2a1ce79..3473c00781f 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -532,6 +532,7 @@ int r600_blit_init(struct radeon_device *rdev) memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); radeon_bo_kunmap(rdev->r600_blit.shader_obj); radeon_bo_unreserve(rdev->r600_blit.shader_obj); + rdev->mc.active_vram_size = rdev->mc.real_vram_size; return 0; } @@ -539,6 +540,7 @@ void r600_blit_fini(struct radeon_device *rdev) { int r; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; if (rdev->r600_blit.shader_obj == NULL) return; /* If we can't reserve the bo, unref should be enough to destroy diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a168d644bf9..9ff38c99a6e 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -344,6 +344,7 @@ struct radeon_mc { * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; + u64 active_vram_size; u64 gtt_size; u64 gtt_start; u64 gtt_end; diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index ebae14c4b76..8e43ddae70c 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, *connector_type = DRM_MODE_CONNECTOR_DVID; } + /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ + if ((dev->pdev->device == 0x796e) && + (dev->pdev->subsystem_vendor == 0x1462) && + (dev->pdev->subsystem_device == 0x7302)) { + if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || + (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) + return false; + } + /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x147b) && @@ -1549,39 +1558,39 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev) switch (tv_info->ucTV_BootUpDefaultStandard) { case ATOM_TV_NTSC: tv_std = TV_STD_NTSC; - DRM_INFO("Default TV standard: NTSC\n"); + DRM_DEBUG_KMS("Default TV standard: NTSC\n"); break; case ATOM_TV_NTSCJ: tv_std = TV_STD_NTSC_J; - DRM_INFO("Default TV standard: NTSC-J\n"); + DRM_DEBUG_KMS("Default TV standard: NTSC-J\n"); break; case ATOM_TV_PAL: tv_std = TV_STD_PAL; - DRM_INFO("Default TV standard: PAL\n"); + DRM_DEBUG_KMS("Default TV standard: PAL\n"); break; case ATOM_TV_PALM: tv_std = TV_STD_PAL_M; - DRM_INFO("Default TV standard: PAL-M\n"); + DRM_DEBUG_KMS("Default TV standard: PAL-M\n"); break; case ATOM_TV_PALN: tv_std = TV_STD_PAL_N; - DRM_INFO("Default TV standard: PAL-N\n"); + DRM_DEBUG_KMS("Default TV standard: PAL-N\n"); break; case ATOM_TV_PALCN: tv_std = TV_STD_PAL_CN; - DRM_INFO("Default TV standard: PAL-CN\n"); + DRM_DEBUG_KMS("Default TV standard: PAL-CN\n"); break; case ATOM_TV_PAL60: tv_std = TV_STD_PAL_60; - DRM_INFO("Default TV standard: PAL-60\n"); + DRM_DEBUG_KMS("Default TV standard: PAL-60\n"); break; case ATOM_TV_SECAM: tv_std = TV_STD_SECAM; - DRM_INFO("Default TV standard: SECAM\n"); + DRM_DEBUG_KMS("Default TV standard: SECAM\n"); break; default: 
tv_std = TV_STD_NTSC; - DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); + DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n"); break; } } diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index a04b7a6ad95..7b7ea269549 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -913,47 +913,47 @@ radeon_combios_get_tv_info(struct radeon_device *rdev) switch (RBIOS8(tv_info + 7) & 0xf) { case 1: tv_std = TV_STD_NTSC; - DRM_INFO("Default TV standard: NTSC\n"); + DRM_DEBUG_KMS("Default TV standard: NTSC\n"); break; case 2: tv_std = TV_STD_PAL; - DRM_INFO("Default TV standard: PAL\n"); + DRM_DEBUG_KMS("Default TV standard: PAL\n"); break; case 3: tv_std = TV_STD_PAL_M; - DRM_INFO("Default TV standard: PAL-M\n"); + DRM_DEBUG_KMS("Default TV standard: PAL-M\n"); break; case 4: tv_std = TV_STD_PAL_60; - DRM_INFO("Default TV standard: PAL-60\n"); + DRM_DEBUG_KMS("Default TV standard: PAL-60\n"); break; case 5: tv_std = TV_STD_NTSC_J; - DRM_INFO("Default TV standard: NTSC-J\n"); + DRM_DEBUG_KMS("Default TV standard: NTSC-J\n"); break; case 6: tv_std = TV_STD_SCART_PAL; - DRM_INFO("Default TV standard: SCART-PAL\n"); + DRM_DEBUG_KMS("Default TV standard: SCART-PAL\n"); break; default: tv_std = TV_STD_NTSC; - DRM_INFO + DRM_DEBUG_KMS ("Unknown TV standard; defaulting to NTSC\n"); break; } switch ((RBIOS8(tv_info + 9) >> 2) & 0x3) { case 0: - DRM_INFO("29.498928713 MHz TV ref clk\n"); + DRM_DEBUG_KMS("29.498928713 MHz TV ref clk\n"); break; case 1: - DRM_INFO("28.636360000 MHz TV ref clk\n"); + DRM_DEBUG_KMS("28.636360000 MHz TV ref clk\n"); break; case 2: - DRM_INFO("14.318180000 MHz TV ref clk\n"); + DRM_DEBUG_KMS("14.318180000 MHz TV ref clk\n"); break; case 3: - DRM_INFO("27.000000000 MHz TV ref clk\n"); + DRM_DEBUG_KMS("27.000000000 MHz TV ref clk\n"); break; default: break; @@ -1324,7 +1324,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, if (tmds_info) { ver = RBIOS8(tmds_info); - DRM_INFO("DFP table revision: %d\n", ver); + DRM_DEBUG_KMS("DFP table revision: %d\n", ver); if (ver == 3) { n = RBIOS8(tmds_info + 5) + 1; if (n > 4) @@ -1408,7 +1408,7 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); if (offset) { ver = RBIOS8(offset); - DRM_INFO("External TMDS Table revision: %d\n", ver); + DRM_DEBUG_KMS("External TMDS Table revision: %d\n", ver); tmds->slave_addr = RBIOS8(offset + 4 + 2); tmds->slave_addr >>= 1; /* 7 bit addressing */ gpio = RBIOS8(offset + 4 + 3); diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 5731fc9b1ae..3eef567b042 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -203,6 +203,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; int xorigin = 0, yorigin = 0; + int w = radeon_crtc->cursor_width; if (x < 0) xorigin = -x + 1; @@ -213,22 +214,7 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, if (yorigin >= CURSOR_HEIGHT) yorigin = CURSOR_HEIGHT - 1; - radeon_lock_cursor(crtc, true); - if (ASIC_IS_DCE4(rdev)) { - /* cursors are offset into the total surface */ - x += crtc->x; - y += crtc->y; - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); - - /* XXX: check if evergreen has the same issues as avivo chips */ - 
WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, - ((xorigin ? 0 : x) << 16) | - (yorigin ? 0 : y)); - WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); - WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, - ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1)); - } else if (ASIC_IS_AVIVO(rdev)) { - int w = radeon_crtc->cursor_width; + if (ASIC_IS_AVIVO(rdev)) { int i = 0; struct drm_crtc *crtc_p; @@ -260,7 +246,17 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, if (w <= 0) w = 1; } + } + radeon_lock_cursor(crtc, true); + if (ASIC_IS_DCE4(rdev)) { + WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, + ((xorigin ? 0 : x) << 16) | + (yorigin ? 0 : y)); + WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, + ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); + } else if (ASIC_IS_AVIVO(rdev)) { WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, ((xorigin ? 0 : x) << 16) | (yorigin ? 0 : y)); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 127a395f70f..b92d2f2fcbe 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev) DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_DFP5_SUPPORT) DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]); + if (devices & ATOM_DEVICE_DFP6_SUPPORT) + DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_TV1_SUPPORT) DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]); if (devices & ATOM_DEVICE_CV_SUPPORT) @@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); - if (radeon_fb->obj) + if (radeon_fb->obj) { drm_gem_object_unreference_unlocked(radeon_fb->obj); + } drm_framebuffer_cleanup(fb); kfree(radeon_fb); } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 795403b0e2c..29c1237c2e7 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -219,6 +219,7 @@ static struct drm_driver driver_old = { #ifdef CONFIG_COMPAT .compat_ioctl = radeon_compat_ioctl, #endif + .llseek = noop_llseek, }, .pci_driver = { diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index c74a8b20d94..40b0c087b59 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -94,6 +94,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) ret = radeon_bo_reserve(rbo, false); if (likely(ret == 0)) { radeon_bo_kunmap(rbo); + radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } drm_gem_object_unreference_unlocked(gobj); @@ -325,8 +326,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb { struct fb_info *info; struct radeon_framebuffer *rfb = &rfbdev->rfb; - struct radeon_bo *rbo; - int r; if (rfbdev->helper.fbdev) { info = rfbdev->helper.fbdev; @@ -338,14 +337,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb } if (rfb->obj) { - rbo = rfb->obj->driver_private; - r = radeon_bo_reserve(rbo, false); - if (likely(r == 0)) { - radeon_bo_kunmap(rbo); - radeon_bo_unpin(rbo); - radeon_bo_unreserve(rbo); - } - 
drm_gem_object_unreference_unlocked(rfb->obj); + radeonfb_destroy_pinned_object(rfb->obj); + rfb->obj = NULL; } drm_fb_helper_fini(&rfbdev->helper); drm_framebuffer_cleanup(&rfb->base); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index c578f265b24..d1e595d9172 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, return r; } r = drm_gem_handle_create(filp, gobj, &handle); + /* drop reference from allocate - handle holds it now */ + drm_gem_object_unreference_unlocked(gobj); if (r) { - drm_gem_object_unreference_unlocked(gobj); return r; } - drm_gem_object_handle_unreference_unlocked(gobj); args->handle = handle; return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 5eee3c41d12..8fbbe1c6ebb 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -203,6 +203,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) */ int radeon_driver_firstopen_kms(struct drm_device *dev) { + struct radeon_device *rdev = dev->dev_private; + + if (rdev->powered_down) + return -EINVAL; return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 0afd1e62347..b3b5306bb57 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) u32 c = 0; rbo->placement.fpfn = 0; - rbo->placement.lpfn = 0; + rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT; rbo->placement.placement = rbo->placements; rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 353998dc2c0..3481bc7f6f5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -124,11 +124,8 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, int r; r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) - dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo); + if (unlikely(r != 0)) return r; - } spin_lock(&bo->tbo.lock); if (mem_type) *mem_type = bo->tbo.mem.mem_type; diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index cc05b230d7e..51d5f7b5ab2 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -693,6 +693,7 @@ void rs600_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; rdev->mc.visible_vram_size = rdev->mc.aper_size; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); base = RREG32_MC(R_000004_MC_FB_LOCATION); base = G_000004_MC_FB_START(base) << 16; diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3e3f75718be..4dc2a87ea68 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -157,6 +157,7 @@ void rs690_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); rdev->mc.visible_vram_size = rdev->mc.aper_size; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; base 
= RREG32_MC(R_000100_MCCFG_FB_LOCATION); base = G_000100_MC_FB_START(base) << 16; rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index bfa59db374d..9490da70074 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -267,6 +267,7 @@ static void rv770_mc_program(struct radeon_device *rdev) */ void r700_cp_stop(struct radeon_device *rdev) { + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); } @@ -992,6 +993,7 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.visible_vram_size = rdev->mc.aper_size; + rdev->mc.active_vram_size = rdev->mc.visible_vram_size; r600_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index 021de44c15a..2a2830f5a84 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c @@ -54,6 +54,7 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .llseek = noop_llseek, }, .pci_driver = { diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 776bf9e9ea1..4bb10ef6676 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -83,6 +83,7 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .llseek = noop_llseek, }, .pci_driver = { .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index ec5a43e6572..640567ef713 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -52,6 +52,7 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .llseek = noop_llseek, }, .pci_driver = { .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cb4cf7ef4d1..db809e034cc 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -442,6 +442,43 @@ out_err: } /** + * Call bo::reserved and with the lru lock held. + * Will release GPU memory type usage on destruction. + * This is the place to put in driver specific hooks. + * Will release the bo::reserved lock and the + * lru lock on exit. + */ + +static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) +{ + struct ttm_bo_global *glob = bo->glob; + + if (bo->ttm) { + + /** + * Release the lru_lock, since we don't want to have + * an atomic requirement on ttm_tt[unbind|destroy]. + */ + + spin_unlock(&glob->lru_lock); + ttm_tt_unbind(bo->ttm); + ttm_tt_destroy(bo->ttm); + bo->ttm = NULL; + spin_lock(&glob->lru_lock); + } + + if (bo->mem.mm_node) { + drm_mm_put_block(bo->mem.mm_node); + bo->mem.mm_node = NULL; + } + + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + spin_unlock(&glob->lru_lock); +} + + +/** * If bo idle, remove from delayed- and lru lists, and unref. * If not idle, and already on delayed list, do nothing. 
* If not idle, and not on delayed list, put on delayed list, @@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) int ret; spin_lock(&bo->lock); +retry: (void) ttm_bo_wait(bo, false, false, !remove_all); if (!bo->sync_obj) { @@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) spin_unlock(&bo->lock); spin_lock(&glob->lru_lock); - put_count = ttm_bo_del_from_lru(bo); + ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0); + + /** + * Someone else has the object reserved. Bail and retry. + */ - ret = ttm_bo_reserve_locked(bo, false, false, false, 0); - BUG_ON(ret); - if (bo->ttm) - ttm_tt_unbind(bo->ttm); + if (unlikely(ret == -EBUSY)) { + spin_unlock(&glob->lru_lock); + spin_lock(&bo->lock); + goto requeue; + } + + /** + * We can re-check for sync object without taking + * the bo::lock since setting the sync object requires + * also bo::reserved. A busy object at this point may + * be caused by another thread starting an accelerated + * eviction. + */ + + if (unlikely(bo->sync_obj)) { + atomic_set(&bo->reserved, 0); + wake_up_all(&bo->event_queue); + spin_unlock(&glob->lru_lock); + spin_lock(&bo->lock); + if (remove_all) + goto retry; + else + goto requeue; + } + + put_count = ttm_bo_del_from_lru(bo); if (!list_empty(&bo->ddestroy)) { list_del_init(&bo->ddestroy); ++put_count; } - if (bo->mem.mm_node) { - drm_mm_put_block(bo->mem.mm_node); - bo->mem.mm_node = NULL; - } - spin_unlock(&glob->lru_lock); - atomic_set(&bo->reserved, 0); + ttm_bo_cleanup_memtype_use(bo); while (put_count--) kref_put(&bo->list_kref, ttm_bo_ref_bug); return 0; } - +requeue: spin_lock(&glob->lru_lock); if (list_empty(&bo->ddestroy)) { void *sync_obj = bo->sync_obj; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7cffb3e0423..3451a82adba 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -351,6 +351,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); fbo->vm_node = NULL; + atomic_set(&fbo->cpu_writers, 0); fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); kref_init(&fbo->list_kref); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index ca904799f01..b1e02fffd3c 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -69,7 +69,7 @@ struct ttm_page_pool { spinlock_t lock; bool fill_lock; struct list_head list; - int gfp_flags; + gfp_t gfp_flags; unsigned npages; char *name; unsigned long nfrees; @@ -475,7 +475,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages, * This function is reentrant if caller updates count depending on number of * pages returned in pages array. 
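The ttm_page_alloc hunk above changes the pool's gfp_flags from a plain int to gfp_t, and the hunks that follow do the same for the function parameters. gfp_t is a __bitwise-annotated type, so carrying allocation flags in an int defeats sparse's type checking; the change is about the type only, not about behaviour. A minimal sketch of the idiom (pool_alloc_one is an illustrative name, not part of the patch):

#include <linux/gfp.h>

/* keep allocation flags as gfp_t, never int, so sparse can check them */
static struct page *pool_alloc_one(gfp_t gfp_flags)
{
        return alloc_page(gfp_flags | __GFP_ZERO);
}
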
*/ -static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags, +static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, int ttm_flags, enum ttm_caching_state cstate, unsigned count) { struct page **caching_array; @@ -666,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags, { struct ttm_page_pool *pool = ttm_get_pool(flags, cstate); struct page *p = NULL; - int gfp_flags = GFP_USER; + gfp_t gfp_flags = GFP_USER; int r; /* set zero flag for page allocation if required */ @@ -818,7 +818,7 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) return 0; } -void ttm_page_alloc_fini() +void ttm_page_alloc_fini(void) { int i; diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index 7a1b210401e..b8984a5ae52 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c @@ -62,6 +62,7 @@ static struct drm_driver driver = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, + .llseek = noop_llseek, }, .pci_driver = { .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 72ec2e2b6e9..2ef93df9e8a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = { {0, 0, 0} }; -static char *vmw_devname = "vmwgfx"; +static int enable_fbdev; static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr); +MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev"); +module_param_named(enable_fbdev, enable_fbdev, int, 0600); + static void vmw_print_capabilities(uint32_t capabilities) { DRM_INFO("Capabilities:\n"); @@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv) { int ret; - vmw_kms_save_vga(dev_priv); - ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); if (unlikely(ret != 0)) { DRM_ERROR("Unable to initialize FIFO.\n"); @@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv) static void vmw_release_device(struct vmw_private *dev_priv) { vmw_fifo_release(dev_priv, &dev_priv->fifo); - vmw_kms_restore_vga(dev_priv); } +int vmw_3d_resource_inc(struct vmw_private *dev_priv) +{ + int ret = 0; + + mutex_lock(&dev_priv->release_mutex); + if (unlikely(dev_priv->num_3d_resources++ == 0)) { + ret = vmw_request_device(dev_priv); + if (unlikely(ret != 0)) + --dev_priv->num_3d_resources; + } + mutex_unlock(&dev_priv->release_mutex); + return ret; +} + + +void vmw_3d_resource_dec(struct vmw_private *dev_priv) +{ + int32_t n3d; + + mutex_lock(&dev_priv->release_mutex); + if (unlikely(--dev_priv->num_3d_resources == 0)) + vmw_release_device(dev_priv); + n3d = (int32_t) dev_priv->num_3d_resources; + mutex_unlock(&dev_priv->release_mutex); + + BUG_ON(n3d < 0); +} static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) { @@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->last_read_sequence = (uint32_t) -100; mutex_init(&dev_priv->hw_mutex); mutex_init(&dev_priv->cmdbuf_mutex); + mutex_init(&dev_priv->release_mutex); rwlock_init(&dev_priv->resource_lock); idr_init(&dev_priv->context_idr); idr_init(&dev_priv->surface_idr); @@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->vram_start = pci_resource_start(dev->pdev, 1); dev_priv->mmio_start = 
pci_resource_start(dev->pdev, 2); + dev_priv->enable_fb = enable_fbdev; + mutex_lock(&dev_priv->hw_mutex); vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); @@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev->dev_private = dev_priv; - if (!dev->devname) - dev->devname = vmw_devname; - - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { - ret = drm_irq_install(dev); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed installing irq: %d\n", ret); - goto out_no_irq; - } - } - ret = pci_request_regions(dev->pdev, "vmwgfx probe"); dev_priv->stealth = (ret != 0); if (dev_priv->stealth) { @@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_device; } } - ret = vmw_request_device(dev_priv); + ret = vmw_kms_init(dev_priv); if (unlikely(ret != 0)) - goto out_no_device; - vmw_kms_init(dev_priv); + goto out_no_kms; vmw_overlay_init(dev_priv); - vmw_fb_init(dev_priv); + if (dev_priv->enable_fb) { + ret = vmw_3d_resource_inc(dev_priv); + if (unlikely(ret != 0)) + goto out_no_fifo; + vmw_kms_save_vga(dev_priv); + vmw_fb_init(dev_priv); + DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? + "Detected device 3D availability.\n" : + "Detected no device 3D availability.\n"); + } else { + DRM_INFO("Delayed 3D detection since we're not " + "running the device in SVGA mode yet.\n"); + } + + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { + ret = drm_irq_install(dev); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed installing irq: %d\n", ret); + goto out_no_irq; + } + } dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; register_pm_notifier(&dev_priv->pm_nb); - DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n"); - return 0; -out_no_device: - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); - if (dev->devname == vmw_devname) - dev->devname = NULL; out_no_irq: + if (dev_priv->enable_fb) { + vmw_fb_close(dev_priv); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } +out_no_fifo: + vmw_overlay_close(dev_priv); + vmw_kms_close(dev_priv); +out_no_kms: + if (dev_priv->stealth) + pci_release_region(dev->pdev, 2); + else + pci_release_regions(dev->pdev); +out_no_device: ttm_object_device_release(&dev_priv->tdev); out_err4: iounmap(dev_priv->mmio_virt); @@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev) unregister_pm_notifier(&dev_priv->pm_nb); - vmw_fb_close(dev_priv); + if (dev_priv->capabilities & SVGA_CAP_IRQMASK) + drm_irq_uninstall(dev_priv->dev); + if (dev_priv->enable_fb) { + vmw_fb_close(dev_priv); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); - vmw_release_device(dev_priv); if (dev_priv->stealth) pci_release_region(dev->pdev, 2); else pci_release_regions(dev->pdev); - if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - drm_irq_uninstall(dev_priv->dev); - if (dev->devname == vmw_devname) - dev->devname = NULL; ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start, @@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, struct drm_ioctl_desc *ioctl = &vmw_ioctls[nr - DRM_COMMAND_BASE]; - if (unlikely(ioctl->cmd != cmd)) { + if (unlikely(ioctl->cmd_drv != cmd)) { DRM_ERROR("Invalid command format, ioctl %d\n", nr - DRM_COMMAND_BASE); return -EINVAL; @@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev, struct vmw_master *vmaster = 
vmw_master(file_priv->master); int ret = 0; + if (!dev_priv->enable_fb) { + ret = vmw_3d_resource_inc(dev_priv); + if (unlikely(ret != 0)) + return ret; + vmw_kms_save_vga(dev_priv); + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 0); + mutex_unlock(&dev_priv->hw_mutex); + } + if (active) { BUG_ON(active != &dev_priv->fbdev_master); ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile); @@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev, return 0; out_no_active_lock: - vmw_release_device(dev_priv); + if (!dev_priv->enable_fb) { + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 1); + mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } return ret; } @@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev, ttm_lock_set_kill(&vmaster->lock, true, SIGTERM); + if (!dev_priv->enable_fb) { + ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); + if (unlikely(ret != 0)) + DRM_ERROR("Unable to clean VRAM on master drop.\n"); + mutex_lock(&dev_priv->hw_mutex); + vmw_write(dev_priv, SVGA_REG_TRACES, 1); + mutex_unlock(&dev_priv->hw_mutex); + vmw_kms_restore_vga(dev_priv); + vmw_3d_resource_dec(dev_priv); + } + dev_priv->active_master = &dev_priv->fbdev_master; ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM); ttm_vt_unlock(&dev_priv->fbdev_master.lock); - vmw_fb_on(dev_priv); + if (dev_priv->enable_fb) + vmw_fb_on(dev_priv); } @@ -722,6 +796,7 @@ static struct drm_driver driver = { .irq_postinstall = vmw_irq_postinstall, .irq_uninstall = vmw_irq_uninstall, .irq_handler = vmw_irq_handler, + .get_vblank_counter = vmw_get_vblank_counter, .reclaim_buffers_locked = NULL, .get_map_ofs = drm_core_get_map_ofs, .get_reg_ofs = drm_core_get_reg_ofs, @@ -745,6 +820,7 @@ static struct drm_driver driver = { #if defined(CONFIG_COMPAT) .compat_ioctl = drm_compat_ioctl, #endif + .llseek = noop_llseek, }, .pci_driver = { .name = VMWGFX_DRIVER_NAME, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 429f917b60b..58de6393f61 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -277,6 +277,7 @@ struct vmw_private { bool stealth; bool is_opened; + bool enable_fb; /** * Master management. 
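The vmwgfx changes above introduce vmw_3d_resource_inc()/vmw_3d_resource_dec(), which bring the device FIFO up for the first 3D user and release it when the last user goes away, serialized by release_mutex. A generic, hedged sketch of that first-user/last-user pattern follows; res3d_*, bring_up_device() and tear_down_device() are illustrative stand-ins, not symbols from the patch:

#include <linux/mutex.h>
#include <linux/types.h>

struct res3d_state {                 /* mirrors the new vmw_private fields */
        struct mutex release_mutex;
        u32 num_3d_resources;
};

int bring_up_device(struct res3d_state *s);   /* stand-in for vmw_request_device() */
void tear_down_device(struct res3d_state *s); /* stand-in for vmw_release_device() */

static int res3d_get(struct res3d_state *s)
{
        int ret = 0;

        mutex_lock(&s->release_mutex);
        if (s->num_3d_resources++ == 0) {
                ret = bring_up_device(s);       /* first user: power up */
                if (ret != 0)
                        --s->num_3d_resources;  /* undo the count on failure */
        }
        mutex_unlock(&s->release_mutex);
        return ret;
}

static void res3d_put(struct res3d_state *s)
{
        mutex_lock(&s->release_mutex);
        if (--s->num_3d_resources == 0)
                tear_down_device(s);            /* last user: power down */
        mutex_unlock(&s->release_mutex);
}

In the patch, context and surface creation call the inc helper and their destroy paths call the dec helper, so SVGA 3D state only stays active while something actually needs it.
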
@@ -285,6 +286,9 @@ struct vmw_private { struct vmw_master *active_master; struct vmw_master fbdev_master; struct notifier_block pm_nb; + + struct mutex release_mutex; + uint32_t num_3d_resources; }; static inline struct vmw_private *vmw_priv(struct drm_device *dev) @@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv, return val; } +int vmw_3d_resource_inc(struct vmw_private *dev_priv); +void vmw_3d_resource_dec(struct vmw_private *dev_priv); + /** * GMR utilities - vmwgfx_gmr.c */ @@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned bbp, unsigned depth); int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc); /** * Overlay control - vmwgfx_overlay.c diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 870967a97c1..409e172f4ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, if (unlikely(ret != 0)) goto err_unlock; + if (bo->mem.mem_type == TTM_PL_VRAM && + bo->mem.mm_node->start < bo->num_pages) + (void) ttm_bo_validate(bo, &vmw_sys_placement, false, + false, false); + ret = ttm_bo_validate(bo, &ne_placement, false, false, false); /* Could probably bug on */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index e6a1eb7ea95..0fe31766e4c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c @@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) mutex_lock(&dev_priv->hw_mutex); dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); + dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); vmw_write(dev_priv, SVGA_REG_ENABLE, 1); min = 4; @@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) dev_priv->config_done_state); vmw_write(dev_priv, SVGA_REG_ENABLE, dev_priv->enable_state); + vmw_write(dev_priv, SVGA_REG_TRACES, + dev_priv->traces_state); mutex_unlock(&dev_priv->hw_mutex); vmw_fence_queue_takedown(&fifo->fence_queue); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 64d7f47df86..e882ba099f0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv) save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH); save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); + if (i == 0 && vmw_priv->num_displays == 1 && + save->width == 0 && save->height == 0) { + + /* + * It should be fairly safe to assume that these + * values are uninitialized. 
+ */ + + save->width = vmw_priv->vga_width - save->pos_x; + save->height = vmw_priv->vga_height - save->pos_y; + } } + return 0; } @@ -984,3 +996,8 @@ out_unlock: ttm_read_unlock(&vmaster->lock); return ret; } + +u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc) +{ + return 0; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 7083b1a24df..11cb39e3acc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -27,6 +27,8 @@ #include "vmwgfx_kms.h" +#define VMWGFX_LDU_NUM_DU 8 + #define vmw_crtc_to_ldu(x) \ container_of(x, struct vmw_legacy_display_unit, base.crtc) #define vmw_encoder_to_ldu(x) \ @@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; + int i; + int ret; + if (dev_priv->ldu_priv) { DRM_INFO("ldu system already on\n"); return -EINVAL; @@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) drm_mode_create_dirty_info_property(dev_priv->dev); - vmw_ldu_init(dev_priv, 0); - /* for old hardware without multimon only enable one display */ if (dev_priv->capabilities & SVGA_CAP_MULTIMON) { - vmw_ldu_init(dev_priv, 1); - vmw_ldu_init(dev_priv, 2); - vmw_ldu_init(dev_priv, 3); - vmw_ldu_init(dev_priv, 4); - vmw_ldu_init(dev_priv, 5); - vmw_ldu_init(dev_priv, 6); - vmw_ldu_init(dev_priv, 7); + for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i) + vmw_ldu_init(dev_priv, i); + ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU); + } else { + /* for old hardware without multimon only enable one display */ + vmw_ldu_init(dev_priv, 0); + ret = drm_vblank_init(dev, 1); } - return 0; + return ret; } int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv) { + struct drm_device *dev = dev_priv->dev; + + drm_vblank_cleanup(dev); if (!dev_priv->ldu_priv) return -ENOSYS; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5f2d5df01e5..c8c40e9979d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) cmd->body.cid = cpu_to_le32(res->id); vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_3d_resource_dec(dev_priv); } static int vmw_context_init(struct vmw_private *dev_priv, @@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, cmd->body.cid = cpu_to_le32(res->id); vmw_fifo_commit(dev_priv, sizeof(*cmd)); + (void) vmw_3d_resource_inc(dev_priv); vmw_resource_activate(res, vmw_hw_context_destroy); return 0; } @@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) cmd->body.sid = cpu_to_le32(res->id); vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_3d_resource_dec(dev_priv); } void vmw_surface_res_free(struct vmw_resource *res) @@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv, } vmw_fifo_commit(dev_priv, submit_size); + (void) vmw_3d_resource_inc(dev_priv); vmw_resource_activate(res, vmw_hw_surface_destroy); return 0; } diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index b87569e96b1..c380c65da41 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -598,7 +598,7 @@ static inline void vga_update_device_decodes(struct vga_device *vgadev, pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); } -void __vga_set_legacy_decoding(struct pci_dev *pdev, 
unsigned int decodes, bool userspace) +static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) { struct vga_device *vgadev; unsigned long flags; @@ -1211,6 +1211,7 @@ static const struct file_operations vga_arb_device_fops = { .poll = vga_arb_fpoll, .open = vga_arb_open, .release = vga_arb_release, + .llseek = noop_llseek, }; static struct miscdevice vga_arb_device = { diff --git a/drivers/hid/hid-cando.c b/drivers/hid/hid-cando.c index 4267a6fdc27..5925bdcd417 100644 --- a/drivers/hid/hid-cando.c +++ b/drivers/hid/hid-cando.c @@ -237,6 +237,8 @@ static const struct hid_device_id cando_devices[] = { USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, + USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, { } }; MODULE_DEVICE_TABLE(hid, cando_devices); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 3f729248602..a0dea3d1296 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1292,6 +1292,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CANDO, USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index 850d02a7a92..61a3e572224 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c @@ -1051,6 +1051,7 @@ static const struct file_operations hid_debug_events_fops = { .read = hid_debug_events_read, .poll = hid_debug_events_poll, .release = hid_debug_events_release, + .llseek = noop_llseek, }; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 765a4f53eb5..c5ae5f1545b 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -134,6 +134,7 @@ #define USB_VENDOR_ID_CANDO 0x2087 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH 0x0a01 #define USB_DEVICE_ID_CANDO_MULTI_TOUCH_11_6 0x0b03 +#define USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6 0x0f01 #define USB_VENDOR_ID_CH 0x068e #define USB_DEVICE_ID_CH_PRO_PEDALS 0x00f2 @@ -503,6 +504,7 @@ #define USB_VENDOR_ID_TURBOX 0x062a #define USB_DEVICE_ID_TURBOX_KEYBOARD 0x0201 +#define USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART 0x7100 #define USB_VENDOR_ID_TWINHAN 0x6253 #define USB_DEVICE_ID_TWINHAN_IR_REMOTE 0x0100 diff --git a/drivers/hid/hid-roccat.c b/drivers/hid/hid-roccat.c index f6e80c7ca61..5a6879e235a 100644 --- a/drivers/hid/hid-roccat.c +++ b/drivers/hid/hid-roccat.c @@ -384,6 +384,7 @@ static const struct file_operations roccat_ops = { .poll = roccat_poll, .open = roccat_open, .release = roccat_release, + .llseek = noop_llseek, }; static int __init roccat_init(void) diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 47d70c523d9..925992f549f 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -109,6 +109,12 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t int ret = 0; mutex_lock(&minors_lock); + + if (!hidraw_table[minor]) { + ret = -ENODEV; + goto out; + } + dev = hidraw_table[minor]->hid; if (!dev->hid_output_raw_report) { @@ -244,6 +250,10 @@ static long 
hidraw_ioctl(struct file *file, unsigned int cmd, mutex_lock(&minors_lock); dev = hidraw_table[minor]; + if (!dev) { + ret = -ENODEV; + goto out; + } switch (cmd) { case HIDIOCGRDESCSIZE: @@ -317,6 +327,7 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, ret = -ENOTTY; } +out: mutex_unlock(&minors_lock); return ret; } @@ -329,6 +340,7 @@ static const struct file_operations hidraw_ops = { .open = hidraw_open, .release = hidraw_release, .unlocked_ioctl = hidraw_ioctl, + .llseek = noop_llseek, }; void hidraw_report_event(struct hid_device *hid, u8 *data, int len) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 70da3181c8a..f0260c699ad 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -36,6 +36,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER, HID_QUIRK_MULTI_INPUT | HID_QUIRK_NOGET }, { USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_MOJO, USB_DEVICE_ID_RETRO_ADAPTER, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_TOUCHSCREEN_MOSART, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 681e620eb95..dfcb27613ec 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -847,6 +847,7 @@ static const struct file_operations hiddev_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = hiddev_compat_ioctl, #endif + .llseek = noop_llseek, }; static char *hiddev_devnode(struct device *dev, mode_t *mode) diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 4d4d09bdec0..97499d00615 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -409,7 +409,7 @@ config SENSORS_CORETEMP config SENSORS_PKGTEMP tristate "Intel processor package temperature sensor" - depends on X86 && PCI && EXPERIMENTAL + depends on X86 && EXPERIMENTAL help If you say yes here you get support for the package level temperature sensor inside your CPU. Check documentation/driver for details. diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index 653db1bda93..23b8555215d 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c @@ -762,6 +762,7 @@ static const struct file_operations atk_debugfs_ggrp_fops = { .read = atk_debugfs_ggrp_read, .open = atk_debugfs_ggrp_open, .release = atk_debugfs_ggrp_release, + .llseek = no_llseek, }; static void atk_debugfs_init(struct atk_data *data) diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index de8111114f4..a23b17a78ac 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -36,6 +36,7 @@ #include <linux/pci.h> #include <asm/msr.h> #include <asm/processor.h> +#include <asm/smp.h> #define DRVNAME "coretemp" @@ -423,9 +424,18 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; -#ifdef CONFIG_SMP struct cpuinfo_x86 *c = &cpu_data(cpu); -#endif + + /* + * CPUID.06H.EAX[0] indicates whether the CPU has thermal + * sensors. We check this bit only, all the early CPUs + * without thermal sensors will be filtered out. 
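The hidraw hunks above add the same guard to write() and ioctl(): the minor-table slot is looked up again under minors_lock and the call bails out with -ENODEV if the device was disconnected in the meantime. A hedged sketch of that re-validate-under-the-lock pattern, with every my_* identifier being illustrative rather than taken from the driver:

#include <linux/mutex.h>
#include <linux/errno.h>

#define MY_MAX_MINORS 64                        /* illustrative bound */
static DEFINE_MUTEX(my_minors_lock);            /* stand-in for minors_lock */
static struct my_dev *my_table[MY_MAX_MINORS];  /* stand-in for hidraw_table */

static long my_ioctl_prologue(unsigned int minor)
{
        long ret = 0;

        mutex_lock(&my_minors_lock);
        if (!my_table[minor]) {          /* device unplugged since open() */
                ret = -ENODEV;
                goto out;
        }
        /* ... only here is it safe to dereference my_table[minor] ... */
out:
        mutex_unlock(&my_minors_lock);
        return ret;
}
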
+ */ + if (!cpu_has(c, X86_FEATURE_DTS)) { + printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" + " has no thermal sensor.\n", c->x86_model); + return 0; + } mutex_lock(&pdev_list_mutex); @@ -482,14 +492,22 @@ exit: static void coretemp_device_remove(unsigned int cpu) { - struct pdev_entry *p, *n; + struct pdev_entry *p; + unsigned int i; + mutex_lock(&pdev_list_mutex); - list_for_each_entry_safe(p, n, &pdev_list, list) { - if (p->cpu == cpu) { - platform_device_unregister(p->pdev); - list_del(&p->list); - kfree(p); - } + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu != cpu) + continue; + + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + for_each_cpu(i, cpu_sibling_mask(cpu)) + if (i != cpu && !coretemp_device_add(i)) + break; + return; } mutex_unlock(&pdev_list_mutex); } @@ -527,30 +545,21 @@ static int __init coretemp_init(void) if (err) goto exit; - for_each_online_cpu(i) { - struct cpuinfo_x86 *c = &cpu_data(i); - /* - * CPUID.06H.EAX[0] indicates whether the CPU has thermal - * sensors. We check this bit only, all the early CPUs - * without thermal sensors will be filtered out. - */ - if (c->cpuid_level >= 6 && (cpuid_eax(0x06) & 0x01)) - coretemp_device_add(i); - else { - printk(KERN_INFO DRVNAME ": CPU (model=0x%x)" - " has no thermal sensor.\n", c->x86_model); - } - } + for_each_online_cpu(i) + coretemp_device_add(i); + +#ifndef CONFIG_HOTPLUG_CPU if (list_empty(&pdev_list)) { err = -ENODEV; goto exit_driver_unreg; } +#endif register_hotcpu_notifier(&coretemp_cpu_notifier); return 0; -exit_driver_unreg: #ifndef CONFIG_HOTPLUG_CPU +exit_driver_unreg: platform_driver_unregister(&coretemp_driver); #endif exit: diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 537841ef44b..75afb3b0e07 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -111,7 +111,7 @@ static struct platform_device *f71882fg_pdev; /* Super-I/O Function prototypes */ static inline int superio_inb(int base, int reg); static inline int superio_inw(int base, int reg); -static inline void superio_enter(int base); +static inline int superio_enter(int base); static inline void superio_select(int base, int ld); static inline void superio_exit(int base); @@ -861,11 +861,20 @@ static int superio_inw(int base, int reg) return val; } -static inline void superio_enter(int base) +static inline int superio_enter(int base) { + /* Don't step on other drivers' I/O space by accident */ + if (!request_muxed_region(base, 2, DRVNAME)) { + printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", + base); + return -EBUSY; + } + /* according to the datasheet the key must be send twice! 
*/ outb(SIO_UNLOCK_KEY, base); outb(SIO_UNLOCK_KEY, base); + + return 0; } static inline void superio_select(int base, int ld) @@ -877,6 +886,7 @@ static inline void superio_select(int base, int ld) static inline void superio_exit(int base) { outb(SIO_LOCK_KEY, base); + release_region(base, 2); } static inline int fan_from_reg(u16 reg) @@ -2175,21 +2185,15 @@ static int f71882fg_remove(struct platform_device *pdev) static int __init f71882fg_find(int sioaddr, unsigned short *address, struct f71882fg_sio_data *sio_data) { - int err = -ENODEV; u16 devid; - - /* Don't step on other drivers' I/O space by accident */ - if (!request_region(sioaddr, 2, DRVNAME)) { - printk(KERN_ERR DRVNAME ": I/O address 0x%04x already in use\n", - (int)sioaddr); - return -EBUSY; - } - - superio_enter(sioaddr); + int err = superio_enter(sioaddr); + if (err) + return err; devid = superio_inw(sioaddr, SIO_REG_MANID); if (devid != SIO_FINTEK_ID) { pr_debug(DRVNAME ": Not a Fintek device\n"); + err = -ENODEV; goto exit; } @@ -2213,6 +2217,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, default: printk(KERN_INFO DRVNAME ": Unsupported Fintek device: %04x\n", (unsigned int)devid); + err = -ENODEV; goto exit; } @@ -2223,12 +2228,14 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, if (!(superio_inb(sioaddr, SIO_REG_ENABLE) & 0x01)) { printk(KERN_WARNING DRVNAME ": Device not activated\n"); + err = -ENODEV; goto exit; } *address = superio_inw(sioaddr, SIO_REG_ADDR); if (*address == 0) { printk(KERN_WARNING DRVNAME ": Base address not set\n"); + err = -ENODEV; goto exit; } *address &= ~(REGION_LENGTH - 1); /* Ignore 3 LSB */ @@ -2239,7 +2246,6 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, (int)superio_inb(sioaddr, SIO_REG_DEVREV)); exit: superio_exit(sioaddr); - release_region(sioaddr, 2); return err; } diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index b7ca2a9676c..d4d4ca65d37 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -38,7 +38,6 @@ #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> -#include <linux/smp_lock.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/sysfs.h> @@ -50,6 +49,7 @@ #include <linux/kref.h> /* Addresses to scan */ +static DEFINE_MUTEX(watchdog_mutex); static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END }; /* Insmod parameters */ @@ -858,7 +858,7 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long ar int i, ret = 0; struct fschmd_data *data = filp->private_data; - lock_kernel(); + mutex_lock(&watchdog_mutex); switch (cmd) { case WDIOC_GETSUPPORT: ident.firmware_version = data->revision; @@ -915,7 +915,7 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, unsigned long ar default: ret = -ENOTTY; } - unlock_kernel(); + mutex_unlock(&watchdog_mutex); return ret; } diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c index 6138f036b15..fc591ae5310 100644 --- a/drivers/hwmon/lis3lv02d.c +++ b/drivers/hwmon/lis3lv02d.c @@ -277,7 +277,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy) wake_up_interruptible(&lis3_dev.misc_wait); kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN); out: - if (lis3_dev.whoami == WAI_8B && lis3_dev.idev && + if (lis3_dev.pdata && lis3_dev.whoami == WAI_8B && lis3_dev.idev && lis3_dev.idev->input->users) return IRQ_WAKE_THREAD; return IRQ_HANDLED; @@ -718,7 +718,7 @@ int lis3lv02d_init_device(struct lis3lv02d *dev) * io-apic is not 
configurable (and generates a warning) but I keep it * in case of support for other hardware. */ - if (dev->whoami == WAI_8B) + if (dev->pdata && dev->whoami == WAI_8B) thread_fn = lis302dl_interrupt_thread1_8b; else thread_fn = NULL; diff --git a/drivers/hwmon/pkgtemp.c b/drivers/hwmon/pkgtemp.c index 74157fcda6e..f11903936c8 100644 --- a/drivers/hwmon/pkgtemp.c +++ b/drivers/hwmon/pkgtemp.c @@ -33,7 +33,6 @@ #include <linux/list.h> #include <linux/platform_device.h> #include <linux/cpu.h> -#include <linux/pci.h> #include <asm/msr.h> #include <asm/processor.h> @@ -224,7 +223,7 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) err = sysfs_create_group(&pdev->dev.kobj, &pkgtemp_group); if (err) - goto exit_free; + goto exit_dev; data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { @@ -238,6 +237,8 @@ static int __devinit pkgtemp_probe(struct platform_device *pdev) exit_class: sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); +exit_dev: + device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); exit_free: kfree(data); exit: @@ -250,6 +251,7 @@ static int __devexit pkgtemp_remove(struct platform_device *pdev) hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &pkgtemp_group); + device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_max.dev_attr); platform_set_drvdata(pdev, NULL); kfree(data); return 0; @@ -281,9 +283,10 @@ static int __cpuinit pkgtemp_device_add(unsigned int cpu) int err; struct platform_device *pdev; struct pdev_entry *pdev_entry; -#ifdef CONFIG_SMP struct cpuinfo_x86 *c = &cpu_data(cpu); -#endif + + if (!cpu_has(c, X86_FEATURE_PTS)) + return 0; mutex_lock(&pdev_list_mutex); @@ -339,17 +342,18 @@ exit: #ifdef CONFIG_HOTPLUG_CPU static void pkgtemp_device_remove(unsigned int cpu) { - struct pdev_entry *p, *n; + struct pdev_entry *p; unsigned int i; int err; mutex_lock(&pdev_list_mutex); - list_for_each_entry_safe(p, n, &pdev_list, list) { + list_for_each_entry(p, &pdev_list, list) { if (p->cpu != cpu) continue; platform_device_unregister(p->pdev); list_del(&p->list); + mutex_unlock(&pdev_list_mutex); kfree(p); for_each_cpu(i, cpu_core_mask(cpu)) { if (i != cpu) { @@ -358,7 +362,7 @@ static void pkgtemp_device_remove(unsigned int cpu) break; } } - break; + return; } mutex_unlock(&pdev_list_mutex); } @@ -399,11 +403,6 @@ static int __init pkgtemp_init(void) goto exit; for_each_online_cpu(i) { - struct cpuinfo_x86 *c = &cpu_data(i); - - if (!cpu_has(c, X86_FEATURE_PTS)) - continue; - err = pkgtemp_device_add(i); if (err) goto exit_devices_unreg; diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index 697202e2789..8e540ada47d 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -35,7 +35,6 @@ #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> -#include <linux/smp_lock.h> #include <linux/hwmon-vid.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> @@ -52,6 +51,7 @@ #define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */ /* Addresses to scan */ +static DEFINE_MUTEX(watchdog_mutex); static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; @@ -1333,7 +1333,7 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, int val, ret = 0; struct w83793_data *data = filp->private_data; - lock_kernel(); + mutex_lock(&watchdog_mutex); switch (cmd) { case WDIOC_GETSUPPORT: if (!nowayout) @@ -1387,7 +1387,7 @@ static long watchdog_ioctl(struct file *filp, unsigned int cmd, default: ret = -ENOTTY; } - 
unlock_kernel(); + mutex_unlock(&watchdog_mutex); return ret; } diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index f7bd2613cec..f2de3be35df 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -677,6 +677,11 @@ static int __devinit cpm_i2c_probe(struct platform_device *ofdev, dev_dbg(&ofdev->dev, "hw routines for %s registered.\n", cpm->adap.name); + /* + * register OF I2C devices + */ + of_i2c_register_devices(&cpm->adap); + return 0; out_shut: cpm_i2c_shutdown(cpm); diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c index 2222c87876b..5795c8398c7 100644 --- a/drivers/i2c/busses/i2c-davinci.c +++ b/drivers/i2c/busses/i2c-davinci.c @@ -331,21 +331,16 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) INIT_COMPLETION(dev->cmd_complete); dev->cmd_err = 0; - /* Take I2C out of reset, configure it as master and set the - * start bit */ - flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST | DAVINCI_I2C_MDR_STT; + /* Take I2C out of reset and configure it as master */ + flag = DAVINCI_I2C_MDR_IRS | DAVINCI_I2C_MDR_MST; /* if the slave address is ten bit address, enable XA bit */ if (msg->flags & I2C_M_TEN) flag |= DAVINCI_I2C_MDR_XA; if (!(msg->flags & I2C_M_RD)) flag |= DAVINCI_I2C_MDR_TRX; - if (stop) - flag |= DAVINCI_I2C_MDR_STP; - if (msg->len == 0) { + if (msg->len == 0) flag |= DAVINCI_I2C_MDR_RM; - flag &= ~DAVINCI_I2C_MDR_STP; - } /* Enable receive or transmit interrupts */ w = davinci_i2c_read_reg(dev, DAVINCI_I2C_IMR_REG); @@ -357,7 +352,11 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) dev->terminate = 0; - /* write the data into mode register */ + /* + * Write mode register first as needed for correct behaviour + * on OMAP-L138, but don't set STT yet to avoid a race with XRDY + * occuring before we have loaded DXR + */ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); /* @@ -365,12 +364,19 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop) * because transmit-data-ready interrupt can come before * NACK-interrupt during sending of previous message and * ICDXR may have wrong data + * It also saves us one interrupt, slightly faster */ if ((!(msg->flags & I2C_M_RD)) && dev->buf_len) { davinci_i2c_write_reg(dev, DAVINCI_I2C_DXR_REG, *dev->buf++); dev->buf_len--; } + /* Set STT to begin transmit now DXR is loaded */ + flag |= DAVINCI_I2C_MDR_STT; + if (stop && msg->len != 0) + flag |= DAVINCI_I2C_MDR_STP; + davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, flag); + r = wait_for_completion_interruptible_timeout(&dev->cmd_complete, dev->adapter.timeout); if (r == 0) { diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 43ca32fddde..89eedf45d30 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -761,6 +761,9 @@ static int __devinit iic_probe(struct platform_device *ofdev, dev_info(&ofdev->dev, "using %s mode\n", dev->fast_mode ? 
"fast (400 kHz)" : "standard (100 kHz)"); + /* Now register all the child nodes */ + of_i2c_register_devices(adap); + return 0; error_cleanup: diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index d1ff9408dc1..4c2a62b75b5 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -159,15 +159,9 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx) { - int result; - - result = wait_event_interruptible_timeout(i2c_imx->queue, - i2c_imx->i2csr & I2SR_IIF, HZ / 10); + wait_event_timeout(i2c_imx->queue, i2c_imx->i2csr & I2SR_IIF, HZ / 10); - if (unlikely(result < 0)) { - dev_dbg(&i2c_imx->adapter.dev, "<%s> result < 0\n", __func__); - return result; - } else if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) { + if (unlikely(!(i2c_imx->i2csr & I2SR_IIF))) { dev_dbg(&i2c_imx->adapter.dev, "<%s> Timeout\n", __func__); return -ETIMEDOUT; } @@ -295,7 +289,7 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) i2c_imx->i2csr = temp; temp &= ~I2SR_IIF; writeb(temp, i2c_imx->base + IMX_I2C_I2SR); - wake_up_interruptible(&i2c_imx->queue); + wake_up(&i2c_imx->queue); return IRQ_HANDLED; } diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index a1c419a716a..b74e6dc6886 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -632,6 +632,7 @@ static int __devinit fsl_i2c_probe(struct platform_device *op, dev_err(i2c->dev, "failed to add adapter\n"); goto fail_add; } + of_i2c_register_devices(&i2c->adap); return result; diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c index 0e9f85d0a83..56dbe54e881 100644 --- a/drivers/i2c/busses/i2c-octeon.c +++ b/drivers/i2c/busses/i2c-octeon.c @@ -218,7 +218,7 @@ static int octeon_i2c_wait(struct octeon_i2c *i2c) return result; } else if (result == 0) { dev_dbg(i2c->dev, "%s: timeout\n", __func__); - result = -ETIMEDOUT; + return -ETIMEDOUT; } return 0; diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 7674efb5537..b33c78586bf 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -680,6 +680,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) if (r == 0) r = num; + + omap_i2c_wait_for_bb(dev); out: omap_i2c_idle(dev); return r; diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index 4174101660c..837b8c1aa02 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c @@ -88,7 +88,7 @@ static void pasemi_smb_clear(struct pasemi_smbus *smbus) reg_write(smbus, REG_SMSTA, status); } -static unsigned int pasemi_smb_waitready(struct pasemi_smbus *smbus) +static int pasemi_smb_waitready(struct pasemi_smbus *smbus) { int timeout = 10; unsigned int status; diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c index bbd77603a41..29933f87d8f 100644 --- a/drivers/i2c/busses/i2c-pca-isa.c +++ b/drivers/i2c/busses/i2c-pca-isa.c @@ -71,8 +71,8 @@ static int pca_isa_readbyte(void *pd, int reg) static int pca_isa_waitforcompletion(void *pd) { - long ret = ~0; unsigned long timeout; + long ret; if (irq > -1) { ret = wait_event_timeout(pca_wait, @@ -81,11 +81,15 @@ static int pca_isa_waitforcompletion(void *pd) } else { /* Do polling */ timeout = jiffies + pca_isa_ops.timeout; - while (((pca_isa_readbyte(pd, I2C_PCA_CON) - & I2C_PCA_CON_SI) == 0) - && (ret = time_before(jiffies, timeout))) + do { + ret = time_before(jiffies, 
timeout); + if (pca_isa_readbyte(pd, I2C_PCA_CON) + & I2C_PCA_CON_SI) + break; udelay(100); + } while (ret); } + return ret > 0; } diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index ef5c78487eb..5f6d7f89e22 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -80,8 +80,8 @@ static void i2c_pca_pf_writebyte32(void *pd, int reg, int val) static int i2c_pca_pf_waitforcompletion(void *pd) { struct i2c_pca_pf_data *i2c = pd; - long ret = ~0; unsigned long timeout; + long ret; if (i2c->irq) { ret = wait_event_timeout(i2c->wait, @@ -90,10 +90,13 @@ static int i2c_pca_pf_waitforcompletion(void *pd) } else { /* Do polling */ timeout = jiffies + i2c->adap.timeout; - while (((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) - & I2C_PCA_CON_SI) == 0) - && (ret = time_before(jiffies, timeout))) + do { + ret = time_before(jiffies, timeout); + if (i2c->algo_data.read_byte(i2c, I2C_PCA_CON) + & I2C_PCA_CON_SI) + break; udelay(100); + } while (ret); } return ret > 0; diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index 72902e0bbfa..bf831bf8158 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -662,8 +662,8 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got) unsigned long sda_delay; if (pdata->sda_delay) { - sda_delay = (freq / 1000) * pdata->sda_delay; - sda_delay /= 1000000; + sda_delay = clkin * pdata->sda_delay; + sda_delay = DIV_ROUND_UP(sda_delay, 1000000); sda_delay = DIV_ROUND_UP(sda_delay, 5); if (sda_delay > 3) sda_delay = 3; diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 6649176de94..bea4c5021d2 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -32,7 +32,6 @@ #include <linux/init.h> #include <linux/idr.h> #include <linux/mutex.h> -#include <linux/of_i2c.h> #include <linux/of_device.h> #include <linux/completion.h> #include <linux/hardirq.h> @@ -197,11 +196,12 @@ static int i2c_device_pm_suspend(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (pm_runtime_suspended(dev)) - return 0; - - if (pm) - return pm->suspend ? pm->suspend(dev) : 0; + if (pm) { + if (pm_runtime_suspended(dev)) + return 0; + else + return pm->suspend ? pm->suspend(dev) : 0; + } return i2c_legacy_suspend(dev, PMSG_SUSPEND); } @@ -216,12 +216,6 @@ static int i2c_device_pm_resume(struct device *dev) else ret = i2c_legacy_resume(dev); - if (!ret) { - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - } - return ret; } @@ -229,11 +223,12 @@ static int i2c_device_pm_freeze(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (pm_runtime_suspended(dev)) - return 0; - - if (pm) - return pm->freeze ? pm->freeze(dev) : 0; + if (pm) { + if (pm_runtime_suspended(dev)) + return 0; + else + return pm->freeze ? pm->freeze(dev) : 0; + } return i2c_legacy_suspend(dev, PMSG_FREEZE); } @@ -242,11 +237,12 @@ static int i2c_device_pm_thaw(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (pm_runtime_suspended(dev)) - return 0; - - if (pm) - return pm->thaw ? pm->thaw(dev) : 0; + if (pm) { + if (pm_runtime_suspended(dev)) + return 0; + else + return pm->thaw ? pm->thaw(dev) : 0; + } return i2c_legacy_resume(dev); } @@ -255,11 +251,12 @@ static int i2c_device_pm_poweroff(struct device *dev) { const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; - if (pm_runtime_suspended(dev)) - return 0; - - if (pm) - return pm->poweroff ? pm->poweroff(dev) : 0; + if (pm) { + if (pm_runtime_suspended(dev)) + return 0; + else + return pm->poweroff ? pm->poweroff(dev) : 0; + } return i2c_legacy_suspend(dev, PMSG_HIBERNATE); } @@ -876,9 +873,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap) if (adap->nr < __i2c_first_dynamic_bus_num) i2c_scan_static_board_info(adap); - /* Register devices from the device tree */ - of_i2c_register_devices(adap); - /* Notify drivers */ mutex_lock(&core_lock); bus_for_each_drv(&i2c_bus_type, NULL, adap, __process_new_adapter); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 31fc76960a8..0c73fe39a23 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -31,7 +31,6 @@ #include <linux/delay.h> #include <linux/timer.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/errno.h> @@ -52,6 +51,7 @@ #include "ide-cd.h" +static DEFINE_MUTEX(ide_cd_mutex); static DEFINE_MUTEX(idecd_ref_mutex); static void ide_cd_release(struct device *); @@ -1602,7 +1602,7 @@ static int idecd_open(struct block_device *bdev, fmode_t mode) struct cdrom_info *info; int rc = -ENXIO; - lock_kernel(); + mutex_lock(&ide_cd_mutex); info = ide_cd_get(bdev->bd_disk); if (!info) goto out; @@ -1611,7 +1611,7 @@ static int idecd_open(struct block_device *bdev, fmode_t mode) if (rc < 0) ide_cd_put(info); out: - unlock_kernel(); + mutex_unlock(&ide_cd_mutex); return rc; } @@ -1619,11 +1619,11 @@ static int idecd_release(struct gendisk *disk, fmode_t mode) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); - lock_kernel(); + mutex_lock(&ide_cd_mutex); cdrom_release(&info->devinfo, mode); ide_cd_put(info); - unlock_kernel(); + mutex_unlock(&ide_cd_mutex); return 0; } @@ -1694,9 +1694,9 @@ static int idecd_ioctl(struct block_device *bdev, fmode_t mode, { int ret; - lock_kernel(); + mutex_lock(&ide_cd_mutex); ret = idecd_locked_ioctl(bdev, mode, cmd, arg); - unlock_kernel(); + mutex_unlock(&ide_cd_mutex); return ret; } diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index 2a4cb9c18f0..404843e8611 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c @@ -43,7 +43,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> @@ -72,17 +71,6 @@ static int ide_config(struct pcmcia_device *); static void ide_detach(struct pcmcia_device *p_dev); - - - -/*====================================================================== - - ide_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - -======================================================================*/ - static int ide_probe(struct pcmcia_device *link) { ide_info_t *info; @@ -97,23 +85,12 @@ static int ide_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO | + CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; return ide_config(link); } /* ide_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. 
If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void ide_detach(struct pcmcia_device *link) { ide_info_t *info = link->priv; @@ -187,79 +164,31 @@ out_release: return NULL; } -/*====================================================================== - - ide_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ide device available to the system. - -======================================================================*/ - -struct pcmcia_config_check { - unsigned long ctl_base; - int skip_vcc; - int is_kme; -}; - -static int pcmcia_check_one_config(struct pcmcia_device *pdev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data) { - struct pcmcia_config_check *stk = priv_data; - - /* Check for matching Vcc, unless we're desperate */ - if (!stk->skip_vcc) { - if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) - return -ENODEV; - } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) - return -ENODEV; - } - } + int *is_kme = priv_data; - if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) - pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; - else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) - pdev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; - - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - pdev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - - pdev->conf.ConfigIndex = cfg->index; - pdev->resource[0]->start = io->win[0].base; - if (!(io->flags & CISTPL_IO_16BIT)) { - pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - } - if (io->nwin == 2) { - pdev->resource[0]->end = 8; - pdev->resource[1]->start = io->win[1].base; - pdev->resource[1]->end = (stk->is_kme) ? 2 : 1; - if (pcmcia_request_io(pdev) != 0) - return -ENODEV; - stk->ctl_base = pdev->resource[1]->start; - } else if ((io->nwin == 1) && (io->win[0].len >= 16)) { - pdev->resource[0]->end = io->win[0].len; - pdev->resource[1]->end = 0; - if (pcmcia_request_io(pdev) != 0) - return -ENODEV; - stk->ctl_base = pdev->resource[0]->start + 0x0e; - } else + if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { + pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + } + pdev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + pdev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + + if (pdev->resource[1]->end) { + pdev->resource[0]->end = 8; + pdev->resource[1]->end = (*is_kme) ? 
2 : 1; + } else { + if (pdev->resource[0]->end < 16) return -ENODEV; - /* If we've got this far, we're done */ - return 0; } - return -ENODEV; + + return pcmcia_request_io(pdev); } static int ide_config(struct pcmcia_device *link) { ide_info_t *info = link->priv; - struct pcmcia_config_check *stk = NULL; int ret = 0, is_kme = 0; unsigned long io_base, ctl_base; struct ide_host *host; @@ -270,23 +199,21 @@ static int ide_config(struct pcmcia_device *link) ((link->card_id == PRODID_KME_KXLC005_A) || (link->card_id == PRODID_KME_KXLC005_B))); - stk = kzalloc(sizeof(*stk), GFP_KERNEL); - if (!stk) - goto err_mem; - stk->is_kme = is_kme; - stk->skip_vcc = io_base = ctl_base = 0; - - if (pcmcia_loop_config(link, pcmcia_check_one_config, stk)) { - stk->skip_vcc = 1; - if (pcmcia_loop_config(link, pcmcia_check_one_config, stk)) + if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) { + link->config_flags &= ~CONF_AUTO_CHECK_VCC; + if (pcmcia_loop_config(link, pcmcia_check_one_config, &is_kme)) goto failed; /* No suitable config found */ } io_base = link->resource[0]->start; - ctl_base = stk->ctl_base; + if (link->resource[1]->end) + ctl_base = link->resource[1]->start; + else + ctl_base = link->resource[0]->start + 0x0e; if (!link->irq) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -311,29 +238,15 @@ static int ide_config(struct pcmcia_device *link) info->host = host; dev_info(&link->dev, "ide-cs: hd%c: Vpp = %d.%d\n", 'a' + host->ports[0]->index * 2, - link->conf.Vpp / 10, link->conf.Vpp % 10); + link->vpp / 10, link->vpp % 10); - kfree(stk); return 0; -err_mem: - printk(KERN_NOTICE "ide-cs: ide_config failed memory allocation\n"); - goto failed; - failed: - kfree(stk); ide_release(link); return -ENODEV; } /* ide_config */ -/*====================================================================== - - After a card is removed, ide_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void ide_release(struct pcmcia_device *link) { ide_info_t *info = link->priv; @@ -359,15 +272,6 @@ static void ide_release(struct pcmcia_device *link) } /* ide_release */ -/*====================================================================== - - The card status event handler. Mostly, this schedules other - stuff to run after an event is received. A CARD_REMOVAL event - also sets some flags to discourage the ide drivers from - talking to the ports. 
- -======================================================================*/ - static struct pcmcia_device_id ide_ids[] = { PCMCIA_DEVICE_FUNC_ID(4), PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000), /* Corsair */ @@ -440,9 +344,7 @@ MODULE_DEVICE_TABLE(pcmcia, ide_ids); static struct pcmcia_driver ide_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "ide-cs", - }, + .name = "ide-cs", .probe = ide_probe, .remove = ide_detach, .id_table = ide_ids, diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 7433e07de30..7c5b01ce51d 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -516,10 +516,10 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect) return ide_no_data_taskfile(drive, &cmd); } -static void update_ordered(ide_drive_t *drive) +static void update_flush(ide_drive_t *drive) { u16 *id = drive->id; - unsigned ordered = QUEUE_ORDERED_NONE; + unsigned flush = 0; if (drive->dev_flags & IDE_DFLAG_WCACHE) { unsigned long long capacity; @@ -543,13 +543,12 @@ static void update_ordered(ide_drive_t *drive) drive->name, barrier ? "" : "not "); if (barrier) { - ordered = QUEUE_ORDERED_DRAIN_FLUSH; + flush = REQ_FLUSH; blk_queue_prep_rq(drive->queue, idedisk_prep_fn); } - } else - ordered = QUEUE_ORDERED_DRAIN; + } - blk_queue_ordered(drive->queue, ordered); + blk_queue_flush(drive->queue, flush); } ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE); @@ -572,7 +571,7 @@ static int set_wcache(ide_drive_t *drive, int arg) } } - update_ordered(drive); + update_flush(drive); return err; } diff --git a/drivers/ide/ide-disk_ioctl.c b/drivers/ide/ide-disk_ioctl.c index ec94c66918f..da36f729ff3 100644 --- a/drivers/ide/ide-disk_ioctl.c +++ b/drivers/ide/ide-disk_ioctl.c @@ -1,10 +1,11 @@ #include <linux/kernel.h> #include <linux/ide.h> #include <linux/hdreg.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include "ide-disk.h" +static DEFINE_MUTEX(ide_disk_ioctl_mutex); static const struct ide_ioctl_devset ide_disk_ioctl_settings[] = { { HDIO_GET_ADDRESS, HDIO_SET_ADDRESS, &ide_devset_address }, { HDIO_GET_MULTCOUNT, HDIO_SET_MULTCOUNT, &ide_devset_multcount }, @@ -19,13 +20,13 @@ int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode, { int err; - lock_kernel(); + mutex_lock(&ide_disk_ioctl_mutex); err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings); if (err != -EOPNOTSUPP) goto out; err = generic_ide_ioctl(drive, bdev, cmd, arg); out: - unlock_kernel(); + mutex_unlock(&ide_disk_ioctl_mutex); return err; } diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c index fd3d05ab341..d267b7affad 100644 --- a/drivers/ide/ide-floppy_ioctl.c +++ b/drivers/ide/ide-floppy_ioctl.c @@ -5,7 +5,7 @@ #include <linux/kernel.h> #include <linux/ide.h> #include <linux/cdrom.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/unaligned.h> @@ -32,6 +32,7 @@ * On exit we set nformats to the number of records we've actually initialized. 
*/ +static DEFINE_MUTEX(ide_floppy_ioctl_mutex); static int ide_floppy_get_format_capacities(ide_drive_t *drive, struct ide_atapi_pc *pc, int __user *arg) @@ -276,7 +277,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev, void __user *argp = (void __user *)arg; int err; - lock_kernel(); + mutex_lock(&ide_floppy_ioctl_mutex); if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) { err = ide_floppy_lockdoor(drive, &pc, arg, cmd); goto out; @@ -298,6 +299,6 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev, err = generic_ide_ioctl(drive, bdev, cmd, arg); out: - unlock_kernel(); + mutex_unlock(&ide_floppy_ioctl_mutex); return err; } diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c index 70aeeb18833..35c4b43585e 100644 --- a/drivers/ide/ide-gd.c +++ b/drivers/ide/ide-gd.c @@ -1,4 +1,3 @@ -#include <linux/smp_lock.h> #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> @@ -23,6 +22,7 @@ #define IDE_GD_VERSION "1.18" /* module parameters */ +static DEFINE_MUTEX(ide_gd_mutex); static unsigned long debug_mask; module_param(debug_mask, ulong, 0644); @@ -242,9 +242,9 @@ static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&ide_gd_mutex); ret = ide_gd_open(bdev, mode); - unlock_kernel(); + mutex_unlock(&ide_gd_mutex); return ret; } @@ -257,7 +257,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode) ide_debug_log(IDE_DBG_FUNC, "enter"); - lock_kernel(); + mutex_lock(&ide_gd_mutex); if (idkp->openers == 1) drive->disk_ops->flush(drive); @@ -269,7 +269,7 @@ static int ide_gd_release(struct gendisk *disk, fmode_t mode) idkp->openers--; ide_disk_put(idkp); - unlock_kernel(); + mutex_unlock(&ide_gd_mutex); return 0; } diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index a381be81407..999dac054bc 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -441,19 +441,6 @@ void do_ide_request(struct request_queue *q) struct request *rq = NULL; ide_startstop_t startstop; - /* - * drive is doing pre-flush, ordered write, post-flush sequence. even - * though that is 3 requests, it must be seen as a single transaction. - * we must not preempt this drive until that is complete - */ - if (blk_queue_flushing(q)) - /* - * small race where queue could get replugged during - * the 3-request flush cycle, just yank the plug since - * we want it to finish asap - */ - blk_remove_plug(q); - spin_unlock_irq(q->queue_lock); /* HLD do_request() callback might sleep, make sure it's okay */ diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4c3d1bfec0c..068cef0a987 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1448,19 +1448,13 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, if (hwif == NULL) continue; - if (hwif->present) - hwif_register_devices(hwif); - } - - ide_host_for_each_port(i, hwif, host) { - if (hwif == NULL) - continue; - ide_sysfs_register_port(hwif); ide_proc_register_port(hwif); - if (hwif->present) + if (hwif->present) { ide_proc_port_register_devices(hwif); + hwif_register_devices(hwif); + } } return j ? 
0 : -1; diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 6d622cb5ac8..7ecb1ade887 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -32,11 +32,9 @@ #include <linux/errno.h> #include <linux/genhd.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/ide.h> -#include <linux/smp_lock.h> #include <linux/completion.h> #include <linux/bitops.h> #include <linux/mutex.h> @@ -220,6 +218,7 @@ typedef struct ide_tape_obj { char write_prot; } idetape_tape_t; +static DEFINE_MUTEX(ide_tape_mutex); static DEFINE_MUTEX(idetape_ref_mutex); static DEFINE_MUTEX(idetape_chrdev_mutex); @@ -1426,9 +1425,9 @@ static long idetape_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&ide_tape_mutex); ret = do_idetape_chrdev_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&ide_tape_mutex); return ret; } @@ -1903,15 +1902,16 @@ static const struct file_operations idetape_fops = { .unlocked_ioctl = idetape_chrdev_ioctl, .open = idetape_chrdev_open, .release = idetape_chrdev_release, + .llseek = noop_llseek, }; static int idetape_open(struct block_device *bdev, fmode_t mode) { struct ide_tape_obj *tape; - lock_kernel(); + mutex_lock(&ide_tape_mutex); tape = ide_tape_get(bdev->bd_disk, false, 0); - unlock_kernel(); + mutex_unlock(&ide_tape_mutex); if (!tape) return -ENXIO; @@ -1923,9 +1923,9 @@ static int idetape_release(struct gendisk *disk, fmode_t mode) { struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj); - lock_kernel(); + mutex_lock(&ide_tape_mutex); ide_tape_put(tape); - unlock_kernel(); + mutex_unlock(&ide_tape_mutex); return 0; } @@ -1937,11 +1937,11 @@ static int idetape_ioctl(struct block_device *bdev, fmode_t mode, ide_drive_t *drive = tape->drive; int err; - lock_kernel(); + mutex_lock(&ide_tape_mutex); err = generic_ide_ioctl(drive, bdev, cmd, arg); if (err == -EINVAL) err = idetape_blkdev_ioctl(drive, cmd, arg); - unlock_kernel(); + mutex_unlock(&ide_tape_mutex); return err; } diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c index 15341fc1c68..c976285d313 100644 --- a/drivers/idle/i7300_idle.c +++ b/drivers/idle/i7300_idle.c @@ -536,6 +536,7 @@ static ssize_t stats_read_ul(struct file *fp, char __user *ubuf, size_t count, static const struct file_operations idle_fops = { .open = stats_open_generic, .read = stats_read_ul, + .llseek = default_llseek, }; struct debugfs_file_info { diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index a10152bb142..cb3ccf3ed22 100755..100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -59,18 +59,11 @@ #include <linux/hrtimer.h> /* ktime_get_real() */ #include <trace/events/power.h> #include <linux/sched.h> +#include <asm/mwait.h> #define INTEL_IDLE_VERSION "0.4" #define PREFIX "intel_idle: " -#define MWAIT_SUBSTATE_MASK (0xf) -#define MWAIT_CSTATE_MASK (0xf) -#define MWAIT_SUBSTATE_SIZE (4) -#define MWAIT_MAX_NUM_CSTATES 8 -#define CPUID_MWAIT_LEAF (5) -#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) -#define CPUID5_ECX_INTERRUPT_BREAK (0x2) - static struct cpuidle_driver intel_idle_driver = { .name = "intel_idle", .owner = THIS_MODULE, @@ -83,7 +76,7 @@ static unsigned int mwait_substates; /* Reliable LAPIC Timer States, bit 1 for C1 etc. 
*/ static unsigned int lapic_timer_reliable_states; -static struct cpuidle_device *intel_idle_cpuidle_devices; +static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); static struct cpuidle_state *cpuidle_state_table; @@ -108,7 +101,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { .name = "NHM-C3", .desc = "MWAIT 0x10", .driver_data = (void *) 0x10, - .flags = CPUIDLE_FLAG_TIME_VALID, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 20, .power_usage = 500, .target_residency = 80, @@ -117,7 +110,7 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { .name = "NHM-C6", .desc = "MWAIT 0x20", .driver_data = (void *) 0x20, - .flags = CPUIDLE_FLAG_TIME_VALID, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 200, .power_usage = 350, .target_residency = 800, @@ -149,7 +142,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { .name = "ATM-C4", .desc = "MWAIT 0x30", .driver_data = (void *) 0x30, - .flags = CPUIDLE_FLAG_TIME_VALID, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, .exit_latency = 100, .power_usage = 250, .target_residency = 400, @@ -157,13 +150,13 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { { /* MWAIT C5 */ }, { /* MWAIT C6 */ .name = "ATM-C6", - .desc = "MWAIT 0x40", - .driver_data = (void *) 0x40, - .flags = CPUIDLE_FLAG_TIME_VALID, - .exit_latency = 200, + .desc = "MWAIT 0x52", + .driver_data = (void *) 0x52, + .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 140, .power_usage = 150, - .target_residency = 800, - .enter = NULL }, /* disabled */ + .target_residency = 560, + .enter = &intel_idle }, }; /** @@ -185,6 +178,16 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) local_irq_disable(); + /* + * If the state flag indicates that the TLB will be flushed or if this + * is the deepest c-state supported, do a voluntary leave mm to avoid + * costly and mostly unnecessary wakeups for flushing the user TLB's + * associated with the active mm. 
+ */ + if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED || + (&dev->states[dev->state_count - 1] == state)) + leave_mm(cpu); + if (!(lapic_timer_reliable_states & (1 << (cstate)))) clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index d88077a2199..13c88871dc3 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -463,7 +463,8 @@ static int send_connect(struct iwch_ep *ep) V_MSS_IDX(mtu_idx) | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | + V_CONG_CONTROL_FLAVOR(cong_flavor); skb->priority = CPL_PRIORITY_SETUP; set_arp_failure_handler(skb, act_open_req_arp_failure); @@ -1280,7 +1281,8 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb) V_MSS_IDX(mtu_idx) | V_L2T_IDX(ep->l2t->idx) | V_TX_CHANNEL(ep->l2t->smt_idx); opt0l = V_TOS((ep->tos >> 2) & M_TOS) | V_RCV_BUFSIZ(rcv_win>>10); - opt2 = V_FLAVORS_VALID(1) | V_CONG_CONTROL_FLAVOR(cong_flavor); + opt2 = F_RX_COALESCE_VALID | V_RX_COALESCE(0) | V_FLAVORS_VALID(1) | + V_CONG_CONTROL_FLAVOR(cong_flavor); rpl = cplhdr(skb); rpl->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); diff --git a/drivers/infiniband/hw/ipath/ipath_diag.c b/drivers/infiniband/hw/ipath/ipath_diag.c index d4ce8b63e19..daef61d5e5b 100644 --- a/drivers/infiniband/hw/ipath/ipath_diag.c +++ b/drivers/infiniband/hw/ipath/ipath_diag.c @@ -65,7 +65,8 @@ static const struct file_operations diag_file_ops = { .write = ipath_diag_write, .read = ipath_diag_read, .open = ipath_diag_open, - .release = ipath_diag_release + .release = ipath_diag_release, + .llseek = default_llseek, }; static ssize_t ipath_diagpkt_write(struct file *fp, @@ -75,6 +76,7 @@ static ssize_t ipath_diagpkt_write(struct file *fp, static const struct file_operations diagpkt_file_ops = { .owner = THIS_MODULE, .write = ipath_diagpkt_write, + .llseek = noop_llseek, }; static atomic_t diagpkt_count = ATOMIC_INIT(0); diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 65eb8929db2..6078992da3f 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -63,7 +63,8 @@ static const struct file_operations ipath_file_ops = { .open = ipath_open, .release = ipath_close, .poll = ipath_poll, - .mmap = ipath_mmap + .mmap = ipath_mmap, + .llseek = noop_llseek, }; /* diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 2fca70836da..d13e72685dc 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -103,6 +103,7 @@ static ssize_t atomic_stats_read(struct file *file, char __user *buf, static const struct file_operations atomic_stats_ops = { .read = atomic_stats_read, + .llseek = default_llseek, }; static ssize_t atomic_counters_read(struct file *file, char __user *buf, @@ -120,6 +121,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf, static const struct file_operations atomic_counters_ops = { .read = atomic_counters_read, + .llseek = default_llseek, }; static ssize_t flash_read(struct file *file, char __user *buf, @@ -224,6 +226,7 @@ bail: static const struct file_operations flash_ops = { .read = flash_read, .write = flash_write, 
+ .llseek = default_llseek, }; static int create_device_files(struct super_block *sb, diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c index 05dcf0d9a7d..204c4dd9dce 100644 --- a/drivers/infiniband/hw/qib/qib_diag.c +++ b/drivers/infiniband/hw/qib/qib_diag.c @@ -136,7 +136,8 @@ static const struct file_operations diag_file_ops = { .write = qib_diag_write, .read = qib_diag_read, .open = qib_diag_open, - .release = qib_diag_release + .release = qib_diag_release, + .llseek = default_llseek, }; static atomic_t diagpkt_count = ATOMIC_INIT(0); @@ -149,6 +150,7 @@ static ssize_t qib_diagpkt_write(struct file *fp, const char __user *data, static const struct file_operations diagpkt_file_ops = { .owner = THIS_MODULE, .write = qib_diagpkt_write, + .llseek = noop_llseek, }; int qib_diag_add(struct qib_devdata *dd) diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 6b11645edf3..aa2be214270 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -63,7 +63,8 @@ static const struct file_operations qib_file_ops = { .open = qib_open, .release = qib_close, .poll = qib_poll, - .mmap = qib_mmapf + .mmap = qib_mmapf, + .llseek = noop_llseek, }; /* diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 9f989c0ba9d..a0e6613e8be 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -367,6 +367,7 @@ bail: static const struct file_operations flash_ops = { .read = flash_read, .write = flash_write, + .llseek = default_llseek, }; static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index c908c5f8364..535fea4fe67 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -28,7 +28,7 @@ struct evdev { int minor; struct input_handle handle; wait_queue_head_t wait; - struct evdev_client *grab; + struct evdev_client __rcu *grab; struct list_head client_list; spinlock_t client_lock; /* protects client_list */ struct mutex mutex; @@ -669,6 +669,9 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) { + if (!dev->absinfo) + return -EINVAL; + t = _IOC_NR(cmd) & ABS_MAX; abs = dev->absinfo[t]; @@ -680,10 +683,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, } } - if (_IOC_DIR(cmd) == _IOC_READ) { + if (_IOC_DIR(cmd) == _IOC_WRITE) { if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) { + if (!dev->absinfo) + return -EINVAL; + t = _IOC_NR(cmd) & ABS_MAX; if (copy_from_user(&abs, p, min_t(size_t, @@ -761,7 +767,8 @@ static const struct file_operations evdev_fops = { .compat_ioctl = evdev_ioctl_compat, #endif .fasync = evdev_fasync, - .flush = evdev_flush + .flush = evdev_flush, + .llseek = no_llseek, }; static int evdev_install_chrdev(struct evdev *evdev) diff --git a/drivers/input/input.c b/drivers/input/input.c index ab698205651..7919c253722 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -2047,6 +2047,7 @@ out: static const struct file_operations input_fops = { .owner = THIS_MODULE, .open = input_open_file, + .llseek = noop_llseek, }; static int __init input_init(void) diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index d85bd8a7967..9d424cebfd2 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -483,6 +483,9 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev, 
memcpy(joydev->abspam, abspam, len); + for (i = 0; i < joydev->nabs; i++) + joydev->absmap[joydev->abspam[i]] = i; + out: kfree(abspam); return retval; @@ -736,6 +739,7 @@ static const struct file_operations joydev_fops = { .compat_ioctl = joydev_compat_ioctl, #endif .fasync = joydev_fasync, + .llseek = no_llseek, }; static int joydev_install_chrdev(struct joydev *joydev) diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 9cc488d2149..aa037fec2f8 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -338,7 +338,7 @@ config KEYBOARD_OPENCORES config KEYBOARD_PXA27x tristate "PXA27x/PXA3xx keypad support" - depends on PXA27x || PXA3xx + depends on PXA27x || PXA3xx || ARCH_MMP help Enable support for PXA27x/PXA3xx keypad controller. diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index f32404f9918..4b0ec35259a 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -32,7 +32,7 @@ #include <asm/mach/map.h> #include <mach/hardware.h> -#include <mach/pxa27x_keypad.h> +#include <plat/pxa27x_keypad.h> /* * Keypad Controller registers */ @@ -330,11 +330,21 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad) keypad->direct_key_state = new_state; } +static void clear_wakeup_event(struct pxa27x_keypad *keypad) +{ + struct pxa27x_keypad_platform_data *pdata = keypad->pdata; + + if (pdata->clear_wakeup_event) + (pdata->clear_wakeup_event)(); +} + static irqreturn_t pxa27x_keypad_irq_handler(int irq, void *dev_id) { struct pxa27x_keypad *keypad = dev_id; unsigned long kpc = keypad_readl(KPC); + clear_wakeup_event(keypad); + if (kpc & KPC_DI) pxa27x_keypad_scan_direct(keypad); diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c index c1906647905..0b4f54265f6 100644 --- a/drivers/input/misc/hp_sdc_rtc.c +++ b/drivers/input/misc/hp_sdc_rtc.c @@ -43,7 +43,7 @@ #include <linux/proc_fs.h> #include <linux/poll.h> #include <linux/rtc.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/semaphore.h> MODULE_AUTHOR("Brian S. 
Julin <bri@calyx.com>"); @@ -52,6 +52,7 @@ MODULE_LICENSE("Dual BSD/GPL"); #define RTC_VERSION "1.10d" +static DEFINE_MUTEX(hp_sdc_rtc_mutex); static unsigned long epoch = 2000; static struct semaphore i8042tregs; @@ -104,7 +105,7 @@ static int hp_sdc_rtc_do_read_bbrtc (struct rtc_time *rtctm) t.endidx = 91; t.seq = tseq; t.act.semaphore = &tsem; - init_MUTEX_LOCKED(&tsem); + sema_init(&tsem, 0); if (hp_sdc_enqueue_transaction(&t)) return -1; @@ -665,9 +666,9 @@ static long hp_sdc_rtc_unlocked_ioctl(struct file *file, { int ret; - lock_kernel(); + mutex_lock(&hp_sdc_rtc_mutex); ret = hp_sdc_rtc_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&hp_sdc_rtc_mutex); return ret; } @@ -698,7 +699,7 @@ static int __init hp_sdc_rtc_init(void) return -ENODEV; #endif - init_MUTEX(&i8042tregs); + sema_init(&i8042tregs, 1); if ((ret = hp_sdc_request_timer_irq(&hp_sdc_rtc_isr))) return ret; diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c index 0d4266a533a..b9410784e6a 100644 --- a/drivers/input/misc/uinput.c +++ b/drivers/input/misc/uinput.c @@ -404,6 +404,13 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu retval = uinput_validate_absbits(dev); if (retval < 0) goto exit; + if (test_bit(ABS_MT_SLOT, dev->absbit)) { + int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; + input_mt_create_slots(dev, nslot); + input_set_events_per_packet(dev, 6 * nslot); + } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { + input_set_events_per_packet(dev, 60); + } } udev->state = UIST_SETUP_COMPLETE; @@ -804,6 +811,7 @@ static const struct file_operations uinput_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = uinput_compat_ioctl, #endif + .llseek = no_llseek, }; static struct miscdevice uinput_misc = { diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index d528a2dba06..31ec7265aac 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -792,6 +792,7 @@ static const struct file_operations mousedev_fops = { .open = mousedev_open, .release = mousedev_release, .fasync = mousedev_fasync, + .llseek = noop_llseek, }; static int mousedev_install_chrdev(struct mousedev *mousedev) diff --git a/drivers/input/serio/hil_mlc.c b/drivers/input/serio/hil_mlc.c index c92f4edfee7..e5624d8f170 100644 --- a/drivers/input/serio/hil_mlc.c +++ b/drivers/input/serio/hil_mlc.c @@ -915,15 +915,15 @@ int hil_mlc_register(hil_mlc *mlc) mlc->ostarted = 0; rwlock_init(&mlc->lock); - init_MUTEX(&mlc->osem); + sema_init(&mlc->osem, 1); - init_MUTEX(&mlc->isem); + sema_init(&mlc->isem, 1); mlc->icount = -1; mlc->imatch = 0; mlc->opercnt = 0; - init_MUTEX_LOCKED(&(mlc->csem)); + sema_init(&(mlc->csem), 0); hil_mlc_clear_di_scratch(mlc); hil_mlc_clear_di_map(mlc, 0); diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c index bcc2d30ec24..8c0b51c3142 100644 --- a/drivers/input/serio/hp_sdc.c +++ b/drivers/input/serio/hp_sdc.c @@ -905,7 +905,7 @@ static int __init hp_sdc_init(void) ts_sync[1] = 0x0f; ts_sync[2] = ts_sync[3] = ts_sync[4] = ts_sync[5] = 0; t_sync.act.semaphore = &s_sync; - init_MUTEX_LOCKED(&s_sync); + sema_init(&s_sync, 0); hp_sdc_enqueue_transaction(&t_sync); down(&s_sync); /* Wait for t_sync to complete */ @@ -1039,7 +1039,7 @@ static int __init hp_sdc_register(void) return hp_sdc.dev_err; } - init_MUTEX_LOCKED(&tq_init_sem); + sema_init(&tq_init_sem, 0); tq_init.actidx = 0; tq_init.idx = 1; diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index 99866485444..cd82bb12591 100644 --- 
a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -243,6 +243,7 @@ static const struct file_operations serio_raw_fops = { .write = serio_raw_write, .poll = serio_raw_poll, .fasync = serio_raw_fasync, + .llseek = noop_llseek, }; diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c index 42ba3691d90..b35876ee690 100644 --- a/drivers/input/tablet/wacom_sys.c +++ b/drivers/input/tablet/wacom_sys.c @@ -103,27 +103,26 @@ static void wacom_sys_irq(struct urb *urb) static int wacom_open(struct input_dev *dev) { struct wacom *wacom = input_get_drvdata(dev); + int retval = 0; - mutex_lock(&wacom->lock); - - wacom->irq->dev = wacom->usbdev; - - if (usb_autopm_get_interface(wacom->intf) < 0) { - mutex_unlock(&wacom->lock); + if (usb_autopm_get_interface(wacom->intf) < 0) return -EIO; - } + + mutex_lock(&wacom->lock); if (usb_submit_urb(wacom->irq, GFP_KERNEL)) { - usb_autopm_put_interface(wacom->intf); - mutex_unlock(&wacom->lock); - return -EIO; + retval = -EIO; + goto out; } wacom->open = true; wacom->intf->needs_remote_wakeup = 1; +out: mutex_unlock(&wacom->lock); - return 0; + if (retval) + usb_autopm_put_interface(wacom->intf); + return retval; } static void wacom_close(struct input_dev *dev) @@ -135,6 +134,8 @@ static void wacom_close(struct input_dev *dev) wacom->open = false; wacom->intf->needs_remote_wakeup = 0; mutex_unlock(&wacom->lock); + + usb_autopm_put_interface(wacom->intf); } static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc, diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 6e29badb969..47fd7a041c5 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -442,8 +442,10 @@ static void wacom_intuos_general(struct wacom_wac *wacom) /* general pen packet */ if ((data[1] & 0xb8) == 0xa0) { t = (data[6] << 2) | ((data[7] >> 6) & 3); - if (features->type >= INTUOS4S && features->type <= INTUOS4L) + if ((features->type >= INTUOS4S && features->type <= INTUOS4L) || + features->type == WACOM_21UX2) { t = (t << 1) | (data[1] & 1); + } input_report_abs(input, ABS_PRESSURE, t); input_report_abs(input, ABS_TILT_X, ((data[7] << 1) & 0x7e) | (data[8] >> 7)); diff --git a/drivers/isdn/act2000/act2000.h b/drivers/isdn/act2000/act2000.h index d4c50512a1f..88c9423500d 100644 --- a/drivers/isdn/act2000/act2000.h +++ b/drivers/isdn/act2000/act2000.h @@ -141,9 +141,9 @@ typedef struct irq_data_isa { __u8 rcvhdr[8]; } irq_data_isa; -typedef union irq_data { +typedef union act2000_irq_data { irq_data_isa isa; -} irq_data; +} act2000_irq_data; /* * Per card driver data @@ -176,7 +176,7 @@ typedef struct act2000_card { char *status_buf_read; char *status_buf_write; char *status_buf_end; - irq_data idat; /* Data used for IRQ handler */ + act2000_irq_data idat; /* Data used for IRQ handler */ isdn_if interface; /* Interface to upper layer */ char regname[35]; /* Name used for request_region */ } act2000_card; diff --git a/drivers/isdn/hardware/avm/avm_cs.c b/drivers/isdn/hardware/avm/avm_cs.c index 09b1795516f..91f06a3ef00 100644 --- a/drivers/isdn/hardware/avm/avm_cs.c +++ b/drivers/isdn/hardware/avm/avm_cs.c @@ -20,7 +20,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -39,87 +38,32 @@ MODULE_LICENSE("GPL"); /*====================================================================*/ -/* - The event() function is this driver's Card Services event handler. 
- It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card insertion - and ejection events. They are invoked from the skeleton event - handler. -*/ - static int avmcs_config(struct pcmcia_device *link); static void avmcs_release(struct pcmcia_device *link); - -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static void avmcs_detach(struct pcmcia_device *p_dev); -/*====================================================================== - - avmcs_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int avmcs_probe(struct pcmcia_device *p_dev) { - - /* The io structure describes IO port mapping */ - p_dev->resource[0]->end = 16; - p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - /* General socket configuration */ - p_dev->conf.Attributes = CONF_ENABLE_IRQ; - p_dev->conf.IntType = INT_MEMORY_AND_IO; - p_dev->conf.ConfigIndex = 1; - p_dev->conf.Present = PRESENT_OPTION; + p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + p_dev->config_index = 1; + p_dev->config_regs = PRESENT_OPTION; return avmcs_config(p_dev); } /* avmcs_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ static void avmcs_detach(struct pcmcia_device *link) { avmcs_release(link); } /* avmcs_detach */ -/*====================================================================== - - avmcs_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. - -======================================================================*/ - -static int avmcs_configcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int avmcs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { - if (cf->io.nwin <= 0) - return -ENODEV; + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - p_dev->resource[0]->start = cf->io.win[0].base; - p_dev->resource[0]->end = cf->io.win[0].len; return pcmcia_request_io(p_dev); } @@ -150,7 +94,7 @@ static int avmcs_config(struct pcmcia_device *link) /* * configure the PCMCIA socket */ - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) { pcmcia_disable_device(link); break; @@ -197,13 +141,6 @@ static int avmcs_config(struct pcmcia_device *link) } /* avmcs_config */ -/*====================================================================== - - After a card is removed, avmcs_release() will unregister the net - device, and release the PCMCIA configuration. 
If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ static void avmcs_release(struct pcmcia_device *link) { @@ -222,9 +159,7 @@ MODULE_DEVICE_TABLE(pcmcia, avmcs_ids); static struct pcmcia_driver avmcs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "avm_cs", - }, + .name = "avm_cs", .probe = avmcs_probe, .remove = avmcs_detach, .id_table = avmcs_ids, diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c index 94263c22b87..ac4dd7857cb 100644 --- a/drivers/isdn/hisax/avma1_cs.c +++ b/drivers/isdn/hisax/avma1_cs.c @@ -20,7 +20,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "hisax_cfg.h" @@ -40,67 +39,22 @@ module_param(isdnprot, int, 0); /*====================================================================*/ -/* - The event() function is this driver's Card Services event handler. - It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card insertion - and ejection events. They are invoked from the skeleton event - handler. -*/ - static int avma1cs_config(struct pcmcia_device *link) __devinit ; static void avma1cs_release(struct pcmcia_device *link); - -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ; - -/*====================================================================== - - avma1cs_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int __devinit avma1cs_probe(struct pcmcia_device *p_dev) { dev_dbg(&p_dev->dev, "avma1cs_attach()\n"); - /* The io structure describes IO port mapping */ - p_dev->resource[0]->end = 16; - p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - p_dev->resource[1]->end = 16; - p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; - /* General socket configuration */ - p_dev->conf.Attributes = CONF_ENABLE_IRQ; - p_dev->conf.IntType = INT_MEMORY_AND_IO; - p_dev->conf.ConfigIndex = 1; - p_dev->conf.Present = PRESENT_OPTION; + p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + p_dev->config_index = 1; + p_dev->config_regs = PRESENT_OPTION; return avma1cs_config(p_dev); } /* avma1cs_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. 
- -======================================================================*/ - static void __devexit avma1cs_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link); @@ -108,26 +62,13 @@ static void __devexit avma1cs_detach(struct pcmcia_device *link) kfree(link->priv); } /* avma1cs_detach */ -/*====================================================================== - - avma1cs_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. - -======================================================================*/ - -static int avma1cs_configcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int avma1cs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { - if (cf->io.nwin <= 0) - return -ENODEV; - - p_dev->resource[0]->start = cf->io.win[0].base; - p_dev->resource[0]->end = cf->io.win[0].len; + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->io_lines = 5; + return pcmcia_request_io(p_dev); } @@ -161,7 +102,7 @@ static int __devinit avma1cs_config(struct pcmcia_device *link) /* * configure the PCMCIA socket */ - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) { pcmcia_disable_device(link); break; @@ -175,9 +116,6 @@ static int __devinit avma1cs_config(struct pcmcia_device *link) return -ENODEV; } - printk(KERN_NOTICE "avma1_cs: checking at i/o %#x, irq %d\n", - (unsigned int) link->resource[0]->start, link->irq); - icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = isdnprot; @@ -196,14 +134,6 @@ static int __devinit avma1cs_config(struct pcmcia_device *link) return 0; } /* avma1cs_config */ -/*====================================================================== - - After a card is removed, avma1cs_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. 
- -======================================================================*/ - static void avma1cs_release(struct pcmcia_device *link) { unsigned long minor = (unsigned long) link->priv; @@ -216,7 +146,6 @@ static void avma1cs_release(struct pcmcia_device *link) pcmcia_disable_device(link); } /* avma1cs_release */ - static struct pcmcia_device_id avma1cs_ids[] = { PCMCIA_DEVICE_PROD_ID12("AVM", "ISDN A", 0x95d42008, 0xadc9d4bb), PCMCIA_DEVICE_PROD_ID12("ISDN", "CARD", 0x8d9761c8, 0x01c5aa7b), @@ -226,19 +155,15 @@ MODULE_DEVICE_TABLE(pcmcia, avma1cs_ids); static struct pcmcia_driver avma1cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "avma1_cs", - }, + .name = "avma1_cs", .probe = avma1cs_probe, .remove = __devexit_p(avma1cs_detach), .id_table = avma1cs_ids, }; -/*====================================================================*/ - static int __init init_avma1_cs(void) { - return(pcmcia_register_driver(&avma1cs_driver)); + return pcmcia_register_driver(&avma1cs_driver); } static void __exit exit_avma1_cs(void) diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index 6f9afcd5ca4..b133378d4dc 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -801,6 +801,16 @@ static void closecard(int cardnr) ll_unload(csta); } +static irqreturn_t card_irq(int intno, void *dev_id) +{ + struct IsdnCardState *cs = dev_id; + irqreturn_t ret = cs->irq_func(intno, cs); + + if (ret == IRQ_HANDLED) + cs->irq_cnt++; + return ret; +} + static int init_card(struct IsdnCardState *cs) { int irq_cnt, cnt = 3, ret; @@ -809,10 +819,10 @@ static int init_card(struct IsdnCardState *cs) ret = cs->cardmsg(cs, CARD_INIT, NULL); return(ret); } - irq_cnt = kstat_irqs(cs->irq); + irq_cnt = cs->irq_cnt = 0; printk(KERN_INFO "%s: IRQ %d count %d\n", CardType[cs->typ], cs->irq, irq_cnt); - if (request_irq(cs->irq, cs->irq_func, cs->irq_flags, "HiSax", cs)) { + if (request_irq(cs->irq, card_irq, cs->irq_flags, "HiSax", cs)) { printk(KERN_WARNING "HiSax: couldn't get interrupt %d\n", cs->irq); return 1; @@ -822,8 +832,8 @@ static int init_card(struct IsdnCardState *cs) /* Timeout 10ms */ msleep(10); printk(KERN_INFO "%s: IRQ %d count %d\n", - CardType[cs->typ], cs->irq, kstat_irqs(cs->irq)); - if (kstat_irqs(cs->irq) == irq_cnt) { + CardType[cs->typ], cs->irq, cs->irq_cnt); + if (cs->irq_cnt == irq_cnt) { printk(KERN_WARNING "%s: IRQ(%d) getting no interrupts during init %d\n", CardType[cs->typ], cs->irq, 4 - cnt); diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c index b3c08aaf41c..496d477af0f 100644 --- a/drivers/isdn/hisax/elsa_cs.c +++ b/drivers/isdn/hisax/elsa_cs.c @@ -46,7 +46,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -64,26 +63,8 @@ MODULE_LICENSE("Dual MPL/GPL"); static int protocol = 2; /* EURO-ISDN Default */ module_param(protocol, int, 0); -/*====================================================================*/ - -/* - The event() function is this driver's Card Services event handler. - It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card insertion - and ejection events. They are invoked from the elsa_cs event - handler. 
-*/ - static int elsa_cs_config(struct pcmcia_device *link) __devinit ; static void elsa_cs_release(struct pcmcia_device *link); - -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit; typedef struct local_info_t { @@ -92,18 +73,6 @@ typedef struct local_info_t { int cardnr; } local_info_t; -/*====================================================================== - - elsa_cs_attach() creates an "instance" of the driver, allocatingx - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int __devinit elsa_cs_probe(struct pcmcia_device *link) { local_info_t *local; @@ -119,31 +88,9 @@ static int __devinit elsa_cs_probe(struct pcmcia_device *link) local->cardnr = -1; - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - link->resource[0]->end = 8; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - return elsa_cs_config(link); } /* elsa_cs_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void __devexit elsa_cs_detach(struct pcmcia_device *link) { local_info_t *info = link->priv; @@ -156,27 +103,17 @@ static void __devexit elsa_cs_detach(struct pcmcia_device *link) kfree(info); } /* elsa_cs_detach */ -/*====================================================================== - - elsa_cs_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. 
- -======================================================================*/ - -static int elsa_cs_configcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int elsa_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { int j; p_dev->io_lines = 3; + p_dev->resource[0]->end = 8; + p_dev->resource[0]->flags &= IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - if ((cf->io.nwin > 0) && cf->io.win[0].base) { + if ((p_dev->resource[0]->end) && p_dev->resource[0]->start) { printk(KERN_INFO "(elsa_cs: looks like the 96 model)\n"); - p_dev->resource[0]->start = cf->io.win[0].base; if (!pcmcia_request_io(p_dev)) return 0; } else { @@ -199,6 +136,8 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link) dev_dbg(&link->dev, "elsa_config(0x%p)\n", link); dev = link->priv; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + i = pcmcia_loop_config(link, elsa_cs_configcheck, NULL); if (i != 0) goto failed; @@ -206,21 +145,10 @@ static int __devinit elsa_cs_config(struct pcmcia_device *link) if (!link->irq) goto failed; - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x: ", - link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = protocol; @@ -240,14 +168,6 @@ failed: return -ENODEV; } /* elsa_cs_config */ -/*====================================================================== - - After a card is removed, elsa_cs_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void elsa_cs_release(struct pcmcia_device *link) { local_info_t *local = link->priv; @@ -291,9 +211,7 @@ MODULE_DEVICE_TABLE(pcmcia, elsa_ids); static struct pcmcia_driver elsa_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "elsa_cs", - }, + .name = "elsa_cs", .probe = elsa_cs_probe, .remove = __devexit_p(elsa_cs_detach), .id_table = elsa_ids, diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h index 832a87855ff..32ab3924aa7 100644 --- a/drivers/isdn/hisax/hisax.h +++ b/drivers/isdn/hisax/hisax.h @@ -959,6 +959,7 @@ struct IsdnCardState { u_long event; struct work_struct tqueue; struct timer_list dbusytimer; + unsigned int irq_cnt; #ifdef ERROR_STATISTIC int err_crc; int err_tx; diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c index a024192b672..360204bc277 100644 --- a/drivers/isdn/hisax/sedlbauer_cs.c +++ b/drivers/isdn/hisax/sedlbauer_cs.c @@ -46,7 +46,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -64,26 +63,9 @@ MODULE_LICENSE("Dual MPL/GPL"); static int protocol = 2; /* EURO-ISDN Default */ module_param(protocol, int, 0); -/*====================================================================*/ - -/* - The event() function is this driver's Card Services event handler. 
- It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card - insertion and ejection events. They are invoked from the sedlbauer - event handler. -*/ - static int sedlbauer_config(struct pcmcia_device *link) __devinit ; static void sedlbauer_release(struct pcmcia_device *link); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit; typedef struct local_info_t { @@ -92,18 +74,6 @@ typedef struct local_info_t { int cardnr; } local_info_t; -/*====================================================================== - - sedlbauer_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int __devinit sedlbauer_probe(struct pcmcia_device *link) { local_info_t *local; @@ -118,35 +88,9 @@ static int __devinit sedlbauer_probe(struct pcmcia_device *link) local->p_dev = link; link->priv = local; - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - - /* from old sedl_cs - */ - /* The io structure describes IO port mapping */ - link->resource[0]->end = 8; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - return sedlbauer_config(link); } /* sedlbauer_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void __devexit sedlbauer_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link); @@ -158,70 +102,15 @@ static void __devexit sedlbauer_detach(struct pcmcia_device *link) kfree(link->priv); } /* sedlbauer_detach */ -/*====================================================================== - - sedlbauer_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. - -======================================================================*/ -static int sedlbauer_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int sedlbauer_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Does this card need audio output? 
*/ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } + if (p_dev->config_index == 0) + return -EINVAL; - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) { - if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM]/10000) - return -ENODEV; - } else if (dflt->vcc.present & (1<<CISTPL_POWER_VNOM)) { - if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM]/10000) - return -ENODEV; - } - - if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; - else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; - - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - /* This reserves IO space but doesn't actually enable it */ - p_dev->io_lines = 3; - if (pcmcia_request_io(p_dev) != 0) - return -ENODEV; - } - - return 0; + p_dev->io_lines = 3; + return pcmcia_request_io(p_dev); } - - static int __devinit sedlbauer_config(struct pcmcia_device *link) { int ret; @@ -229,44 +118,17 @@ static int __devinit sedlbauer_config(struct pcmcia_device *link) dev_dbg(&link->dev, "sedlbauer_config(0x%p)\n", link); - /* - In this loop, we scan the CIS for configuration table entries, - each of which describes a valid card configuration, including - voltage, IO window, memory window, and interrupt settings. - - We make no assumptions about the card to be configured: we use - just the information available in the CIS. In an ideal world, - this would work for any PCMCIA card, but it requires a complete - and accurate CIS. In practice, a driver usually "knows" most of - these things without consulting the CIS, and most client drivers - will only use the CIS to fill in implementation-defined details. - */ + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_CHECK_VCC | + CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, sedlbauer_config_check, NULL); if (ret) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. 
- */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x:", - link->conf.ConfigIndex); - if (link->conf.Vpp) - printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = protocol; @@ -290,14 +152,6 @@ failed: } /* sedlbauer_config */ -/*====================================================================== - - After a card is removed, sedlbauer_release() will unregister the - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void sedlbauer_release(struct pcmcia_device *link) { local_info_t *local = link->priv; @@ -346,9 +200,7 @@ MODULE_DEVICE_TABLE(pcmcia, sedlbauer_ids); static struct pcmcia_driver sedlbauer_driver = { .owner = THIS_MODULE, - .drv = { - .name = "sedlbauer_cs", - }, + .name = "sedlbauer_cs", .probe = sedlbauer_probe, .remove = __devexit_p(sedlbauer_detach), .id_table = sedlbauer_ids, diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c index 7296102ca25..282a4467ef1 100644 --- a/drivers/isdn/hisax/teles_cs.c +++ b/drivers/isdn/hisax/teles_cs.c @@ -27,7 +27,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -45,26 +44,8 @@ MODULE_LICENSE("GPL"); static int protocol = 2; /* EURO-ISDN Default */ module_param(protocol, int, 0); -/*====================================================================*/ - -/* - The event() function is this driver's Card Services event handler. - It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card insertion - and ejection events. They are invoked from the teles_cs event - handler. -*/ - static int teles_cs_config(struct pcmcia_device *link) __devinit ; static void teles_cs_release(struct pcmcia_device *link); - -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static void teles_detach(struct pcmcia_device *p_dev) __devexit ; typedef struct local_info_t { @@ -73,18 +54,6 @@ typedef struct local_info_t { int cardnr; } local_info_t; -/*====================================================================== - - teles_attach() creates an "instance" of the driver, allocatingx - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int __devinit teles_probe(struct pcmcia_device *link) { local_info_t *local; @@ -99,31 +68,11 @@ static int __devinit teles_probe(struct pcmcia_device *link) local->p_dev = link; link->priv = local; - /* - General socket configuration defaults can go here. 
In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - link->resource[0]->end = 96; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return teles_cs_config(link); } /* teles_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void __devexit teles_detach(struct pcmcia_device *link) { local_info_t *info = link->priv; @@ -136,27 +85,17 @@ static void __devexit teles_detach(struct pcmcia_device *link) kfree(info); } /* teles_detach */ -/*====================================================================== - - teles_cs_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. - -======================================================================*/ - -static int teles_cs_configcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int teles_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data) { int j; p_dev->io_lines = 5; + p_dev->resource[0]->end = 96; + p_dev->resource[0]->flags &= IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - if ((cf->io.nwin > 0) && cf->io.win[0].base) { + if ((p_dev->resource[0]->end) && p_dev->resource[0]->start) { printk(KERN_INFO "(teles_cs: looks like the 96 model)\n"); - p_dev->resource[0]->start = cf->io.win[0].base; if (!pcmcia_request_io(p_dev)) return 0; } else { @@ -186,21 +125,10 @@ static int __devinit teles_cs_config(struct pcmcia_device *link) if (!link->irq) goto cs_failed; - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) goto cs_failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x:", - link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - icard.para[0] = link->irq; icard.para[1] = link->resource[0]->start; icard.protocol = protocol; @@ -222,14 +150,6 @@ cs_failed: return -ENODEV; } /* teles_cs_config */ -/*====================================================================== - - After a card is removed, teles_cs_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. 
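The elsa_cs, sedlbauer_cs and teles_cs conversions above all follow the same pattern: the hand-rolled cistpl_cftable_entry_t parsing and the old link->conf / pcmcia_request_configuration() sequence are dropped in favour of config_flags (CONF_ENABLE_IRQ and CONF_AUTO_SET_IO, plus CONF_AUTO_CHECK_VCC, CONF_AUTO_SET_VPP and CONF_AUTO_AUDIO in sedlbauer_cs) together with pcmcia_loop_config() and pcmcia_enable_device(). A minimal sketch of the new-style config path for a hypothetical driver (names are placeholders, error handling trimmed) looks like this:

#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Called once per CIS configuration entry.  With CONF_AUTO_SET_IO set,
 * the pcmcia core pre-fills p_dev->resource[] from the entry before
 * this callback runs; the driver only fixes up what the CIS cannot
 * tell it.  Return 0 to accept the entry. */
static int foo_cs_configcheck(struct pcmcia_device *p_dev, void *priv_data)
{
	p_dev->io_lines = 3;			/* decode 3 address lines */
	p_dev->resource[0]->end = 8;		/* 8 I/O ports */
	p_dev->resource[0]->flags &= IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;

	return pcmcia_request_io(p_dev);
}

static int foo_cs_config(struct pcmcia_device *link)
{
	int ret;

	/* Tell the core what it may configure on our behalf. */
	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	ret = pcmcia_loop_config(link, foo_cs_configcheck, NULL);
	if (ret)
		return ret;
	if (!link->irq)
		return -ENODEV;

	/* Replaces pcmcia_request_configuration(link, &link->conf). */
	return pcmcia_enable_device(link);
}

Everything that previously lived in link->conf is now expressed through config_flags, which is why the probe functions above shrink to little more than allocating the local state and calling the config routine.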
- -======================================================================*/ - static void teles_cs_release(struct pcmcia_device *link) { local_info_t *local = link->priv; @@ -273,9 +193,7 @@ MODULE_DEVICE_TABLE(pcmcia, teles_ids); static struct pcmcia_driver teles_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "teles_cs", - }, + .name = "teles_cs", .probe = teles_probe, .remove = __devexit_p(teles_detach), .id_table = teles_ids, diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index de43c8c70ad..859c81e9483 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c @@ -267,6 +267,7 @@ static const struct file_operations mISDN_fops = { .unlocked_ioctl = mISDN_ioctl, .open = mISDN_open, .release = mISDN_close, + .llseek = no_llseek, }; static struct miscdevice mISDNtimer = { diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c index 485be8b1e1b..f0225bc0f26 100644 --- a/drivers/isdn/sc/interrupt.c +++ b/drivers/isdn/sc/interrupt.c @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) } else if(callid>=0x0000 && callid<=0x7FFF) { + int len; + pr_debug("%s: Got Incoming Call\n", sc_adapter[card]->devicename); - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4])); - strcpy(setup.eazmsn, - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn); + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]), + sizeof(setup.phone)); + if (len >= sizeof(setup.phone)) + continue; + len = strlcpy(setup.eazmsn, + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, + sizeof(setup.eazmsn)); + if (len >= sizeof(setup.eazmsn)) + continue; setup.si1 = 7; setup.si2 = 0; setup.plan = 0; @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst) * Handle a GetMyNumber Rsp */ if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){ - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array); + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn, + rcvmsg.msg_data.byte_array, + sizeof(rcvmsg.msg_data.byte_array)); continue; } diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index e4112622e5a..cc2a88d5192 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -304,13 +304,22 @@ config LEDS_MC13783 config LEDS_NS2 tristate "LED support for Network Space v2 GPIO LEDs" - depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 + depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2 default y help This option enable support for the dual-GPIO LED found on the Network Space v2 board (and parents). This include Internet Space v2, Network Space (Max) v2 and d2 Network v2 boards. +config LEDS_NETXBIG + tristate "LED support for Big Network series LEDs" + depends on MACH_NET2BIG_V2 || MACH_NET5BIG_V2 + default y + help + This option enable support for LEDs found on the LaCie 2Big + and 5Big Network v2 boards. The LEDs are wired to a CPLD and are + controlled through a GPIO extension bus. 
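The drivers/isdn/sc/interrupt.c hunk above replaces unbounded strcpy() calls with strlcpy() and treats a return value greater than or equal to the destination size as truncation, skipping that message instead of overflowing the fixed-size setup fields. The same idiom in isolation (a minimal sketch; the helper and its names are made up):

#include <linux/errno.h>
#include <linux/string.h>

static int copy_number(char *dst, size_t dst_len, const char *src)
{
	size_t len;

	/* strlcpy() always NUL-terminates and returns strlen(src);
	 * a return value >= dst_len means the source did not fit. */
	len = strlcpy(dst, src, dst_len);
	if (len >= dst_len)
		return -EINVAL;		/* truncated: reject rather than use it */

	return 0;
}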
+ config LEDS_TRIGGERS bool "LED Trigger support" help diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 7d6b95831f8..9c96db40ef6 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o obj-$(CONFIG_LEDS_NS2) += leds-ns2.o +obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c new file mode 100644 index 00000000000..f2e51c13439 --- /dev/null +++ b/drivers/leds/leds-netxbig.c @@ -0,0 +1,449 @@ +/* + * leds-netxbig.c - Driver for the 2Big and 5Big Network series LEDs + * + * Copyright (C) 2010 LaCie + * + * Author: Simon Guinot <sguinot@lacie.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/leds.h> +#include <mach/leds-netxbig.h> + +/* + * GPIO extension bus. + */ + +static DEFINE_SPINLOCK(gpio_ext_lock); + +static void gpio_ext_set_addr(struct netxbig_gpio_ext *gpio_ext, int addr) +{ + int pin; + + for (pin = 0; pin < gpio_ext->num_addr; pin++) + gpio_set_value(gpio_ext->addr[pin], (addr >> pin) & 1); +} + +static void gpio_ext_set_data(struct netxbig_gpio_ext *gpio_ext, int data) +{ + int pin; + + for (pin = 0; pin < gpio_ext->num_data; pin++) + gpio_set_value(gpio_ext->data[pin], (data >> pin) & 1); +} + +static void gpio_ext_enable_select(struct netxbig_gpio_ext *gpio_ext) +{ + /* Enable select is done on the raising edge. */ + gpio_set_value(gpio_ext->enable, 0); + gpio_set_value(gpio_ext->enable, 1); +} + +static void gpio_ext_set_value(struct netxbig_gpio_ext *gpio_ext, + int addr, int value) +{ + unsigned long flags; + + spin_lock_irqsave(&gpio_ext_lock, flags); + gpio_ext_set_addr(gpio_ext, addr); + gpio_ext_set_data(gpio_ext, value); + gpio_ext_enable_select(gpio_ext); + spin_unlock_irqrestore(&gpio_ext_lock, flags); +} + +static int __devinit gpio_ext_init(struct netxbig_gpio_ext *gpio_ext) +{ + int err; + int i; + + if (unlikely(!gpio_ext)) + return -EINVAL; + + /* Configure address GPIOs. */ + for (i = 0; i < gpio_ext->num_addr; i++) { + err = gpio_request(gpio_ext->addr[i], "GPIO extension addr"); + if (err) + goto err_free_addr; + err = gpio_direction_output(gpio_ext->addr[i], 0); + if (err) { + gpio_free(gpio_ext->addr[i]); + goto err_free_addr; + } + } + /* Configure data GPIOs. 
*/ + for (i = 0; i < gpio_ext->num_data; i++) { + err = gpio_request(gpio_ext->data[i], "GPIO extension data"); + if (err) + goto err_free_data; + err = gpio_direction_output(gpio_ext->data[i], 0); + if (err) { + gpio_free(gpio_ext->data[i]); + goto err_free_data; + } + } + /* Configure "enable select" GPIO. */ + err = gpio_request(gpio_ext->enable, "GPIO extension enable"); + if (err) + goto err_free_data; + err = gpio_direction_output(gpio_ext->enable, 0); + if (err) { + gpio_free(gpio_ext->enable); + goto err_free_data; + } + + return 0; + +err_free_data: + for (i = i - 1; i >= 0; i--) + gpio_free(gpio_ext->data[i]); + i = gpio_ext->num_addr; +err_free_addr: + for (i = i - 1; i >= 0; i--) + gpio_free(gpio_ext->addr[i]); + + return err; +} + +static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext) +{ + int i; + + gpio_free(gpio_ext->enable); + for (i = gpio_ext->num_addr - 1; i >= 0; i--) + gpio_free(gpio_ext->addr[i]); + for (i = gpio_ext->num_data - 1; i >= 0; i--) + gpio_free(gpio_ext->data[i]); +} + +/* + * Class LED driver. + */ + +struct netxbig_led_data { + struct netxbig_gpio_ext *gpio_ext; + struct led_classdev cdev; + int mode_addr; + int *mode_val; + int bright_addr; + int bright_max; + struct netxbig_led_timer *timer; + int num_timer; + enum netxbig_led_mode mode; + int sata; + spinlock_t lock; +}; + +static int netxbig_led_get_timer_mode(enum netxbig_led_mode *mode, + unsigned long delay_on, + unsigned long delay_off, + struct netxbig_led_timer *timer, + int num_timer) +{ + int i; + + for (i = 0; i < num_timer; i++) { + if (timer[i].delay_on == delay_on && + timer[i].delay_off == delay_off) { + *mode = timer[i].mode; + return 0; + } + } + return -EINVAL; +} + +static int netxbig_led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct netxbig_led_data *led_dat = + container_of(led_cdev, struct netxbig_led_data, cdev); + enum netxbig_led_mode mode; + int mode_val; + int ret; + + /* Look for a LED mode with the requested timer frequency. */ + ret = netxbig_led_get_timer_mode(&mode, *delay_on, *delay_off, + led_dat->timer, led_dat->num_timer); + if (ret < 0) + return ret; + + mode_val = led_dat->mode_val[mode]; + if (mode_val == NETXBIG_LED_INVALID_MODE) + return -EINVAL; + + spin_lock_irq(&led_dat->lock); + + gpio_ext_set_value(led_dat->gpio_ext, led_dat->mode_addr, mode_val); + led_dat->mode = mode; + + spin_unlock_irq(&led_dat->lock); + + return 0; +} + +static void netxbig_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct netxbig_led_data *led_dat = + container_of(led_cdev, struct netxbig_led_data, cdev); + enum netxbig_led_mode mode; + int mode_val, bright_val; + int set_brightness = 1; + unsigned long flags; + + spin_lock_irqsave(&led_dat->lock, flags); + + if (value == LED_OFF) { + mode = NETXBIG_LED_OFF; + set_brightness = 0; + } else { + if (led_dat->sata) + mode = NETXBIG_LED_SATA; + else if (led_dat->mode == NETXBIG_LED_OFF) + mode = NETXBIG_LED_ON; + else /* Keep 'timer' mode. */ + mode = led_dat->mode; + } + mode_val = led_dat->mode_val[mode]; + + gpio_ext_set_value(led_dat->gpio_ext, led_dat->mode_addr, mode_val); + led_dat->mode = mode; + /* + * Note that the brightness register is shared between all the + * SATA LEDs. So, change the brightness setting for a single + * SATA LED will affect all the others. 
+ */ + if (set_brightness) { + bright_val = DIV_ROUND_UP(value * led_dat->bright_max, + LED_FULL); + gpio_ext_set_value(led_dat->gpio_ext, + led_dat->bright_addr, bright_val); + } + + spin_unlock_irqrestore(&led_dat->lock, flags); +} + +static ssize_t netxbig_led_sata_store(struct device *dev, + struct device_attribute *attr, + const char *buff, size_t count) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct netxbig_led_data *led_dat = + container_of(led_cdev, struct netxbig_led_data, cdev); + unsigned long enable; + enum netxbig_led_mode mode; + int mode_val; + int ret; + + ret = strict_strtoul(buff, 10, &enable); + if (ret < 0) + return ret; + + enable = !!enable; + + spin_lock_irq(&led_dat->lock); + + if (led_dat->sata == enable) { + ret = count; + goto exit_unlock; + } + + if (led_dat->mode != NETXBIG_LED_ON && + led_dat->mode != NETXBIG_LED_SATA) + mode = led_dat->mode; /* Keep modes 'off' and 'timer'. */ + else if (enable) + mode = NETXBIG_LED_SATA; + else + mode = NETXBIG_LED_ON; + + mode_val = led_dat->mode_val[mode]; + if (mode_val == NETXBIG_LED_INVALID_MODE) { + ret = -EINVAL; + goto exit_unlock; + } + + gpio_ext_set_value(led_dat->gpio_ext, led_dat->mode_addr, mode_val); + led_dat->mode = mode; + led_dat->sata = enable; + + ret = count; + +exit_unlock: + spin_unlock_irq(&led_dat->lock); + + return ret; +} + +static ssize_t netxbig_led_sata_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct netxbig_led_data *led_dat = + container_of(led_cdev, struct netxbig_led_data, cdev); + + return sprintf(buf, "%d\n", led_dat->sata); +} + +static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store); + +static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat) +{ + if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) + device_remove_file(led_dat->cdev.dev, &dev_attr_sata); + led_classdev_unregister(&led_dat->cdev); +} + +static int __devinit +create_netxbig_led(struct platform_device *pdev, + struct netxbig_led_data *led_dat, + const struct netxbig_led *template) +{ + struct netxbig_led_platform_data *pdata = pdev->dev.platform_data; + int ret; + + spin_lock_init(&led_dat->lock); + led_dat->gpio_ext = pdata->gpio_ext; + led_dat->cdev.name = template->name; + led_dat->cdev.default_trigger = template->default_trigger; + led_dat->cdev.blink_set = netxbig_led_blink_set; + led_dat->cdev.brightness_set = netxbig_led_set; + /* + * Because the GPIO extension bus don't allow to read registers + * value, there is no way to probe the LED initial state. + * So, the initial sysfs LED value for the "brightness" and "sata" + * attributes are inconsistent. + * + * Note that the initial LED state can't be reconfigured. + * The reason is that the LED behaviour must stay uniform during + * the whole boot process (bootloader+linux). + */ + led_dat->sata = 0; + led_dat->cdev.brightness = LED_OFF; + led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; + led_dat->mode_addr = template->mode_addr; + led_dat->mode_val = template->mode_val; + led_dat->bright_addr = template->bright_addr; + led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1; + led_dat->timer = pdata->timer; + led_dat->num_timer = pdata->num_timer; + + ret = led_classdev_register(&pdev->dev, &led_dat->cdev); + if (ret < 0) + return ret; + + /* + * If available, expose the SATA activity blink capability through + * a "sata" sysfs attribute. 
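The leds-netxbig driver above is driven entirely by platform data from <mach/leds-netxbig.h>, which is not part of this diff: a board describes the GPIO extension bus pins, each LED's mode and brightness register addresses, a mode-to-register-value table, and optionally the hardware blink timers consumed by netxbig_led_blink_set(). A rough, hypothetical board fragment could look like the following; the field names are taken from how the driver dereferences the platform data above, while the GPIO numbers, register addresses and mode values are invented for the example:

#include <linux/kernel.h>
#include <linux/leds.h>
#include <mach/leds-netxbig.h>

/* All numbers below are invented; a real board takes them from its
 * CPLD documentation. */
static unsigned example_gpio_ext_addr[] = { 47, 48, 49 };	/* address pins */
static unsigned example_gpio_ext_data[] = { 40, 41, 42, 43 };	/* data pins */

static struct netxbig_gpio_ext example_gpio_ext = {
	.addr		= example_gpio_ext_addr,
	.num_addr	= ARRAY_SIZE(example_gpio_ext_addr),
	.data		= example_gpio_ext_data,
	.num_data	= ARRAY_SIZE(example_gpio_ext_data),
	.enable		= 29,					/* latch strobe */
};

/* Register value to write for each supported mode; modes this LED
 * cannot do are marked NETXBIG_LED_INVALID_MODE so the driver
 * rejects them. */
static int example_red_mode_val[] = {
	[NETXBIG_LED_OFF]	= 0,
	[NETXBIG_LED_ON]	= 2,
	[NETXBIG_LED_SATA]	= NETXBIG_LED_INVALID_MODE,
};

static struct netxbig_led example_leds[] = {
	{
		.name		= "example:red:fail",
		.mode_addr	= 0,
		.mode_val	= example_red_mode_val,
		.bright_addr	= 1,
	},
};

static struct netxbig_led_platform_data example_leds_data = {
	.gpio_ext	= &example_gpio_ext,
	.timer		= NULL,		/* no hardware blink timers wired up */
	.num_timer	= 0,
	.leds		= example_leds,
	.num_leds	= ARRAY_SIZE(example_leds),
};

A LED whose mode table maps NETXBIG_LED_SATA to a valid value additionally gets the "sata" sysfs attribute created above, so writing 1 to it switches that LED to hardware SATA-activity blinking.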
+ */ + if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) { + ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata); + if (ret) + led_classdev_unregister(&led_dat->cdev); + } + + return ret; +} + +static int __devinit netxbig_led_probe(struct platform_device *pdev) +{ + struct netxbig_led_platform_data *pdata = pdev->dev.platform_data; + struct netxbig_led_data *leds_data; + int i; + int ret; + + if (!pdata) + return -EINVAL; + + leds_data = kzalloc(sizeof(struct netxbig_led_data) * pdata->num_leds, + GFP_KERNEL); + if (!leds_data) + return -ENOMEM; + + ret = gpio_ext_init(pdata->gpio_ext); + if (ret < 0) + goto err_free_data; + + for (i = 0; i < pdata->num_leds; i++) { + ret = create_netxbig_led(pdev, &leds_data[i], &pdata->leds[i]); + if (ret < 0) + goto err_free_leds; + } + + platform_set_drvdata(pdev, leds_data); + + return 0; + +err_free_leds: + for (i = i - 1; i >= 0; i--) + delete_netxbig_led(&leds_data[i]); + + gpio_ext_free(pdata->gpio_ext); +err_free_data: + kfree(leds_data); + + return ret; +} + +static int __devexit netxbig_led_remove(struct platform_device *pdev) +{ + struct netxbig_led_platform_data *pdata = pdev->dev.platform_data; + struct netxbig_led_data *leds_data; + int i; + + leds_data = platform_get_drvdata(pdev); + + for (i = 0; i < pdata->num_leds; i++) + delete_netxbig_led(&leds_data[i]); + + gpio_ext_free(pdata->gpio_ext); + kfree(leds_data); + + return 0; +} + +static struct platform_driver netxbig_led_driver = { + .probe = netxbig_led_probe, + .remove = __devexit_p(netxbig_led_remove), + .driver = { + .name = "leds-netxbig", + .owner = THIS_MODULE, + }, +}; +MODULE_ALIAS("platform:leds-netxbig"); + +static int __init netxbig_led_init(void) +{ + return platform_driver_register(&netxbig_led_driver); +} + +static void __exit netxbig_led_exit(void) +{ + platform_driver_unregister(&netxbig_led_driver); +} + +module_init(netxbig_led_init); +module_exit(netxbig_led_exit); + +MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>"); +MODULE_DESCRIPTION("LED driver for LaCie xBig Network boards"); +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index 74dce4ba026..f77d48d0b3e 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -81,7 +81,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat, int cmd_level; int slow_level; - read_lock(&led_dat->rw_lock); + read_lock_irq(&led_dat->rw_lock); cmd_level = gpio_get_value(led_dat->cmd); slow_level = gpio_get_value(led_dat->slow); @@ -95,7 +95,7 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat, } } - read_unlock(&led_dat->rw_lock); + read_unlock_irq(&led_dat->rw_lock); return ret; } @@ -104,8 +104,9 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat, enum ns2_led_modes mode) { int i; + unsigned long flags; - write_lock(&led_dat->rw_lock); + write_lock_irqsave(&led_dat->rw_lock, flags); for (i = 0; i < ARRAY_SIZE(ns2_led_modval); i++) { if (mode == ns2_led_modval[i].mode) { @@ -116,7 +117,7 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat, } } - write_unlock(&led_dat->rw_lock); + write_unlock_irqrestore(&led_dat->rw_lock, flags); } static void ns2_led_set(struct led_classdev *led_cdev, @@ -140,10 +141,12 @@ static ssize_t ns2_led_sata_store(struct device *dev, struct device_attribute *attr, const char *buff, size_t count) { + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct ns2_led_data *led_dat = + container_of(led_cdev, struct ns2_led_data, cdev); int ret; unsigned long enable; enum ns2_led_modes mode; 
- struct ns2_led_data *led_dat = dev_get_drvdata(dev); ret = strict_strtoul(buff, 10, &enable); if (ret < 0) @@ -171,7 +174,9 @@ static ssize_t ns2_led_sata_store(struct device *dev, static ssize_t ns2_led_sata_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct ns2_led_data *led_dat = dev_get_drvdata(dev); + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct ns2_led_data *led_dat = + container_of(led_cdev, struct ns2_led_data, cdev); return sprintf(buf, "%d\n", led_dat->sata); } @@ -233,7 +238,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, if (ret < 0) goto err_free_slow; - dev_set_drvdata(led_dat->cdev.dev, led_dat); ret = device_create_file(led_dat->cdev.dev, &dev_attr_sata); if (ret < 0) goto err_free_cdev; diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 85b714df8ea..3c781cdddda 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -514,6 +514,7 @@ static const struct file_operations lguest_fops = { .release = close, .write = write, .read = read, + .llseek = default_llseek, }; /* diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c index 1c4ee6e7793..75049e76519 100644 --- a/drivers/macintosh/adb.c +++ b/drivers/macintosh/adb.c @@ -24,7 +24,6 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> @@ -55,6 +54,7 @@ extern struct adb_driver adb_iop_driver; extern struct adb_driver via_pmu_driver; extern struct adb_driver macio_adb_driver; +static DEFINE_MUTEX(adb_mutex); static struct adb_driver *adb_driver_list[] = { #ifdef CONFIG_ADB_MACII &via_macii_driver, @@ -83,7 +83,7 @@ static struct adb_driver *adb_controller; BLOCKING_NOTIFIER_HEAD(adb_client_list); static int adb_got_sleep; static int adb_inited; -static DECLARE_MUTEX(adb_probe_mutex); +static DEFINE_SEMAPHORE(adb_probe_mutex); static int sleepy_trackpad; static int autopoll_devs; int __adb_probe_sync; @@ -647,7 +647,7 @@ static int adb_open(struct inode *inode, struct file *file) struct adbdev_state *state; int ret = 0; - lock_kernel(); + mutex_lock(&adb_mutex); if (iminor(inode) > 0 || adb_controller == NULL) { ret = -ENXIO; goto out; @@ -665,7 +665,7 @@ static int adb_open(struct inode *inode, struct file *file) state->inuse = 1; out: - unlock_kernel(); + mutex_unlock(&adb_mutex); return ret; } @@ -674,7 +674,7 @@ static int adb_release(struct inode *inode, struct file *file) struct adbdev_state *state = file->private_data; unsigned long flags; - lock_kernel(); + mutex_lock(&adb_mutex); if (state) { file->private_data = NULL; spin_lock_irqsave(&state->lock, flags); @@ -687,7 +687,7 @@ static int adb_release(struct inode *inode, struct file *file) spin_unlock_irqrestore(&state->lock, flags); } } - unlock_kernel(); + mutex_unlock(&adb_mutex); return 0; } diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c index a3d25da2f27..1a57e88a38f 100644 --- a/drivers/macintosh/ans-lcd.c +++ b/drivers/macintosh/ans-lcd.c @@ -137,6 +137,7 @@ const struct file_operations anslcd_fops = { .write = anslcd_write, .unlocked_ioctl = anslcd_ioctl, .open = anslcd_open, + .llseek = default_llseek, }; static struct miscdevice anslcd_dev = { diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index e58c3d33e03..290cb325a94 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -19,7 +19,6 @@ * the userland interface */ -#include <linux/smp_lock.h> #include 
<linux/types.h> #include <linux/kernel.h> #include <linux/device.h> @@ -97,6 +96,7 @@ struct smu_device { * I don't think there will ever be more than one SMU, so * for now, just hard code that */ +static DEFINE_MUTEX(smu_mutex); static struct smu_device *smu; static DEFINE_MUTEX(smu_part_access); static int smu_irq_inited; @@ -1095,12 +1095,12 @@ static int smu_open(struct inode *inode, struct file *file) pp->mode = smu_file_commands; init_waitqueue_head(&pp->wait); - lock_kernel(); + mutex_lock(&smu_mutex); spin_lock_irqsave(&smu_clist_lock, flags); list_add(&pp->list, &smu_clist); spin_unlock_irqrestore(&smu_clist_lock, flags); file->private_data = pp; - unlock_kernel(); + mutex_unlock(&smu_mutex); return 0; } diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c index d242976bcfe..19c371809d7 100644 --- a/drivers/macintosh/via-pmu-led.c +++ b/drivers/macintosh/via-pmu-led.c @@ -92,8 +92,10 @@ static int __init via_pmu_led_init(void) if (dt == NULL) return -ENODEV; model = of_get_property(dt, "model", NULL); - if (model == NULL) + if (model == NULL) { + of_node_put(dt); return -ENODEV; + } if (strncmp(model, "PowerBook", strlen("PowerBook")) != 0 && strncmp(model, "iBook", strlen("iBook")) != 0 && strcmp(model, "PowerMac7,2") != 0 && diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index 2d17e76066b..cd29c824838 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -18,7 +18,7 @@ * */ #include <stdarg.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -73,6 +73,7 @@ /* How many iterations between battery polls */ #define BATTERY_POLLING_COUNT 2 +static DEFINE_MUTEX(pmu_info_proc_mutex); static volatile unsigned char __iomem *via; /* VIA registers - spaced 0x200 bytes apart */ @@ -2078,7 +2079,7 @@ pmu_open(struct inode *inode, struct file *file) pp->rb_get = pp->rb_put = 0; spin_lock_init(&pp->lock); init_waitqueue_head(&pp->wait); - lock_kernel(); + mutex_lock(&pmu_info_proc_mutex); spin_lock_irqsave(&all_pvt_lock, flags); #if defined(CONFIG_INPUT_ADBHID) && defined(CONFIG_PMAC_BACKLIGHT) pp->backlight_locker = 0; @@ -2086,7 +2087,7 @@ pmu_open(struct inode *inode, struct file *file) list_add(&pp->list, &all_pmu_pvt); spin_unlock_irqrestore(&all_pvt_lock, flags); file->private_data = pp; - unlock_kernel(); + mutex_unlock(&pmu_info_proc_mutex); return 0; } @@ -2343,9 +2344,9 @@ static long pmu_unlocked_ioctl(struct file *filp, { int ret; - lock_kernel(); + mutex_lock(&pmu_info_proc_mutex); ret = pmu_ioctl(filp, cmd, arg); - unlock_kernel(); + mutex_unlock(&pmu_info_proc_mutex); return ret; } @@ -2398,6 +2399,7 @@ static const struct file_operations pmu_device_fops = { #endif .open = pmu_open, .release = pmu_release, + .llseek = noop_llseek, }; static struct miscdevice pmu_device = { diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ed4900ade93..e4fb58db545 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1000,10 +1000,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) page = bitmap->sb_page; offset = sizeof(bitmap_super_t); if (!file) - read_sb_page(bitmap->mddev, - bitmap->mddev->bitmap_info.offset, - page, - index, count); + page = read_sb_page( + bitmap->mddev, + bitmap->mddev->bitmap_info.offset, + page, + index, count); } else if (file) { page = read_page(file, index, bitmap, count); offset = 0; diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 
368e8e98f70..d5b0e4c0e70 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1278,7 +1278,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, struct dm_crypt_io *io; struct crypt_config *cc; - if (unlikely(bio_empty_barrier(bio))) { + if (bio->bi_rw & REQ_FLUSH) { cc = ti->private; bio->bi_bdev = cc->dev->bdev; return DM_MAPIO_REMAPPED; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 0590c75b0ab..136d4f71a11 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -31,7 +31,6 @@ struct dm_io_client { */ struct io { unsigned long error_bits; - unsigned long eopnotsupp_bits; atomic_t count; struct task_struct *sleeper; struct dm_io_client *client; @@ -130,11 +129,8 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io, *---------------------------------------------------------------*/ static void dec_count(struct io *io, unsigned int region, int error) { - if (error) { + if (error) set_bit(region, &io->error_bits); - if (error == -EOPNOTSUPP) - set_bit(region, &io->eopnotsupp_bits); - } if (atomic_dec_and_test(&io->count)) { if (io->sleeper) @@ -310,8 +306,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, sector_t remaining = where->count; /* - * where->count may be zero if rw holds a write barrier and we - * need to send a zero-sized barrier. + * where->count may be zero if rw holds a flush and we need to + * send a zero-sized flush. */ do { /* @@ -364,7 +360,7 @@ static void dispatch_io(int rw, unsigned int num_regions, */ for (i = 0; i < num_regions; i++) { *dp = old_pages; - if (where[i].count || (rw & REQ_HARDBARRIER)) + if (where[i].count || (rw & REQ_FLUSH)) do_region(rw, i, where + i, dp, io); } @@ -393,9 +389,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions, return -EIO; } -retry: io->error_bits = 0; - io->eopnotsupp_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ io->sleeper = current; io->client = client; @@ -412,11 +406,6 @@ retry: } set_current_state(TASK_RUNNING); - if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) { - rw &= ~REQ_HARDBARRIER; - goto retry; - } - if (error_bits) *error_bits = io->error_bits; @@ -437,7 +426,6 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions, io = mempool_alloc(client->pool, GFP_NOIO); io->error_bits = 0; - io->eopnotsupp_bits = 0; atomic_set(&io->count, 1); /* see dispatch_io() */ io->sleeper = NULL; io->client = client; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 3e39193e503..4b54618b415 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1596,6 +1596,7 @@ static const struct file_operations _ctl_fops = { .unlocked_ioctl = dm_ctl_ioctl, .compat_ioctl = dm_compat_ctl_ioctl, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice _dm_misc = { diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 5a08be0222d..33420e68d15 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -300,7 +300,7 @@ static int flush_header(struct log_c *lc) .count = 0, }; - lc->io_req.bi_rw = WRITE_BARRIER; + lc->io_req.bi_rw = WRITE_FLUSH; return dm_io(&lc->io_req, 1, &null_location, NULL); } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 7c081bcbc3c..19a59b041c2 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -259,7 +259,7 @@ static int mirror_flush(struct dm_target *ti) struct dm_io_region io[ms->nr_mirrors]; struct mirror *m; struct dm_io_request io_req = { - .bi_rw = WRITE_BARRIER, + .bi_rw = 
WRITE_FLUSH, .mem.type = DM_IO_KMEM, .mem.ptr.bvec = NULL, .client = ms->io_client, @@ -629,7 +629,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) struct dm_io_region io[ms->nr_mirrors], *dest = io; struct mirror *m; struct dm_io_request io_req = { - .bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER), + .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), .mem.type = DM_IO_BVEC, .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, .notify.fn = write_callback, @@ -670,7 +670,7 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&requeue); while ((bio = bio_list_pop(writes))) { - if (unlikely(bio_empty_barrier(bio))) { + if (bio->bi_rw & REQ_FLUSH) { bio_list_add(&sync, bio); continue; } @@ -1203,7 +1203,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - if (likely(!bio_empty_barrier(bio))) + if (!(bio->bi_rw & REQ_FLUSH)) dm_rh_dec(ms->rh, map_context->ll); return error; } diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index bd5c58b2886..dad011aed0c 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -81,9 +81,9 @@ struct dm_region_hash { struct list_head failed_recovered_regions; /* - * If there was a barrier failure no regions can be marked clean. + * If there was a flush failure no regions can be marked clean. */ - int barrier_failure; + int flush_failure; void *context; sector_t target_begin; @@ -217,7 +217,7 @@ struct dm_region_hash *dm_region_hash_create( INIT_LIST_HEAD(&rh->quiesced_regions); INIT_LIST_HEAD(&rh->recovered_regions); INIT_LIST_HEAD(&rh->failed_recovered_regions); - rh->barrier_failure = 0; + rh->flush_failure = 0; rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, sizeof(struct dm_region)); @@ -399,8 +399,8 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio) region_t region = dm_rh_bio_to_region(rh, bio); int recovering = 0; - if (bio_empty_barrier(bio)) { - rh->barrier_failure = 1; + if (bio->bi_rw & REQ_FLUSH) { + rh->flush_failure = 1; return; } @@ -524,7 +524,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios) struct bio *bio; for (bio = bios->head; bio; bio = bio->bi_next) { - if (bio_empty_barrier(bio)) + if (bio->bi_rw & REQ_FLUSH) continue; rh_inc(rh, dm_rh_bio_to_region(rh, bio)); } @@ -555,9 +555,9 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region) */ /* do nothing for DM_RH_NOSYNC */ - if (unlikely(rh->barrier_failure)) { + if (unlikely(rh->flush_failure)) { /* - * If a write barrier failed some time ago, we + * If a write flush failed some time ago, we * don't know whether or not this write made it * to the disk, so we must resync the device. */ diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index cc2bdb83f9a..0b61792a278 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -687,7 +687,7 @@ static void persistent_commit_exception(struct dm_exception_store *store, /* * Commit exceptions to disk. */ - if (ps->valid && area_io(ps, WRITE_BARRIER)) + if (ps->valid && area_io(ps, WRITE_FLUSH_FUA)) ps->valid = 0; /* diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 5974d3094d9..53cf79d8bcb 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -706,8 +706,6 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) return 0; } -#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? 
(l) : min(l, r))) - /* * Return a minimum chunk size of all snapshots that have the specified origin. * Return zero if the origin has no snapshots. @@ -1587,7 +1585,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio, chunk_t chunk; struct dm_snap_pending_exception *pe = NULL; - if (unlikely(bio_empty_barrier(bio))) { + if (bio->bi_rw & REQ_FLUSH) { bio->bi_bdev = s->cow->bdev; return DM_MAPIO_REMAPPED; } @@ -1691,7 +1689,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, int r = DM_MAPIO_REMAPPED; chunk_t chunk; - if (unlikely(bio_empty_barrier(bio))) { + if (bio->bi_rw & REQ_FLUSH) { if (!map_context->target_request_nr) bio->bi_bdev = s->origin->bdev; else @@ -2135,7 +2133,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio, struct dm_dev *dev = ti->private; bio->bi_bdev = dev->bdev; - if (unlikely(bio_empty_barrier(bio))) + if (bio->bi_rw & REQ_FLUSH) return DM_MAPIO_REMAPPED; /* Only tell snapshots if this is a write */ diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index c297f6da91e..f0371b4c4fb 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -271,7 +271,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio, uint32_t stripe; unsigned target_request_nr; - if (unlikely(bio_empty_barrier(bio))) { + if (bio->bi_rw & REQ_FLUSH) { target_request_nr = map_context->target_request_nr; BUG_ON(target_request_nr >= sc->stripes); bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index f9fc07d7a4b..90267f8d64e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -486,11 +486,6 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, return 0; } -/* - * Returns the minimum that is _not_ zero, unless both are zero. - */ -#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) - int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ac384b2a6a3..7cb1352f7e7 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -15,7 +15,6 @@ #include <linux/blkpg.h> #include <linux/bio.h> #include <linux/buffer_head.h> -#include <linux/smp_lock.h> #include <linux/mempool.h> #include <linux/slab.h> #include <linux/idr.h> @@ -33,6 +32,7 @@ #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE" #define DM_COOKIE_LENGTH 24 +static DEFINE_MUTEX(dm_mutex); static const char *_name = DM_NAME; static unsigned int major = 0; @@ -110,7 +110,6 @@ EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo); #define DMF_FREEING 3 #define DMF_DELETING 4 #define DMF_NOFLUSH_SUSPENDING 5 -#define DMF_QUEUE_IO_TO_THREAD 6 /* * Work processed by per-device workqueue. @@ -144,24 +143,9 @@ struct mapped_device { spinlock_t deferred_lock; /* - * An error from the barrier request currently being processed. - */ - int barrier_error; - - /* - * Protect barrier_error from concurrent endio processing - * in request-based dm. - */ - spinlock_t barrier_error_lock; - - /* - * Processing queue (flush/barriers) + * Processing queue (flush) */ struct workqueue_struct *wq; - struct work_struct barrier_work; - - /* A pointer to the currently processing pre/post flush request */ - struct request *flush_request; /* * The current mapping. 
@@ -200,8 +184,8 @@ struct mapped_device { /* sysfs handle */ struct kobject kobj; - /* zero-length barrier that will be cloned and submitted to targets */ - struct bio barrier_bio; + /* zero-length flush that will be cloned and submitted to targets */ + struct bio flush_bio; }; /* @@ -344,7 +328,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; - lock_kernel(); + mutex_lock(&dm_mutex); spin_lock(&_minor_lock); md = bdev->bd_disk->private_data; @@ -362,7 +346,7 @@ static int dm_blk_open(struct block_device *bdev, fmode_t mode) out: spin_unlock(&_minor_lock); - unlock_kernel(); + mutex_unlock(&dm_mutex); return md ? 0 : -ENXIO; } @@ -371,10 +355,10 @@ static int dm_blk_close(struct gendisk *disk, fmode_t mode) { struct mapped_device *md = disk->private_data; - lock_kernel(); + mutex_lock(&dm_mutex); atomic_dec(&md->open_count); dm_put(md); - unlock_kernel(); + mutex_unlock(&dm_mutex); return 0; } @@ -512,7 +496,7 @@ static void end_io_acct(struct dm_io *io) /* * After this is decremented the bio must not be touched if it is - * a barrier. + * a flush. */ dm_disk(md)->part0.in_flight[rw] = pending = atomic_dec_return(&md->pending[rw]); @@ -528,16 +512,12 @@ static void end_io_acct(struct dm_io *io) */ static void queue_io(struct mapped_device *md, struct bio *bio) { - down_write(&md->io_lock); + unsigned long flags; - spin_lock_irq(&md->deferred_lock); + spin_lock_irqsave(&md->deferred_lock, flags); bio_list_add(&md->deferred, bio); - spin_unlock_irq(&md->deferred_lock); - - if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) - queue_work(md->wq, &md->work); - - up_write(&md->io_lock); + spin_unlock_irqrestore(&md->deferred_lock, flags); + queue_work(md->wq, &md->work); } /* @@ -625,11 +605,9 @@ static void dec_pending(struct dm_io *io, int error) * Target requested pushing back the I/O. */ spin_lock_irqsave(&md->deferred_lock, flags); - if (__noflush_suspending(md)) { - if (!(io->bio->bi_rw & REQ_HARDBARRIER)) - bio_list_add_head(&md->deferred, - io->bio); - } else + if (__noflush_suspending(md)) + bio_list_add_head(&md->deferred, io->bio); + else /* noflush suspend was interrupted. */ io->error = -EIO; spin_unlock_irqrestore(&md->deferred_lock, flags); @@ -637,32 +615,23 @@ static void dec_pending(struct dm_io *io, int error) io_error = io->error; bio = io->bio; + end_io_acct(io); + free_io(md, io); + + if (io_error == DM_ENDIO_REQUEUE) + return; - if (bio->bi_rw & REQ_HARDBARRIER) { + if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { /* - * There can be just one barrier request so we use - * a per-device variable for error reporting. - * Note that you can't touch the bio after end_io_acct - * - * We ignore -EOPNOTSUPP for empty flush reported by - * underlying devices. We assume that if the device - * doesn't support empty barriers, it doesn't need - * cache flushing commands. + * Preflush done for flush with data, reissue + * without REQ_FLUSH. 
*/ - if (!md->barrier_error && - !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP)) - md->barrier_error = io_error; - end_io_acct(io); - free_io(md, io); + bio->bi_rw &= ~REQ_FLUSH; + queue_io(md, bio); } else { - end_io_acct(io); - free_io(md, io); - - if (io_error != DM_ENDIO_REQUEUE) { - trace_block_bio_complete(md->queue, bio); - - bio_endio(bio, io_error); - } + /* done with normal IO or empty flush */ + trace_block_bio_complete(md->queue, bio); + bio_endio(bio, io_error); } } } @@ -755,23 +724,6 @@ static void end_clone_bio(struct bio *clone, int error) blk_update_request(tio->orig, 0, nr_bytes); } -static void store_barrier_error(struct mapped_device *md, int error) -{ - unsigned long flags; - - spin_lock_irqsave(&md->barrier_error_lock, flags); - /* - * Basically, the first error is taken, but: - * -EOPNOTSUPP supersedes any I/O error. - * Requeue request supersedes any I/O error but -EOPNOTSUPP. - */ - if (!md->barrier_error || error == -EOPNOTSUPP || - (md->barrier_error != -EOPNOTSUPP && - error == DM_ENDIO_REQUEUE)) - md->barrier_error = error; - spin_unlock_irqrestore(&md->barrier_error_lock, flags); -} - /* * Don't touch any member of the md after calling this function because * the md may be freed in dm_put() at the end of this function. @@ -809,13 +761,11 @@ static void free_rq_clone(struct request *clone) static void dm_end_request(struct request *clone, int error) { int rw = rq_data_dir(clone); - int run_queue = 1; - bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER; struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; struct request *rq = tio->orig; - if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) { + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { rq->errors = clone->errors; rq->resid_len = clone->resid_len; @@ -829,15 +779,8 @@ static void dm_end_request(struct request *clone, int error) } free_rq_clone(clone); - - if (unlikely(is_barrier)) { - if (unlikely(error)) - store_barrier_error(md, error); - run_queue = 0; - } else - blk_end_request_all(rq, error); - - rq_completed(md, rw, run_queue); + blk_end_request_all(rq, error); + rq_completed(md, rw, true); } static void dm_unprep_request(struct request *rq) @@ -862,16 +805,6 @@ void dm_requeue_unmapped_request(struct request *clone) struct request_queue *q = rq->q; unsigned long flags; - if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { - /* - * Barrier clones share an original request. - * Leave it to dm_end_request(), which handles this special - * case. - */ - dm_end_request(clone, DM_ENDIO_REQUEUE); - return; - } - dm_unprep_request(rq); spin_lock_irqsave(q->queue_lock, flags); @@ -961,19 +894,6 @@ static void dm_complete_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; - if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { - /* - * Barrier clones share an original request. So can't use - * softirq_done with the original. - * Pass the clone to dm_done() directly in this special case. - * It is safe (even if clone->q->queue_lock is held here) - * because there is no I/O dispatching during the completion - * of barrier clone. 
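With the barrier special-casing removed above, request-based dm no longer clones and tracks REQ_HARDBARRIER requests itself: dm_init_md_queue() further down simply advertises flush/FUA support and lets the block layer sequence the flush, and dm_request_fn() routes an incoming flush to the target that owns sector 0. For a generic request-based queue the declaration amounts to no more than this (a sketch, not dm code):

#include <linux/blkdev.h>

static void example_init_queue(struct request_queue *q)
{
	/* Replaces blk_queue_ordered(q, QUEUE_ORDERED_*): the queue
	 * accepts REQ_FLUSH requests and REQ_FUA writes, and the block
	 * layer takes care of sequencing them against other I/O. */
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
}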
- */ - dm_done(clone, error, true); - return; - } - tio->error = error; rq->completion_data = clone; blk_complete_request(rq); @@ -990,17 +910,6 @@ void dm_kill_unmapped_request(struct request *clone, int error) struct dm_rq_target_io *tio = clone->end_io_data; struct request *rq = tio->orig; - if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { - /* - * Barrier clones share an original request. - * Leave it to dm_end_request(), which handles this special - * case. - */ - BUG_ON(error > 0); - dm_end_request(clone, error); - return; - } - rq->cmd_flags |= REQ_FAILED; dm_complete_request(clone, error); } @@ -1119,7 +1028,7 @@ static void dm_bio_destructor(struct bio *bio) } /* - * Creates a little bio that is just does part of a bvec. + * Creates a little bio that just does part of a bvec. */ static struct bio *split_bvec(struct bio *bio, sector_t sector, unsigned short idx, unsigned int offset, @@ -1134,7 +1043,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, clone->bi_sector = sector; clone->bi_bdev = bio->bi_bdev; - clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER; + clone->bi_rw = bio->bi_rw; clone->bi_vcnt = 1; clone->bi_size = to_bytes(len); clone->bi_io_vec->bv_offset = offset; @@ -1161,7 +1070,6 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); __bio_clone(clone, bio); - clone->bi_rw &= ~REQ_HARDBARRIER; clone->bi_destructor = dm_bio_destructor; clone->bi_sector = sector; clone->bi_idx = idx; @@ -1225,16 +1133,15 @@ static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti, __issue_target_request(ci, ti, request_nr, len); } -static int __clone_and_map_empty_barrier(struct clone_info *ci) +static int __clone_and_map_empty_flush(struct clone_info *ci) { unsigned target_nr = 0; struct dm_target *ti; + BUG_ON(bio_has_data(ci->bio)); while ((ti = dm_table_get_target(ci->map, target_nr++))) __issue_target_requests(ci, ti, ti->num_flush_requests, 0); - ci->sector_count = 0; - return 0; } @@ -1289,9 +1196,6 @@ static int __clone_and_map(struct clone_info *ci) sector_t len = 0, max; struct dm_target_io *tio; - if (unlikely(bio_empty_barrier(bio))) - return __clone_and_map_empty_barrier(ci); - if (unlikely(bio->bi_rw & REQ_DISCARD)) return __clone_and_map_discard(ci); @@ -1383,16 +1287,11 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) ci.map = dm_get_live_table(md); if (unlikely(!ci.map)) { - if (!(bio->bi_rw & REQ_HARDBARRIER)) - bio_io_error(bio); - else - if (!md->barrier_error) - md->barrier_error = -EIO; + bio_io_error(bio); return; } ci.md = md; - ci.bio = bio; ci.io = alloc_io(md); ci.io->error = 0; atomic_set(&ci.io->io_count, 1); @@ -1400,14 +1299,20 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) ci.io->md = md; spin_lock_init(&ci.io->endio_lock); ci.sector = bio->bi_sector; - ci.sector_count = bio_sectors(bio); - if (unlikely(bio_empty_barrier(bio))) - ci.sector_count = 1; ci.idx = bio->bi_idx; start_io_acct(ci.io); - while (ci.sector_count && !error) - error = __clone_and_map(&ci); + if (bio->bi_rw & REQ_FLUSH) { + ci.bio = &ci.md->flush_bio; + ci.sector_count = 0; + error = __clone_and_map_empty_flush(&ci); + /* dec_pending submits any data associated with flush */ + } else { + ci.bio = bio; + ci.sector_count = bio_sectors(bio); + while (ci.sector_count && !error) + error = __clone_and_map(&ci); + } /* drop the extra reference count */ dec_pending(ci.io, error); @@ -1491,22 +1396,14 @@ static int 
_dm_request(struct request_queue *q, struct bio *bio) part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio)); part_stat_unlock(); - /* - * If we're suspended or the thread is processing barriers - * we have to queue this io for later. - */ - if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) || - unlikely(bio->bi_rw & REQ_HARDBARRIER)) { + /* if we're suspended, we have to queue this io for later */ + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { up_read(&md->io_lock); - if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && - bio_rw(bio) == READA) { + if (bio_rw(bio) != READA) + queue_io(md, bio); + else bio_io_error(bio); - return 0; - } - - queue_io(md, bio); - return 0; } @@ -1537,14 +1434,6 @@ static int dm_request(struct request_queue *q, struct bio *bio) return _dm_request(q, bio); } -static bool dm_rq_is_flush_request(struct request *rq) -{ - if (rq->cmd_flags & REQ_FLUSH) - return true; - else - return false; -} - void dm_dispatch_request(struct request *rq) { int r; @@ -1592,22 +1481,15 @@ static int setup_clone(struct request *clone, struct request *rq, { int r; - if (dm_rq_is_flush_request(rq)) { - blk_rq_init(NULL, clone); - clone->cmd_type = REQ_TYPE_FS; - clone->cmd_flags |= (REQ_HARDBARRIER | WRITE); - } else { - r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, - dm_rq_bio_constructor, tio); - if (r) - return r; - - clone->cmd = rq->cmd; - clone->cmd_len = rq->cmd_len; - clone->sense = rq->sense; - clone->buffer = rq->buffer; - } + r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC, + dm_rq_bio_constructor, tio); + if (r) + return r; + clone->cmd = rq->cmd; + clone->cmd_len = rq->cmd_len; + clone->sense = rq->sense; + clone->buffer = rq->buffer; clone->end_io = end_clone_request; clone->end_io_data = tio; @@ -1648,9 +1530,6 @@ static int dm_prep_fn(struct request_queue *q, struct request *rq) struct mapped_device *md = q->queuedata; struct request *clone; - if (unlikely(dm_rq_is_flush_request(rq))) - return BLKPREP_OK; - if (unlikely(rq->special)) { DMWARN("Already has something in rq->special."); return BLKPREP_KILL; @@ -1727,6 +1606,7 @@ static void dm_request_fn(struct request_queue *q) struct dm_table *map = dm_get_live_table(md); struct dm_target *ti; struct request *rq, *clone; + sector_t pos; /* * For suspend, check blk_queue_stopped() and increment @@ -1739,15 +1619,14 @@ static void dm_request_fn(struct request_queue *q) if (!rq) goto plug_and_out; - if (unlikely(dm_rq_is_flush_request(rq))) { - BUG_ON(md->flush_request); - md->flush_request = rq; - blk_start_request(rq); - queue_work(md->wq, &md->barrier_work); - goto out; - } + /* always use block 0 to find the target for flushes for now */ + pos = 0; + if (!(rq->cmd_flags & REQ_FLUSH)) + pos = blk_rq_pos(rq); + + ti = dm_table_find_target(map, pos); + BUG_ON(!dm_target_is_valid(ti)); - ti = dm_table_find_target(map, blk_rq_pos(rq)); if (ti->type->busy && ti->type->busy(ti)) goto plug_and_out; @@ -1918,7 +1797,6 @@ out: static const struct block_device_operations dm_blk_dops; static void dm_wq_work(struct work_struct *work); -static void dm_rq_barrier_work(struct work_struct *work); static void dm_init_md_queue(struct mapped_device *md) { @@ -1940,6 +1818,7 @@ static void dm_init_md_queue(struct mapped_device *md) blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY); md->queue->unplug_fn = dm_unplug_all; blk_queue_merge_bvec(md->queue, dm_merge_bvec); + blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA); } /* @@ -1972,7 +1851,6 @@ static struct 
mapped_device *alloc_dev(int minor) mutex_init(&md->suspend_lock); mutex_init(&md->type_lock); spin_lock_init(&md->deferred_lock); - spin_lock_init(&md->barrier_error_lock); rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); @@ -1995,7 +1873,6 @@ static struct mapped_device *alloc_dev(int minor) atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); - INIT_WORK(&md->barrier_work, dm_rq_barrier_work); init_waitqueue_head(&md->eventq); md->disk->major = _major; @@ -2015,6 +1892,10 @@ static struct mapped_device *alloc_dev(int minor) if (!md->bdev) goto bad_bdev; + bio_init(&md->flush_bio); + md->flush_bio.bi_bdev = md->bdev; + md->flush_bio.bi_rw = WRITE_FLUSH; + /* Populate the mapping, nobody knows we exist yet */ spin_lock(&_minor_lock); old_md = idr_replace(&_minor_idr, md, minor); @@ -2245,7 +2126,6 @@ static int dm_init_request_based_queue(struct mapped_device *md) blk_queue_softirq_done(md->queue, dm_softirq_done); blk_queue_prep_rq(md->queue, dm_prep_fn); blk_queue_lld_busy(md->queue, dm_lld_busy); - blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH); elv_register_queue(md->queue); @@ -2406,43 +2286,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) return r; } -static void dm_flush(struct mapped_device *md) -{ - dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); - - bio_init(&md->barrier_bio); - md->barrier_bio.bi_bdev = md->bdev; - md->barrier_bio.bi_rw = WRITE_BARRIER; - __split_and_process_bio(md, &md->barrier_bio); - - dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); -} - -static void process_barrier(struct mapped_device *md, struct bio *bio) -{ - md->barrier_error = 0; - - dm_flush(md); - - if (!bio_empty_barrier(bio)) { - __split_and_process_bio(md, bio); - /* - * If the request isn't supported, don't waste time with - * the second flush. - */ - if (md->barrier_error != -EOPNOTSUPP) - dm_flush(md); - } - - if (md->barrier_error != DM_ENDIO_REQUEUE) - bio_endio(bio, md->barrier_error); - else { - spin_lock_irq(&md->deferred_lock); - bio_list_add_head(&md->deferred, bio); - spin_unlock_irq(&md->deferred_lock); - } -} - /* * Process the deferred bios */ @@ -2452,33 +2295,27 @@ static void dm_wq_work(struct work_struct *work) work); struct bio *c; - down_write(&md->io_lock); + down_read(&md->io_lock); while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { spin_lock_irq(&md->deferred_lock); c = bio_list_pop(&md->deferred); spin_unlock_irq(&md->deferred_lock); - if (!c) { - clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); + if (!c) break; - } - up_write(&md->io_lock); + up_read(&md->io_lock); if (dm_request_based(md)) generic_make_request(c); - else { - if (c->bi_rw & REQ_HARDBARRIER) - process_barrier(md, c); - else - __split_and_process_bio(md, c); - } + else + __split_and_process_bio(md, c); - down_write(&md->io_lock); + down_read(&md->io_lock); } - up_write(&md->io_lock); + up_read(&md->io_lock); } static void dm_queue_flush(struct mapped_device *md) @@ -2488,73 +2325,6 @@ static void dm_queue_flush(struct mapped_device *md) queue_work(md->wq, &md->work); } -static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr) -{ - struct dm_rq_target_io *tio = clone->end_io_data; - - tio->info.target_request_nr = request_nr; -} - -/* Issue barrier requests to targets and wait for their completion. 
*/ -static int dm_rq_barrier(struct mapped_device *md) -{ - int i, j; - struct dm_table *map = dm_get_live_table(md); - unsigned num_targets = dm_table_get_num_targets(map); - struct dm_target *ti; - struct request *clone; - - md->barrier_error = 0; - - for (i = 0; i < num_targets; i++) { - ti = dm_table_get_target(map, i); - for (j = 0; j < ti->num_flush_requests; j++) { - clone = clone_rq(md->flush_request, md, GFP_NOIO); - dm_rq_set_target_request_nr(clone, j); - atomic_inc(&md->pending[rq_data_dir(clone)]); - map_request(ti, clone, md); - } - } - - dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); - dm_table_put(map); - - return md->barrier_error; -} - -static void dm_rq_barrier_work(struct work_struct *work) -{ - int error; - struct mapped_device *md = container_of(work, struct mapped_device, - barrier_work); - struct request_queue *q = md->queue; - struct request *rq; - unsigned long flags; - - /* - * Hold the md reference here and leave it at the last part so that - * the md can't be deleted by device opener when the barrier request - * completes. - */ - dm_get(md); - - error = dm_rq_barrier(md); - - rq = md->flush_request; - md->flush_request = NULL; - - if (error == DM_ENDIO_REQUEUE) { - spin_lock_irqsave(q->queue_lock, flags); - blk_requeue_request(q, rq); - spin_unlock_irqrestore(q->queue_lock, flags); - } else - blk_end_request_all(rq, error); - - blk_run_queue(q); - - dm_put(md); -} - /* * Swap in a new table, returning the old one for the caller to destroy. */ @@ -2677,23 +2447,17 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) * * To get all processes out of __split_and_process_bio in dm_request, * we take the write lock. To prevent any process from reentering - * __split_and_process_bio from dm_request, we set - * DMF_QUEUE_IO_TO_THREAD. - * - * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND - * and call flush_workqueue(md->wq). flush_workqueue will wait until - * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any - * further calls to __split_and_process_bio from dm_wq_work. + * __split_and_process_bio from dm_request and quiesce the thread + * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call + * flush_workqueue(md->wq). */ down_write(&md->io_lock); set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); - set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); up_write(&md->io_lock); /* - * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which - * can be kicked until md->queue is stopped. So stop md->queue before - * flushing md->wq. + * Stop md->queue before flushing md->wq in case request-based + * dm defers requests to md->wq from md->queue. 
*/ if (dm_request_based(md)) stop_queue(md->queue); diff --git a/drivers/md/linear.c b/drivers/md/linear.c index ba19060bcf3..8a2f767f26d 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -294,8 +294,8 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio) dev_info_t *tmp_dev; sector_t start_sector; - if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { - md_barrier_request(mddev, bio); + if (unlikely(bio->bi_rw & REQ_FLUSH)) { + md_flush_request(mddev, bio); return 0; } diff --git a/drivers/md/md.c b/drivers/md/md.c index f20d13e717d..225815197a3 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -36,7 +36,7 @@ #include <linux/blkdev.h> #include <linux/sysctl.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/buffer_head.h> /* for invalidate_bdev */ #include <linux/poll.h> #include <linux/ctype.h> @@ -57,6 +57,7 @@ #define DEBUG 0 #define dprintk(x...) ((void)(DEBUG && printk(x))) +static DEFINE_MUTEX(md_mutex); #ifndef MODULE static void autostart_arrays(int part); @@ -226,12 +227,12 @@ static int md_make_request(struct request_queue *q, struct bio *bio) return 0; } rcu_read_lock(); - if (mddev->suspended || mddev->barrier) { + if (mddev->suspended) { DEFINE_WAIT(__wait); for (;;) { prepare_to_wait(&mddev->sb_wait, &__wait, TASK_UNINTERRUPTIBLE); - if (!mddev->suspended && !mddev->barrier) + if (!mddev->suspended) break; rcu_read_unlock(); schedule(); @@ -282,40 +283,29 @@ EXPORT_SYMBOL_GPL(mddev_resume); int mddev_congested(mddev_t *mddev, int bits) { - if (mddev->barrier) - return 1; return mddev->suspended; } EXPORT_SYMBOL(mddev_congested); /* - * Generic barrier handling for md + * Generic flush handling for md */ -#define POST_REQUEST_BARRIER ((void*)1) - -static void md_end_barrier(struct bio *bio, int err) +static void md_end_flush(struct bio *bio, int err) { mdk_rdev_t *rdev = bio->bi_private; mddev_t *mddev = rdev->mddev; - if (err == -EOPNOTSUPP && mddev->barrier != POST_REQUEST_BARRIER) - set_bit(BIO_EOPNOTSUPP, &mddev->barrier->bi_flags); rdev_dec_pending(rdev, mddev); if (atomic_dec_and_test(&mddev->flush_pending)) { - if (mddev->barrier == POST_REQUEST_BARRIER) { - /* This was a post-request barrier */ - mddev->barrier = NULL; - wake_up(&mddev->sb_wait); - } else - /* The pre-request barrier has finished */ - schedule_work(&mddev->barrier_work); + /* The pre-request flush has finished */ + schedule_work(&mddev->flush_work); } bio_put(bio); } -static void submit_barriers(mddev_t *mddev) +static void submit_flushes(mddev_t *mddev) { mdk_rdev_t *rdev; @@ -332,60 +322,56 @@ static void submit_barriers(mddev_t *mddev) atomic_inc(&rdev->nr_pending); rcu_read_unlock(); bi = bio_alloc(GFP_KERNEL, 0); - bi->bi_end_io = md_end_barrier; + bi->bi_end_io = md_end_flush; bi->bi_private = rdev; bi->bi_bdev = rdev->bdev; atomic_inc(&mddev->flush_pending); - submit_bio(WRITE_BARRIER, bi); + submit_bio(WRITE_FLUSH, bi); rcu_read_lock(); rdev_dec_pending(rdev, mddev); } rcu_read_unlock(); } -static void md_submit_barrier(struct work_struct *ws) +static void md_submit_flush_data(struct work_struct *ws) { - mddev_t *mddev = container_of(ws, mddev_t, barrier_work); - struct bio *bio = mddev->barrier; + mddev_t *mddev = container_of(ws, mddev_t, flush_work); + struct bio *bio = mddev->flush_bio; atomic_set(&mddev->flush_pending, 1); - if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags)) - bio_endio(bio, -EOPNOTSUPP); - else if (bio->bi_size == 0) + if (bio->bi_size == 0) /* an empty barrier - all done */ bio_endio(bio, 0); else { - 
bio->bi_rw &= ~REQ_HARDBARRIER; + bio->bi_rw &= ~REQ_FLUSH; if (mddev->pers->make_request(mddev, bio)) generic_make_request(bio); - mddev->barrier = POST_REQUEST_BARRIER; - submit_barriers(mddev); } if (atomic_dec_and_test(&mddev->flush_pending)) { - mddev->barrier = NULL; + mddev->flush_bio = NULL; wake_up(&mddev->sb_wait); } } -void md_barrier_request(mddev_t *mddev, struct bio *bio) +void md_flush_request(mddev_t *mddev, struct bio *bio) { spin_lock_irq(&mddev->write_lock); wait_event_lock_irq(mddev->sb_wait, - !mddev->barrier, + !mddev->flush_bio, mddev->write_lock, /*nothing*/); - mddev->barrier = bio; + mddev->flush_bio = bio; spin_unlock_irq(&mddev->write_lock); atomic_set(&mddev->flush_pending, 1); - INIT_WORK(&mddev->barrier_work, md_submit_barrier); + INIT_WORK(&mddev->flush_work, md_submit_flush_data); - submit_barriers(mddev); + submit_flushes(mddev); if (atomic_dec_and_test(&mddev->flush_pending)) - schedule_work(&mddev->barrier_work); + schedule_work(&mddev->flush_work); } -EXPORT_SYMBOL(md_barrier_request); +EXPORT_SYMBOL(md_flush_request); /* Support for plugging. * This mirrors the plugging support in request_queue, but does not @@ -696,31 +682,6 @@ static void super_written(struct bio *bio, int error) bio_put(bio); } -static void super_written_barrier(struct bio *bio, int error) -{ - struct bio *bio2 = bio->bi_private; - mdk_rdev_t *rdev = bio2->bi_private; - mddev_t *mddev = rdev->mddev; - - if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && - error == -EOPNOTSUPP) { - unsigned long flags; - /* barriers don't appear to be supported :-( */ - set_bit(BarriersNotsupp, &rdev->flags); - mddev->barriers_work = 0; - spin_lock_irqsave(&mddev->write_lock, flags); - bio2->bi_next = mddev->biolist; - mddev->biolist = bio2; - spin_unlock_irqrestore(&mddev->write_lock, flags); - wake_up(&mddev->sb_wait); - bio_put(bio); - } else { - bio_put(bio2); - bio->bi_private = rdev; - super_written(bio, error); - } -} - void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page) { @@ -729,51 +690,28 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, * and decrement it on completion, waking up sb_wait * if zero is reached. * If an error occurred, call md_error - * - * As we might need to resubmit the request if REQ_HARDBARRIER - * causes ENOTSUPP, we allocate a spare bio... */ struct bio *bio = bio_alloc(GFP_NOIO, 1); - int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG; bio->bi_bdev = rdev->bdev; bio->bi_sector = sector; bio_add_page(bio, page, size, 0); bio->bi_private = rdev; bio->bi_end_io = super_written; - bio->bi_rw = rw; atomic_inc(&mddev->pending_writes); - if (!test_bit(BarriersNotsupp, &rdev->flags)) { - struct bio *rbio; - rw |= REQ_HARDBARRIER; - rbio = bio_clone(bio, GFP_NOIO); - rbio->bi_private = bio; - rbio->bi_end_io = super_written_barrier; - submit_bio(rw, rbio); - } else - submit_bio(rw, bio); + submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA, + bio); } void md_super_wait(mddev_t *mddev) { - /* wait for all superblock writes that were scheduled to complete. 
- * if any had to be retried (due to BARRIER problems), retry them - */ + /* wait for all superblock writes that were scheduled to complete */ DEFINE_WAIT(wq); for(;;) { prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); if (atomic_read(&mddev->pending_writes)==0) break; - while (mddev->biolist) { - struct bio *bio; - spin_lock_irq(&mddev->write_lock); - bio = mddev->biolist; - mddev->biolist = bio->bi_next ; - bio->bi_next = NULL; - spin_unlock_irq(&mddev->write_lock); - submit_bio(bio->bi_rw, bio); - } schedule(); } finish_wait(&mddev->sb_wait, &wq); @@ -1070,7 +1008,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) clear_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); clear_bit(WriteMostly, &rdev->flags); - clear_bit(BarriersNotsupp, &rdev->flags); if (mddev->raid_disks == 0) { mddev->major_version = 0; @@ -1485,7 +1422,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) clear_bit(Faulty, &rdev->flags); clear_bit(In_sync, &rdev->flags); clear_bit(WriteMostly, &rdev->flags); - clear_bit(BarriersNotsupp, &rdev->flags); if (mddev->raid_disks == 0) { mddev->major_version = 1; @@ -4504,7 +4440,6 @@ int md_run(mddev_t *mddev) /* may be over-ridden by personality */ mddev->resync_max_sectors = mddev->dev_sectors; - mddev->barriers_work = 1; mddev->ok_start_degraded = start_dirty_degraded; if (start_readonly && mddev->ro == 0) @@ -4683,7 +4618,6 @@ static void md_clean(mddev_t *mddev) mddev->recovery = 0; mddev->in_sync = 0; mddev->degraded = 0; - mddev->barriers_work = 0; mddev->safemode = 0; mddev->bitmap_info.offset = 0; mddev->bitmap_info.default_offset = 0; @@ -5951,7 +5885,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) mddev_t *mddev = mddev_find(bdev->bd_dev); int err; - lock_kernel(); + mutex_lock(&md_mutex); if (mddev->gendisk != bdev->bd_disk) { /* we are racing with mddev_put which is discarding this * bd_disk. @@ -5960,7 +5894,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) /* Wait until bdev->bd_disk is definitely gone */ flush_scheduled_work(); /* Then retry the open from the top */ - unlock_kernel(); + mutex_unlock(&md_mutex); return -ERESTARTSYS; } BUG_ON(mddev != bdev->bd_disk->private_data); @@ -5974,7 +5908,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) check_disk_size_change(mddev->gendisk, bdev); out: - unlock_kernel(); + mutex_unlock(&md_mutex); return err; } @@ -5983,10 +5917,10 @@ static int md_release(struct gendisk *disk, fmode_t mode) mddev_t *mddev = disk->private_data; BUG_ON(!mddev); - lock_kernel(); + mutex_lock(&md_mutex); atomic_dec(&mddev->openers); mddev_put(mddev); - unlock_kernel(); + mutex_unlock(&md_mutex); return 0; } diff --git a/drivers/md/md.h b/drivers/md/md.h index 3931299788d..112a2c32db0 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -87,7 +87,6 @@ struct mdk_rdev_s #define Faulty 1 /* device is known to have a fault */ #define In_sync 2 /* device is in_sync with rest of array */ #define WriteMostly 4 /* Avoid reading if at all possible */ -#define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */ #define AllReserved 6 /* If whole device is reserved for * one array */ #define AutoDetected 7 /* added by auto-detect */ @@ -273,13 +272,6 @@ struct mddev_s int degraded; /* whether md should consider * adding a spare */ - int barriers_work; /* initialised to true, cleared as soon - * as a barrier request to slave - * fails. 
Only supported - */ - struct bio *biolist; /* bios that need to be retried - * because REQ_HARDBARRIER is not supported - */ atomic_t recovery_active; /* blocks scheduled, but not written */ wait_queue_head_t recovery_wait; @@ -339,16 +331,13 @@ struct mddev_s struct attribute_group *to_remove; struct plug_handle *plug; /* if used by personality */ - /* Generic barrier handling. - * If there is a pending barrier request, all other - * writes are blocked while the devices are flushed. - * The last to finish a flush schedules a worker to - * submit the barrier request (without the barrier flag), - * then submit more flush requests. + /* Generic flush handling. + * The last to finish preflush schedules a worker to submit + * the rest of the request (without the REQ_FLUSH flag). */ - struct bio *barrier; + struct bio *flush_bio; atomic_t flush_pending; - struct work_struct barrier_work; + struct work_struct flush_work; struct work_struct event_work; /* used by dm to report failure event */ }; @@ -502,7 +491,7 @@ extern void md_done_sync(mddev_t *mddev, int blocks, int ok); extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); extern int mddev_congested(mddev_t *mddev, int bits); -extern void md_barrier_request(mddev_t *mddev, struct bio *bio); +extern void md_flush_request(mddev_t *mddev, struct bio *bio); extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, sector_t sector, int size, struct page *page); extern void md_super_wait(mddev_t *mddev); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 0307d217e7a..6d7ddf32ef2 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -142,8 +142,8 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio) struct multipath_bh * mp_bh; struct multipath_info *multipath; - if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { - md_barrier_request(mddev, bio); + if (unlikely(bio->bi_rw & REQ_FLUSH)) { + md_flush_request(mddev, bio); return 0; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 6f7af46d623..a39f4c355e5 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -483,8 +483,8 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio) struct strip_zone *zone; mdk_rdev_t *tmp_dev; - if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { - md_barrier_request(mddev, bio); + if (unlikely(bio->bi_rw & REQ_FLUSH)) { + md_flush_request(mddev, bio); return 0; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index ad83a4dcadc..378a25894c5 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -319,83 +319,74 @@ static void raid1_end_write_request(struct bio *bio, int error) if (r1_bio->bios[mirror] == bio) break; - if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { - set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); - set_bit(R1BIO_BarrierRetry, &r1_bio->state); - r1_bio->mddev->barriers_work = 0; - /* Don't rdev_dec_pending in this branch - keep it for the retry */ - } else { + /* + * 'one mirror IO has finished' event handler: + */ + r1_bio->bios[mirror] = NULL; + to_put = bio; + if (!uptodate) { + md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); + /* an I/O failed, we can't clear the bitmap */ + set_bit(R1BIO_Degraded, &r1_bio->state); + } else /* - * this branch is our 'one mirror IO has finished' event handler: + * Set R1BIO_Uptodate in our master bio, so that we + * will return a good error code for to the higher + * levels even if IO on some other mirrored buffer + * fails. + * + * The 'master' represents the composite IO operation + * to user-side. 
So if something waits for IO, then it + * will wait for the 'master' bio. */ - r1_bio->bios[mirror] = NULL; - to_put = bio; - if (!uptodate) { - md_error(r1_bio->mddev, conf->mirrors[mirror].rdev); - /* an I/O failed, we can't clear the bitmap */ - set_bit(R1BIO_Degraded, &r1_bio->state); - } else - /* - * Set R1BIO_Uptodate in our master bio, so that - * we will return a good error code for to the higher - * levels even if IO on some other mirrored buffer fails. - * - * The 'master' represents the composite IO operation to - * user-side. So if something waits for IO, then it will - * wait for the 'master' bio. - */ - set_bit(R1BIO_Uptodate, &r1_bio->state); - - update_head_pos(mirror, r1_bio); - - if (behind) { - if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) - atomic_dec(&r1_bio->behind_remaining); - - /* In behind mode, we ACK the master bio once the I/O has safely - * reached all non-writemostly disks. Setting the Returned bit - * ensures that this gets done only once -- we don't ever want to - * return -EIO here, instead we'll wait */ - - if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && - test_bit(R1BIO_Uptodate, &r1_bio->state)) { - /* Maybe we can return now */ - if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { - struct bio *mbio = r1_bio->master_bio; - PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", - (unsigned long long) mbio->bi_sector, - (unsigned long long) mbio->bi_sector + - (mbio->bi_size >> 9) - 1); - bio_endio(mbio, 0); - } + set_bit(R1BIO_Uptodate, &r1_bio->state); + + update_head_pos(mirror, r1_bio); + + if (behind) { + if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) + atomic_dec(&r1_bio->behind_remaining); + + /* + * In behind mode, we ACK the master bio once the I/O + * has safely reached all non-writemostly + * disks. Setting the Returned bit ensures that this + * gets done only once -- we don't ever want to return + * -EIO here, instead we'll wait + */ + if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && + test_bit(R1BIO_Uptodate, &r1_bio->state)) { + /* Maybe we can return now */ + if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { + struct bio *mbio = r1_bio->master_bio; + PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", + (unsigned long long) mbio->bi_sector, + (unsigned long long) mbio->bi_sector + + (mbio->bi_size >> 9) - 1); + bio_endio(mbio, 0); } } - rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); } + rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); + /* - * * Let's see if all mirrored write operations have finished * already. 
*/ if (atomic_dec_and_test(&r1_bio->remaining)) { - if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) - reschedule_retry(r1_bio); - else { - /* it really is the end of this request */ - if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { - /* free extra copy of the data pages */ - int i = bio->bi_vcnt; - while (i--) - safe_put_page(bio->bi_io_vec[i].bv_page); - } - /* clear the bitmap if all writes complete successfully */ - bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, - r1_bio->sectors, - !test_bit(R1BIO_Degraded, &r1_bio->state), - behind); - md_write_end(r1_bio->mddev); - raid_end_bio_io(r1_bio); + if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { + /* free extra copy of the data pages */ + int i = bio->bi_vcnt; + while (i--) + safe_put_page(bio->bi_io_vec[i].bv_page); } + /* clear the bitmap if all writes complete successfully */ + bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, + r1_bio->sectors, + !test_bit(R1BIO_Degraded, &r1_bio->state), + behind); + md_write_end(r1_bio->mddev); + raid_end_bio_io(r1_bio); } if (to_put) @@ -788,16 +779,13 @@ static int make_request(mddev_t *mddev, struct bio * bio) struct page **behind_pages = NULL; const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); - unsigned long do_barriers; + const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); mdk_rdev_t *blocked_rdev; /* * Register the new request and wait if the reconstruction * thread has put up a bar for new requests. * Continue immediately if no resync is active currently. - * We test barriers_work *after* md_write_start as md_write_start - * may cause the first superblock write, and that will check out - * if barriers work. */ md_write_start(mddev, bio); /* wait on superblock update early */ @@ -821,13 +809,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) } finish_wait(&conf->wait_barrier, &w); } - if (unlikely(!mddev->barriers_work && - (bio->bi_rw & REQ_HARDBARRIER))) { - if (rw == WRITE) - md_write_end(mddev); - bio_endio(bio, -EOPNOTSUPP); - return 0; - } wait_barrier(conf); @@ -959,10 +940,6 @@ static int make_request(mddev_t *mddev, struct bio * bio) atomic_set(&r1_bio->remaining, 0); atomic_set(&r1_bio->behind_remaining, 0); - do_barriers = bio->bi_rw & REQ_HARDBARRIER; - if (do_barriers) - set_bit(R1BIO_Barrier, &r1_bio->state); - bio_list_init(&bl); for (i = 0; i < disks; i++) { struct bio *mbio; @@ -975,7 +952,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; mbio->bi_bdev = conf->mirrors[i].rdev->bdev; mbio->bi_end_io = raid1_end_write_request; - mbio->bi_rw = WRITE | do_barriers | do_sync; + mbio->bi_rw = WRITE | do_flush_fua | do_sync; mbio->bi_private = r1_bio; if (behind_pages) { @@ -1634,41 +1611,6 @@ static void raid1d(mddev_t *mddev) if (test_bit(R1BIO_IsSync, &r1_bio->state)) { sync_request_write(mddev, r1_bio); unplug = 1; - } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { - /* some requests in the r1bio were REQ_HARDBARRIER - * requests which failed with -EOPNOTSUPP. Hohumm.. - * Better resubmit without the barrier. - * We know which devices to resubmit for, because - * all others have had their bios[] entry cleared. - * We already have a nr_pending reference on these rdevs. 
- */ - int i; - const unsigned long do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); - clear_bit(R1BIO_BarrierRetry, &r1_bio->state); - clear_bit(R1BIO_Barrier, &r1_bio->state); - for (i=0; i < conf->raid_disks; i++) - if (r1_bio->bios[i]) - atomic_inc(&r1_bio->remaining); - for (i=0; i < conf->raid_disks; i++) - if (r1_bio->bios[i]) { - struct bio_vec *bvec; - int j; - - bio = bio_clone(r1_bio->master_bio, GFP_NOIO); - /* copy pages from the failed bio, as - * this might be a write-behind device */ - __bio_for_each_segment(bvec, bio, j, 0) - bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page; - bio_put(r1_bio->bios[i]); - bio->bi_sector = r1_bio->sector + - conf->mirrors[i].rdev->data_offset; - bio->bi_bdev = conf->mirrors[i].rdev->bdev; - bio->bi_end_io = raid1_end_write_request; - bio->bi_rw = WRITE | do_sync; - bio->bi_private = r1_bio; - r1_bio->bios[i] = bio; - generic_make_request(bio); - } } else { int disk; @@ -1839,7 +1781,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i /* take from bio_init */ bio->bi_next = NULL; + bio->bi_flags &= ~(BIO_POOL_MASK-1); bio->bi_flags |= 1 << BIO_UPTODATE; + bio->bi_comp_cpu = -1; bio->bi_rw = READ; bio->bi_vcnt = 0; bio->bi_idx = 0; @@ -1912,7 +1856,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) break; BUG_ON(sync_blocks < (PAGE_SIZE>>9)); - if (len > (sync_blocks<<9)) + if ((len >> 9) > sync_blocks) len = sync_blocks<<9; } diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 5f2d443ae28..adf8cfd7331 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -117,8 +117,6 @@ struct r1bio_s { #define R1BIO_IsSync 1 #define R1BIO_Degraded 2 #define R1BIO_BehindIO 3 -#define R1BIO_Barrier 4 -#define R1BIO_BarrierRetry 5 /* For write-behind requests, we call bi_end_io when * the last non-write-behind device completes, providing * any write was successful. 
Otherwise we call when diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 84718383124..f0d082f749b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -800,12 +800,13 @@ static int make_request(mddev_t *mddev, struct bio * bio) int chunk_sects = conf->chunk_mask + 1; const int rw = bio_data_dir(bio); const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); + const unsigned long do_fua = (bio->bi_rw & REQ_FUA); struct bio_list bl; unsigned long flags; mdk_rdev_t *blocked_rdev; - if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { - md_barrier_request(mddev, bio); + if (unlikely(bio->bi_rw & REQ_FLUSH)) { + md_flush_request(mddev, bio); return 0; } @@ -965,7 +966,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) conf->mirrors[d].rdev->data_offset; mbio->bi_bdev = conf->mirrors[d].rdev->bdev; mbio->bi_end_io = raid10_end_write_request; - mbio->bi_rw = WRITE | do_sync; + mbio->bi_rw = WRITE | do_sync | do_fua; mbio->bi_private = r10_bio; atomic_inc(&r10_bio->remaining); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 69b0a169e43..31140d1259d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -506,9 +506,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) int rw; struct bio *bi; mdk_rdev_t *rdev; - if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) - rw = WRITE; - else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) + if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { + if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) + rw = WRITE_FUA; + else + rw = WRITE; + } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) rw = READ; else continue; @@ -1031,6 +1034,8 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { + if (wbi->bi_rw & REQ_FUA) + set_bit(R5_WantFUA, &dev->flags); tx = async_copy_data(1, wbi, dev->page, dev->sector, tx); wbi = r5_next_bio(wbi, dev->sector); @@ -1048,15 +1053,22 @@ static void ops_complete_reconstruct(void *stripe_head_ref) int pd_idx = sh->pd_idx; int qd_idx = sh->qd_idx; int i; + bool fua = false; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); + for (i = disks; i--; ) + fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); + for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; - if (dev->written || i == pd_idx || i == qd_idx) + if (dev->written || i == pd_idx || i == qd_idx) { set_bit(R5_UPTODATE, &dev->flags); + if (fua) + set_bit(R5_WantFUA, &dev->flags); + } } if (sh->reconstruct_state == reconstruct_state_drain_run) @@ -3281,7 +3293,7 @@ static void handle_stripe5(struct stripe_head *sh) if (dec_preread_active) { /* We delay this until after ops_run_io so that if make_request - * is waiting on a barrier, it won't continue until the writes + * is waiting on a flush, it won't continue until the writes * have actually been submitted. */ atomic_dec(&conf->preread_active_stripes); @@ -3583,7 +3595,7 @@ static void handle_stripe6(struct stripe_head *sh) if (dec_preread_active) { /* We delay this until after ops_run_io so that if make_request - * is waiting on a barrier, it won't continue until the writes + * is waiting on a flush, it won't continue until the writes * have actually been submitted. */ atomic_dec(&conf->preread_active_stripes); @@ -3978,14 +3990,8 @@ static int make_request(mddev_t *mddev, struct bio * bi) const int rw = bio_data_dir(bi); int remaining; - if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) { - /* Drain all pending writes. 
We only really need - * to ensure they have been submitted, but this is - * easier. - */ - mddev->pers->quiesce(mddev, 1); - mddev->pers->quiesce(mddev, 0); - md_barrier_request(mddev, bi); + if (unlikely(bi->bi_rw & REQ_FLUSH)) { + md_flush_request(mddev, bi); return 0; } @@ -4103,7 +4109,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); - if (mddev->barrier && + if ((bi->bi_rw & REQ_SYNC) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); release_stripe(sh); @@ -4126,13 +4132,6 @@ static int make_request(mddev_t *mddev, struct bio * bi) bio_endio(bi, 0); } - if (mddev->barrier) { - /* We need to wait for the stripes to all be handled. - * So: wait for preread_active_stripes to drop to 0. - */ - wait_event(mddev->thread->wqueue, - atomic_read(&conf->preread_active_stripes) == 0); - } return 0; } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 36eaed5dfd6..2ace0582b40 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -275,6 +275,7 @@ struct r6_state { * filling */ #define R5_Wantdrain 13 /* dev->towrite needs to be drained */ +#define R5_WantFUA 14 /* Write should be FUA */ /* * Write method */ diff --git a/drivers/media/IR/imon.c b/drivers/media/IR/imon.c index c185422ef28..faed5a332c7 100644 --- a/drivers/media/IR/imon.c +++ b/drivers/media/IR/imon.c @@ -151,7 +151,8 @@ static const struct file_operations vfd_fops = { .owner = THIS_MODULE, .open = &display_open, .write = &vfd_write, - .release = &display_close + .release = &display_close, + .llseek = noop_llseek, }; /* lcd character device file operations */ @@ -159,7 +160,8 @@ static const struct file_operations lcd_fops = { .owner = THIS_MODULE, .open = &display_open, .write = &lcd_write, - .release = &display_close + .release = &display_close, + .llseek = noop_llseek, }; enum { diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index 7e82a9df726..7961d59f5ca 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c @@ -319,7 +319,7 @@ static void ir_timer_keyup(unsigned long cookie) * a keyup event might follow immediately after the keydown. */ spin_lock_irqsave(&ir->keylock, flags); - if (time_is_after_eq_jiffies(ir->keyup_jiffies)) + if (time_is_before_eq_jiffies(ir->keyup_jiffies)) ir_keyup(ir); spin_unlock_irqrestore(&ir->keylock, flags); } @@ -510,6 +510,13 @@ int __ir_input_register(struct input_dev *input_dev, (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_IR_RAW) ? " in raw mode" : ""); + /* + * Default delay of 250ms is too short for some protocols, expecially + * since the timeout is currently set to 250ms. Increase it to 500ms, + * to avoid wrong repetition of the keycodes. 
+ */ + input_dev->rep[REP_DELAY] = 500; + return 0; out_event: diff --git a/drivers/media/IR/ir-lirc-codec.c b/drivers/media/IR/ir-lirc-codec.c index 77b5946413c..1983cd3f399 100644 --- a/drivers/media/IR/ir-lirc-codec.c +++ b/drivers/media/IR/ir-lirc-codec.c @@ -235,6 +235,7 @@ static struct file_operations lirc_fops = { .poll = lirc_dev_fop_poll, .open = lirc_dev_fop_open, .release = lirc_dev_fop_close, + .llseek = no_llseek, }; static int ir_lirc_register(struct input_dev *input_dev) @@ -267,7 +268,7 @@ static int ir_lirc_register(struct input_dev *input_dev) features |= LIRC_CAN_SET_SEND_CARRIER; if (ir_dev->props->s_tx_duty_cycle) - features |= LIRC_CAN_SET_REC_DUTY_CYCLE; + features |= LIRC_CAN_SET_SEND_DUTY_CYCLE; } if (ir_dev->props->s_rx_carrier_range) diff --git a/drivers/media/IR/ir-raw-event.c b/drivers/media/IR/ir-raw-event.c index 43094e7eccf..8e0e1b1f8c8 100644 --- a/drivers/media/IR/ir-raw-event.c +++ b/drivers/media/IR/ir-raw-event.c @@ -279,9 +279,11 @@ int ir_raw_event_register(struct input_dev *input_dev) "rc%u", (unsigned int)ir->devno); if (IS_ERR(ir->raw->thread)) { + int ret = PTR_ERR(ir->raw->thread); + kfree(ir->raw); ir->raw = NULL; - return PTR_ERR(ir->raw->thread); + return ret; } mutex_lock(&ir_raw_handler_lock); diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c index 96dafc425c8..46d42467f9b 100644 --- a/drivers/media/IR/ir-sysfs.c +++ b/drivers/media/IR/ir-sysfs.c @@ -67,13 +67,14 @@ static ssize_t show_protocols(struct device *d, char *tmp = buf; int i; - if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { + if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { enabled = ir_dev->rc_tab.ir_type; allowed = ir_dev->props->allowed_protos; - } else { + } else if (ir_dev->raw) { enabled = ir_dev->raw->enabled_protocols; allowed = ir_raw_get_allowed_protocols(); - } + } else + return sprintf(tmp, "[builtin]\n"); IR_dprintk(1, "allowed - 0x%llx, enabled - 0x%llx\n", (long long)allowed, @@ -121,10 +122,14 @@ static ssize_t store_protocols(struct device *d, int rc, i, count = 0; unsigned long flags; - if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) + if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) type = ir_dev->rc_tab.ir_type; - else + else if (ir_dev->raw) type = ir_dev->raw->enabled_protocols; + else { + IR_dprintk(1, "Protocol switching not supported\n"); + return -EINVAL; + } while ((tmp = strsep((char **) &data, " \n")) != NULL) { if (!*tmp) @@ -185,7 +190,7 @@ static ssize_t store_protocols(struct device *d, } } - if (ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { + if (ir_dev->props && ir_dev->props->driver_type == RC_DRIVER_SCANCODE) { spin_lock_irqsave(&ir_dev->rc_tab.lock, flags); ir_dev->rc_tab.ir_type = type; spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags); diff --git a/drivers/media/IR/keymaps/rc-rc6-mce.c b/drivers/media/IR/keymaps/rc-rc6-mce.c index 64264f7f838..39557ad401b 100644 --- a/drivers/media/IR/keymaps/rc-rc6-mce.c +++ b/drivers/media/IR/keymaps/rc-rc6-mce.c @@ -19,6 +19,7 @@ static struct ir_scancode rc6_mce[] = { { 0x800f0416, KEY_PLAY }, { 0x800f0418, KEY_PAUSE }, + { 0x800f046e, KEY_PLAYPAUSE }, { 0x800f0419, KEY_STOP }, { 0x800f0417, KEY_RECORD }, @@ -37,6 +38,8 @@ static struct ir_scancode rc6_mce[] = { { 0x800f0411, KEY_VOLUMEDOWN }, { 0x800f0412, KEY_CHANNELUP }, { 0x800f0413, KEY_CHANNELDOWN }, + { 0x800f043a, KEY_BRIGHTNESSUP }, + { 0x800f0480, KEY_BRIGHTNESSDOWN }, { 0x800f0401, KEY_NUMERIC_1 }, { 0x800f0402, KEY_NUMERIC_2 }, diff --git 
a/drivers/media/IR/lirc_dev.c b/drivers/media/IR/lirc_dev.c index 899891bec35..0acf6396e06 100644 --- a/drivers/media/IR/lirc_dev.c +++ b/drivers/media/IR/lirc_dev.c @@ -163,6 +163,7 @@ static struct file_operations fops = { .unlocked_ioctl = lirc_dev_fop_ioctl, .open = lirc_dev_fop_open, .release = lirc_dev_fop_close, + .llseek = noop_llseek, }; static int lirc_cdev_add(struct irctl *ir) @@ -460,6 +461,8 @@ error: mutex_unlock(&lirc_dev_lock); + nonseekable_open(inode, file); + return retval; } EXPORT_SYMBOL(lirc_dev_fop_open); diff --git a/drivers/media/IR/mceusb.c b/drivers/media/IR/mceusb.c index ac6bb2c01a4..bc620e10ef7 100644 --- a/drivers/media/IR/mceusb.c +++ b/drivers/media/IR/mceusb.c @@ -120,6 +120,10 @@ static struct usb_device_id mceusb_dev_table[] = { { USB_DEVICE(VENDOR_PHILIPS, 0x0613) }, /* Philips eHome Infrared Transceiver */ { USB_DEVICE(VENDOR_PHILIPS, 0x0815) }, + /* Philips/Spinel plus IR transceiver for ASUS */ + { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, + /* Philips/Spinel plus IR transceiver for ASUS */ + { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, /* Realtek MCE IR Receiver */ { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, /* SMK/Toshiba G83C0004D410 */ diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index a28541b2b1a..bad2cedb8d9 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -19,6 +19,7 @@ comment "Multimedia core support" config VIDEO_DEV tristate "Video For Linux" + depends on BKL # used in many drivers for ioctl handling, need to kill ---help--- V4L core support for video capture and overlay devices, webcams and AM/FM radio cards. diff --git a/drivers/media/dvb/bt8xx/dst_ca.c b/drivers/media/dvb/bt8xx/dst_ca.c index cf870516284..48e48e8af55 100644 --- a/drivers/media/dvb/bt8xx/dst_ca.c +++ b/drivers/media/dvb/bt8xx/dst_ca.c @@ -22,7 +22,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/string.h> #include <linux/dvb/ca.h> #include "dvbdev.h" @@ -52,6 +52,7 @@ } while(0) +static DEFINE_MUTEX(dst_ca_mutex); static unsigned int verbose = 5; module_param(verbose, int, 0644); MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)"); @@ -564,7 +565,7 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct void __user *arg = (void __user *)ioctl_arg; int result = 0; - lock_kernel(); + mutex_lock(&dst_ca_mutex); dvbdev = file->private_data; state = (struct dst_state *)dvbdev->priv; p_ca_message = kmalloc(sizeof (struct ca_msg), GFP_KERNEL); @@ -652,7 +653,7 @@ static long dst_ca_ioctl(struct file *file, unsigned int cmd, unsigned long ioct kfree (p_ca_slot_info); kfree (p_ca_caps); - unlock_kernel(); + mutex_unlock(&dst_ca_mutex); return result; } @@ -694,7 +695,8 @@ static const struct file_operations dst_ca_fops = { .open = dst_ca_open, .release = dst_ca_release, .read = dst_ca_read, - .write = dst_ca_write + .write = dst_ca_write, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_ca = { diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c index 0042306ea11..ad1f61d301e 100644 --- a/drivers/media/dvb/dvb-core/dmxdev.c +++ b/drivers/media/dvb/dvb-core/dmxdev.c @@ -25,7 +25,6 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/module.h> -#include <linux/smp_lock.h> #include <linux/poll.h> #include <linux/ioctl.h> #include <linux/wait.h> @@ -1088,13 +1087,7 @@ static int dvb_demux_do_ioctl(struct file *file, static long dvb_demux_ioctl(struct file 
*file, unsigned int cmd, unsigned long arg) { - int ret; - - lock_kernel(); - ret = dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl); - unlock_kernel(); - - return ret; + return dvb_usercopy(file, cmd, arg, dvb_demux_do_ioctl); } static unsigned int dvb_demux_poll(struct file *file, poll_table *wait) @@ -1150,6 +1143,7 @@ static const struct file_operations dvb_demux_fops = { .open = dvb_demux_open, .release = dvb_demux_release, .poll = dvb_demux_poll, + .llseek = default_llseek, }; static struct dvb_device dvbdev_demux = { @@ -1186,13 +1180,7 @@ static int dvb_dvr_do_ioctl(struct file *file, static long dvb_dvr_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - int ret; - - lock_kernel(); - ret = dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl); - unlock_kernel(); - - return ret; + return dvb_usercopy(file, cmd, arg, dvb_dvr_do_ioctl); } static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait) @@ -1225,6 +1213,7 @@ static const struct file_operations dvb_dvr_fops = { .open = dvb_dvr_open, .release = dvb_dvr_release, .poll = dvb_dvr_poll, + .llseek = default_llseek, }; static struct dvb_device dvbdev_dvr = { diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c index cb97e6b8543..4d0646da608 100644 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c @@ -1259,13 +1259,7 @@ static int dvb_ca_en50221_io_do_ioctl(struct file *file, static long dvb_ca_en50221_io_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - int ret; - - lock_kernel(); - ret = dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); - unlock_kernel(); - - return ret; + return dvb_usercopy(file, cmd, arg, dvb_ca_en50221_io_do_ioctl); } @@ -1628,6 +1622,7 @@ static const struct file_operations dvb_ca_fops = { .open = dvb_ca_en50221_io_open, .release = dvb_ca_en50221_io_release, .poll = dvb_ca_en50221_io_poll, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_ca = { diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index 4d45b7d6b3f..970c9b8882d 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -2034,7 +2034,8 @@ static const struct file_operations dvb_frontend_fops = { .unlocked_ioctl = dvb_generic_ioctl, .poll = dvb_frontend_poll, .open = dvb_frontend_open, - .release = dvb_frontend_release + .release = dvb_frontend_release, + .llseek = noop_llseek, }; int dvb_register_frontend(struct dvb_adapter* dvb, diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c index 6c3a8a06cca..4df42aaae7f 100644 --- a/drivers/media/dvb/dvb-core/dvb_net.c +++ b/drivers/media/dvb/dvb-core/dvb_net.c @@ -59,7 +59,6 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/dvb/net.h> -#include <linux/smp_lock.h> #include <linux/uio.h> #include <asm/uaccess.h> #include <linux/crc32.h> @@ -1445,13 +1444,7 @@ static int dvb_net_do_ioctl(struct file *file, static long dvb_net_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - int ret; - - lock_kernel(); - ret = dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl); - unlock_kernel(); - - return ret; + return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl); } static int dvb_net_close(struct inode *inode, struct file *file) @@ -1475,6 +1468,7 @@ static const struct file_operations dvb_net_fops = { .unlocked_ioctl = dvb_net_ioctl, .open = dvb_generic_open, .release = dvb_net_close, + .llseek = 
noop_llseek, }; static struct dvb_device dvbdev_net = { diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c index b915c39d782..f7328777595 100644 --- a/drivers/media/dvb/dvb-core/dvbdev.c +++ b/drivers/media/dvb/dvb-core/dvbdev.c @@ -32,9 +32,9 @@ #include <linux/fs.h> #include <linux/cdev.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include "dvbdev.h" +static DEFINE_MUTEX(dvbdev_mutex); static int dvbdev_debug; module_param(dvbdev_debug, int, 0644); @@ -68,7 +68,7 @@ static int dvb_device_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev; - lock_kernel(); + mutex_lock(&dvbdev_mutex); down_read(&minor_rwsem); dvbdev = dvb_minors[iminor(inode)]; @@ -91,12 +91,12 @@ static int dvb_device_open(struct inode *inode, struct file *file) } fops_put(old_fops); up_read(&minor_rwsem); - unlock_kernel(); + mutex_unlock(&dvbdev_mutex); return err; } fail: up_read(&minor_rwsem); - unlock_kernel(); + mutex_unlock(&dvbdev_mutex); return -ENODEV; } @@ -105,6 +105,7 @@ static const struct file_operations dvb_device_fops = { .owner = THIS_MODULE, .open = dvb_device_open, + .llseek = noop_llseek, }; static struct cdev dvb_device_cdev; @@ -158,7 +159,6 @@ long dvb_generic_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct dvb_device *dvbdev = file->private_data; - int ret; if (!dvbdev) return -ENODEV; @@ -166,11 +166,7 @@ long dvb_generic_ioctl(struct file *file, if (!dvbdev->kernel_ioctl) return -EINVAL; - lock_kernel(); - ret = dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl); - unlock_kernel(); - - return ret; + return dvb_usercopy(file, cmd, arg, dvbdev->kernel_ioctl); } EXPORT_SYMBOL(dvb_generic_ioctl); @@ -421,8 +417,10 @@ int dvb_usercopy(struct file *file, } /* call driver */ + mutex_lock(&dvbdev_mutex); if ((err = func(file, cmd, parg)) == -ENOIOCTLCMD) err = -EINVAL; + mutex_unlock(&dvbdev_mutex); if (err < 0) goto out; diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c index fe818348b8a..48397f103d3 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_core.c +++ b/drivers/media/dvb/dvb-usb/dib0700_core.c @@ -673,9 +673,6 @@ static int dib0700_probe(struct usb_interface *intf, else dev->props.rc.core.bulk_mode = false; - /* Need a higher delay, to avoid wrong repeat */ - dev->rc_input_dev->rep[REP_DELAY] = 500; - dib0700_rc_setup(dev); return 0; diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index f634d2e784b..e06acd1fecb 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c @@ -940,6 +940,58 @@ static int stk7070p_frontend_attach(struct dvb_usb_adapter *adap) return adap->fe == NULL ? 
-ENODEV : 0; } +/* STK7770P */ +static struct dib7000p_config dib7770p_dib7000p_config = { + .output_mpeg2_in_188_bytes = 1, + + .agc_config_count = 1, + .agc = &dib7070_agc_config, + .bw = &dib7070_bw_config_12_mhz, + .tuner_is_baseband = 1, + .spur_protect = 1, + + .gpio_dir = DIB7000P_GPIO_DEFAULT_DIRECTIONS, + .gpio_val = DIB7000P_GPIO_DEFAULT_VALUES, + .gpio_pwm_pos = DIB7000P_GPIO_DEFAULT_PWM_POS, + + .hostbus_diversity = 1, + .enable_current_mirror = 1, + .disable_sample_and_hold = 0, +}; + +static int stk7770p_frontend_attach(struct dvb_usb_adapter *adap) +{ + struct usb_device_descriptor *p = &adap->dev->udev->descriptor; + if (p->idVendor == cpu_to_le16(USB_VID_PINNACLE) && + p->idProduct == cpu_to_le16(USB_PID_PINNACLE_PCTV72E)) + dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 0); + else + dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1); + msleep(10); + dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1); + dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1); + dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1); + dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0); + + dib0700_ctrl_clock(adap->dev, 72, 1); + + msleep(10); + dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1); + msleep(10); + dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1); + + if (dib7000p_i2c_enumeration(&adap->dev->i2c_adap, 1, 18, + &dib7770p_dib7000p_config) != 0) { + err("%s: dib7000p_i2c_enumeration failed. Cannot continue\n", + __func__); + return -ENODEV; + } + + adap->fe = dvb_attach(dib7000p_attach, &adap->dev->i2c_adap, 0x80, + &dib7770p_dib7000p_config); + return adap->fe == NULL ? -ENODEV : 0; +} + /* DIB807x generic */ static struct dibx000_agc_config dib807x_agc_config[2] = { { @@ -1781,7 +1833,7 @@ struct usb_device_id dib0700_usb_id_table[] = { /* 60 */{ USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_T_XXS_2) }, { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XPVR) }, { USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_STK807XP) }, - { USB_DEVICE(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD) }, + { USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x000, 0x3f00) }, { USB_DEVICE(USB_VID_EVOLUTEPC, USB_PID_TVWAY_PLUS) }, /* 65 */{ USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV73ESE) }, { USB_DEVICE(USB_VID_PINNACLE, USB_PID_PINNACLE_PCTV282E) }, @@ -2406,7 +2458,7 @@ struct dvb_usb_device_properties dib0700_devices[] = { .pid_filter_count = 32, .pid_filter = stk70x0p_pid_filter, .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, - .frontend_attach = stk7070p_frontend_attach, + .frontend_attach = stk7770p_frontend_attach, .tuner_attach = dib7770p_tuner_attach, DIB0700_DEFAULT_STREAMING_CONFIG(0x02), diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c index 6b22ec64ab0..f896337b453 100644 --- a/drivers/media/dvb/dvb-usb/opera1.c +++ b/drivers/media/dvb/dvb-usb/opera1.c @@ -483,9 +483,7 @@ static int opera1_xilinx_load_firmware(struct usb_device *dev, } } kfree(p); - if (fw) { - release_firmware(fw); - } + release_firmware(fw); return ret; } diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c index d3c2cf60de7..8ffb565f070 100644 --- a/drivers/media/dvb/firewire/firedtv-ci.c +++ b/drivers/media/dvb/firewire/firedtv-ci.c @@ -220,6 +220,7 @@ static const struct file_operations fdtv_ca_fops = { .open = dvb_generic_open, .release = dvb_generic_release, .poll = fdtv_ca_io_poll, + .llseek = noop_llseek, }; static struct dvb_device fdtv_ca = { diff --git a/drivers/media/dvb/frontends/dib7000p.c b/drivers/media/dvb/frontends/dib7000p.c index 
2e28b973dfd..3aed0d43392 100644 --- a/drivers/media/dvb/frontends/dib7000p.c +++ b/drivers/media/dvb/frontends/dib7000p.c @@ -260,6 +260,9 @@ static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_ad // dprintk( "908: %x, 909: %x\n", reg_908, reg_909); + reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4; + reg_908 |= (state->cfg.enable_current_mirror & 1) << 7; + dib7000p_write_word(state, 908, reg_908); dib7000p_write_word(state, 909, reg_909); } @@ -778,7 +781,10 @@ static void dib7000p_set_channel(struct dib7000p_state *state, struct dvb_fronte default: case GUARD_INTERVAL_1_32: value *= 1; break; } - state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO + if (state->cfg.diversity_delay == 0) + state->div_sync_wait = (value * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo + else + state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for one DVSY-fifo /* deactive the possibility of diversity reception if extended interleaver */ state->div_force_off = !1 && ch->u.ofdm.transmission_mode != TRANSMISSION_MODE_8K; diff --git a/drivers/media/dvb/frontends/dib7000p.h b/drivers/media/dvb/frontends/dib7000p.h index 805dd13a97e..da17345bf5b 100644 --- a/drivers/media/dvb/frontends/dib7000p.h +++ b/drivers/media/dvb/frontends/dib7000p.h @@ -33,6 +33,11 @@ struct dib7000p_config { int (*agc_control) (struct dvb_frontend *, u8 before); u8 output_mode; + u8 disable_sample_and_hold : 1; + + u8 enable_current_mirror : 1; + u8 diversity_delay; + }; #define DEFAULT_DIB7000P_I2C_ADDRESS 18 diff --git a/drivers/media/dvb/siano/smscoreapi.c b/drivers/media/dvb/siano/smscoreapi.c index d93468cd3a8..ff3b0fa901b 100644 --- a/drivers/media/dvb/siano/smscoreapi.c +++ b/drivers/media/dvb/siano/smscoreapi.c @@ -1098,33 +1098,26 @@ EXPORT_SYMBOL_GPL(smscore_onresponse); * * @return pointer to descriptor on success, NULL on error. */ -struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev) + +struct smscore_buffer_t *get_entry(struct smscore_device_t *coredev) { struct smscore_buffer_t *cb = NULL; unsigned long flags; - DEFINE_WAIT(wait); - spin_lock_irqsave(&coredev->bufferslock, flags); - - /* This function must return a valid buffer, since the buffer list is - * finite, we check that there is an available buffer, if not, we wait - * until such buffer become available. 
- */ - - prepare_to_wait(&coredev->buffer_mng_waitq, &wait, TASK_INTERRUPTIBLE); - if (list_empty(&coredev->buffers)) { - spin_unlock_irqrestore(&coredev->bufferslock, flags); - schedule(); - spin_lock_irqsave(&coredev->bufferslock, flags); + if (!list_empty(&coredev->buffers)) { + cb = (struct smscore_buffer_t *) coredev->buffers.next; + list_del(&cb->entry); } + spin_unlock_irqrestore(&coredev->bufferslock, flags); + return cb; +} - finish_wait(&coredev->buffer_mng_waitq, &wait); - - cb = (struct smscore_buffer_t *) coredev->buffers.next; - list_del(&cb->entry); +struct smscore_buffer_t *smscore_getbuffer(struct smscore_device_t *coredev) +{ + struct smscore_buffer_t *cb = NULL; - spin_unlock_irqrestore(&coredev->bufferslock, flags); + wait_event(coredev->buffer_mng_waitq, (cb = get_entry(coredev))); return cb; } diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c index a6be529eec5..893fbc57c72 100644 --- a/drivers/media/dvb/ttpci/av7110.c +++ b/drivers/media/dvb/ttpci/av7110.c @@ -730,6 +730,7 @@ static const struct file_operations dvb_osd_fops = { .unlocked_ioctl = dvb_generic_ioctl, .open = dvb_generic_open, .release = dvb_generic_release, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_osd = { diff --git a/drivers/media/dvb/ttpci/av7110_av.c b/drivers/media/dvb/ttpci/av7110_av.c index 13efba942da..6ef3996565a 100644 --- a/drivers/media/dvb/ttpci/av7110_av.c +++ b/drivers/media/dvb/ttpci/av7110_av.c @@ -1521,6 +1521,7 @@ static const struct file_operations dvb_video_fops = { .open = dvb_video_open, .release = dvb_video_release, .poll = dvb_video_poll, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_video = { @@ -1539,6 +1540,7 @@ static const struct file_operations dvb_audio_fops = { .open = dvb_audio_open, .release = dvb_audio_release, .poll = dvb_audio_poll, + .llseek = noop_llseek, }; static struct dvb_device dvbdev_audio = { diff --git a/drivers/media/dvb/ttpci/av7110_ca.c b/drivers/media/dvb/ttpci/av7110_ca.c index 4eba35a018e..43f61f2eca9 100644 --- a/drivers/media/dvb/ttpci/av7110_ca.c +++ b/drivers/media/dvb/ttpci/av7110_ca.c @@ -353,6 +353,7 @@ static const struct file_operations dvb_ca_fops = { .open = dvb_ca_open, .release = dvb_generic_release, .poll = dvb_ca_poll, + .llseek = default_llseek, }; static struct dvb_device dvbdev_ca = { diff --git a/drivers/media/dvb/ttpci/av7110_ir.c b/drivers/media/dvb/ttpci/av7110_ir.c index b070e88d8c6..908f272fe26 100644 --- a/drivers/media/dvb/ttpci/av7110_ir.c +++ b/drivers/media/dvb/ttpci/av7110_ir.c @@ -312,6 +312,7 @@ static ssize_t av7110_ir_proc_write(struct file *file, const char __user *buffer static const struct file_operations av7110_ir_proc_fops = { .owner = THIS_MODULE, .write = av7110_ir_proc_write, + .llseek = noop_llseek, }; /* interrupt handler */ diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c index 67a4ec8768a..4ce541a5eb4 100644 --- a/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -395,7 +395,7 @@ static int __devinit si470x_i2c_probe(struct i2c_client *client, radio->registers[POWERCFG] = POWERCFG_ENABLE; if (si470x_set_register(radio, POWERCFG) < 0) { retval = -EIO; - goto err_all; + goto err_video; } msleep(110); diff --git a/drivers/media/video/cx231xx/Makefile b/drivers/media/video/cx231xx/Makefile index 755dd0ce65f..6f2b5738448 100644 --- a/drivers/media/video/cx231xx/Makefile +++ b/drivers/media/video/cx231xx/Makefile @@ -11,4 +11,5 @@ EXTRA_CFLAGS 
+= -Idrivers/media/video EXTRA_CFLAGS += -Idrivers/media/common/tuners EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core EXTRA_CFLAGS += -Idrivers/media/dvb/frontends +EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-usb diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c index 6bdc0ef1811..f2a4900014b 100644 --- a/drivers/media/video/cx231xx/cx231xx-cards.c +++ b/drivers/media/video/cx231xx/cx231xx-cards.c @@ -32,6 +32,7 @@ #include <media/v4l2-chip-ident.h> #include <media/cx25840.h> +#include "dvb-usb-ids.h" #include "xc5000.h" #include "cx231xx.h" @@ -175,6 +176,8 @@ struct usb_device_id cx231xx_id_table[] = { .driver_info = CX231XX_BOARD_CNXT_RDE_250}, {USB_DEVICE(0x0572, 0x58A1), .driver_info = CX231XX_BOARD_CNXT_RDU_250}, + {USB_DEVICE_VER(USB_VID_PIXELVIEW, USB_PID_PIXELVIEW_SBTVD, 0x4000,0x4fff), + .driver_info = CX231XX_BOARD_UNKNOWN}, {}, }; @@ -226,14 +229,16 @@ void cx231xx_pre_card_setup(struct cx231xx *dev) dev->board.name, dev->model); /* set the direction for GPIO pins */ - cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); - cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); - cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); + if (dev->board.tuner_gpio) { + cx231xx_set_gpio_direction(dev, dev->board.tuner_gpio->bit, 1); + cx231xx_set_gpio_value(dev, dev->board.tuner_gpio->bit, 1); + cx231xx_set_gpio_direction(dev, dev->board.tuner_sif_gpio, 1); - /* request some modules if any required */ + /* request some modules if any required */ - /* reset the Tuner */ - cx231xx_gpio_set(dev, dev->board.tuner_gpio); + /* reset the Tuner */ + cx231xx_gpio_set(dev, dev->board.tuner_gpio); + } /* set the mode to Analog mode initially */ cx231xx_set_mode(dev, CX231XX_ANALOG_MODE); diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c index 86ca8c2359d..f5a3e74c3c7 100644 --- a/drivers/media/video/cx25840/cx25840-core.c +++ b/drivers/media/video/cx25840/cx25840-core.c @@ -1996,7 +1996,7 @@ static int cx25840_probe(struct i2c_client *client, state->volume = v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME, - 0, 65335, 65535 / 100, default_volume); + 0, 65535, 65535 / 100, default_volume); state->mute = v4l2_ctrl_new_std(&state->hdl, &cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0); diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig index 99dbae11759..0fa85cbefbb 100644 --- a/drivers/media/video/cx88/Kconfig +++ b/drivers/media/video/cx88/Kconfig @@ -17,7 +17,7 @@ config VIDEO_CX88 config VIDEO_CX88_ALSA tristate "Conexant 2388x DMA audio support" - depends on VIDEO_CX88 && SND && EXPERIMENTAL + depends on VIDEO_CX88 && SND select SND_PCM ---help--- This is a video4linux driver for direct (DMA) audio on diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c index 5b176bd7afd..f3e25e91366 100644 --- a/drivers/media/video/dabusb.c +++ b/drivers/media/video/dabusb.c @@ -32,7 +32,6 @@ #include <linux/list.h> #include <linux/vmalloc.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/init.h> #include <asm/uaccess.h> #include <asm/atomic.h> @@ -621,7 +620,6 @@ static int dabusb_open (struct inode *inode, struct file *file) if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB)) return -EIO; - lock_kernel(); s = &dabusb[devnum - DABUSB_MINOR]; dbg("dabusb_open"); @@ -630,21 +628,17 @@ static int dabusb_open (struct inode *inode, struct file *file) while (!s->usbdev 
|| s->opened) { mutex_unlock(&s->mutex); - if (file->f_flags & O_NONBLOCK) { + if (file->f_flags & O_NONBLOCK) return -EBUSY; - } msleep_interruptible(500); - if (signal_pending (current)) { - unlock_kernel(); + if (signal_pending (current)) return -EAGAIN; - } mutex_lock(&s->mutex); } if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) { mutex_unlock(&s->mutex); dev_err(&s->usbdev->dev, "set_interface failed\n"); - unlock_kernel(); return -EINVAL; } s->opened = 1; @@ -654,7 +648,6 @@ static int dabusb_open (struct inode *inode, struct file *file) file->private_data = s; r = nonseekable_open(inode, file); - unlock_kernel(); return r; } @@ -689,17 +682,13 @@ static long dabusb_ioctl (struct file *file, unsigned int cmd, unsigned long arg dbg("dabusb_ioctl"); - lock_kernel(); - if (s->remove_pending) { - unlock_kernel(); + if (s->remove_pending) return -EIO; - } mutex_lock(&s->mutex); if (!s->usbdev) { mutex_unlock(&s->mutex); - unlock_kernel(); return -EIO; } @@ -735,7 +724,6 @@ static long dabusb_ioctl (struct file *file, unsigned int cmd, unsigned long arg break; } mutex_unlock(&s->mutex); - unlock_kernel(); return ret; } diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c index b9846106913..78abc1c1f9d 100644 --- a/drivers/media/video/gspca/gspca.c +++ b/drivers/media/video/gspca/gspca.c @@ -223,6 +223,7 @@ static int alloc_and_submit_int_urb(struct gspca_dev *gspca_dev, usb_rcvintpipe(dev, ep->bEndpointAddress), buffer, buffer_len, int_irq, (void *)gspca_dev, interval); + urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; gspca_dev->int_urb = urb; ret = usb_submit_urb(urb, GFP_KERNEL); if (ret < 0) { diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c index 83a718f0f3f..9052d570255 100644 --- a/drivers/media/video/gspca/sn9c20x.c +++ b/drivers/media/video/gspca/sn9c20x.c @@ -2357,8 +2357,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev, (data[33] << 10); avg_lum >>= 9; atomic_set(&sd->avg_lum, avg_lum); - gspca_frame_add(gspca_dev, LAST_PACKET, - data, len); + gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); return; } if (gspca_dev->last_packet_type == LAST_PACKET) { diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c index be03a712731..f0316d02f09 100644 --- a/drivers/media/video/ivtv/ivtvfb.c +++ b/drivers/media/video/ivtv/ivtvfb.c @@ -466,6 +466,8 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar struct fb_vblank vblank; u32 trace; + memset(&vblank, 0, sizeof(struct fb_vblank)); + vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT | FB_VBLANK_HAVE_VSYNC; trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16; diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c index 4525335f9bd..a7210d98138 100644 --- a/drivers/media/video/mem2mem_testdev.c +++ b/drivers/media/video/mem2mem_testdev.c @@ -239,7 +239,7 @@ static int device_process(struct m2mtest_ctx *ctx, return -EFAULT; } - if (in_buf->vb.size < out_buf->vb.size) { + if (in_buf->vb.size > out_buf->vb.size) { v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n"); return -EINVAL; } @@ -1014,6 +1014,7 @@ static int m2mtest_remove(struct platform_device *pdev) v4l2_m2m_release(dev->m2m_dev); del_timer_sync(&dev->timer); video_unregister_device(dev->vfd); + video_device_release(dev->vfd); v4l2_device_unregister(&dev->v4l2_dev); kfree(dev); diff --git a/drivers/media/video/mt9m111.c b/drivers/media/video/mt9m111.c index 758a4db27d6..c71af4e0e51 100644 --- 
a/drivers/media/video/mt9m111.c +++ b/drivers/media/video/mt9m111.c @@ -447,6 +447,9 @@ static int mt9m111_s_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) dev_dbg(&client->dev, "%s left=%d, top=%d, width=%d, height=%d\n", __func__, rect.left, rect.top, rect.width, rect.height); + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + ret = mt9m111_make_rect(client, &rect); if (!ret) mt9m111->rect = rect; @@ -466,12 +469,14 @@ static int mt9m111_g_crop(struct v4l2_subdev *sd, struct v4l2_crop *a) static int mt9m111_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a) { + if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) + return -EINVAL; + a->bounds.left = MT9M111_MIN_DARK_COLS; a->bounds.top = MT9M111_MIN_DARK_ROWS; a->bounds.width = MT9M111_MAX_WIDTH; a->bounds.height = MT9M111_MAX_HEIGHT; a->defrect = a->bounds; - a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->pixelaspect.numerator = 1; a->pixelaspect.denominator = 1; @@ -487,6 +492,7 @@ static int mt9m111_g_fmt(struct v4l2_subdev *sd, mf->width = mt9m111->rect.width; mf->height = mt9m111->rect.height; mf->code = mt9m111->fmt->code; + mf->colorspace = mt9m111->fmt->colorspace; mf->field = V4L2_FIELD_NONE; return 0; diff --git a/drivers/media/video/mt9v022.c b/drivers/media/video/mt9v022.c index e7cd23cd639..b48473c7896 100644 --- a/drivers/media/video/mt9v022.c +++ b/drivers/media/video/mt9v022.c @@ -402,9 +402,6 @@ static int mt9v022_s_fmt(struct v4l2_subdev *sd, if (mt9v022->model != V4L2_IDENT_MT9V022IX7ATC) return -EINVAL; break; - case 0: - /* No format change, only geometry */ - break; default: return -EINVAL; } diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c index 66ff174151b..b6ea67221d1 100644 --- a/drivers/media/video/mx2_camera.c +++ b/drivers/media/video/mx2_camera.c @@ -378,6 +378,9 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb, spin_lock_irqsave(&pcdev->lock, flags); + if (*fb_active == NULL) + goto out; + vb = &(*fb_active)->vb; dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); @@ -402,6 +405,7 @@ static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb, *fb_active = buf; +out: spin_unlock_irqrestore(&pcdev->lock, flags); } diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c index 1b992b84719..55ea914c7fc 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c +++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c @@ -513,7 +513,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, if (ret >= 0) { ret = pvr2_ctrl_range_check(cptr,*valptr); } - if (maskptr) *maskptr = ~0; + *maskptr = ~0; } else if (cptr->info->type == pvr2_ctl_bool) { ret = parse_token(ptr,len,valptr,boolNames, ARRAY_SIZE(boolNames)); @@ -522,7 +522,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, } else if (ret == 0) { *valptr = (*valptr & 1) ? 
!0 : 0; } - if (maskptr) *maskptr = 1; + *maskptr = 1; } else if (cptr->info->type == pvr2_ctl_enum) { ret = parse_token( ptr,len,valptr, @@ -531,7 +531,7 @@ int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr, if (ret >= 0) { ret = pvr2_ctrl_range_check(cptr,*valptr); } - if (maskptr) *maskptr = ~0; + *maskptr = ~0; } else if (cptr->info->type == pvr2_ctl_bitmask) { ret = parse_tlist( ptr,len,maskptr,valptr, diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c index b151c7be8a5..6961c55baf9 100644 --- a/drivers/media/video/s5p-fimc/fimc-core.c +++ b/drivers/media/video/s5p-fimc/fimc-core.c @@ -393,6 +393,37 @@ static void fimc_set_yuv_order(struct fimc_ctx *ctx) dbg("ctx->out_order_1p= %d", ctx->out_order_1p); } +static void fimc_prepare_dma_offset(struct fimc_ctx *ctx, struct fimc_frame *f) +{ + struct samsung_fimc_variant *variant = ctx->fimc_dev->variant; + + f->dma_offset.y_h = f->offs_h; + if (!variant->pix_hoff) + f->dma_offset.y_h *= (f->fmt->depth >> 3); + + f->dma_offset.y_v = f->offs_v; + + f->dma_offset.cb_h = f->offs_h; + f->dma_offset.cb_v = f->offs_v; + + f->dma_offset.cr_h = f->offs_h; + f->dma_offset.cr_v = f->offs_v; + + if (!variant->pix_hoff) { + if (f->fmt->planes_cnt == 3) { + f->dma_offset.cb_h >>= 1; + f->dma_offset.cr_h >>= 1; + } + if (f->fmt->color == S5P_FIMC_YCBCR420) { + f->dma_offset.cb_v >>= 1; + f->dma_offset.cr_v >>= 1; + } + } + + dbg("in_offset: color= %d, y_h= %d, y_v= %d", + f->fmt->color, f->dma_offset.y_h, f->dma_offset.y_v); +} + /** * fimc_prepare_config - check dimensions, operation and color mode * and pre-calculate offset and the scaling coefficients. @@ -406,7 +437,6 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags) { struct fimc_frame *s_frame, *d_frame; struct fimc_vid_buffer *buf = NULL; - struct samsung_fimc_variant *variant = ctx->fimc_dev->variant; int ret = 0; s_frame = &ctx->s_frame; @@ -419,61 +449,16 @@ static int fimc_prepare_config(struct fimc_ctx *ctx, u32 flags) swap(d_frame->width, d_frame->height); } - /* Prepare the output offset ratios for scaler. */ - d_frame->dma_offset.y_h = d_frame->offs_h; - if (!variant->pix_hoff) - d_frame->dma_offset.y_h *= (d_frame->fmt->depth >> 3); - - d_frame->dma_offset.y_v = d_frame->offs_v; - - d_frame->dma_offset.cb_h = d_frame->offs_h; - d_frame->dma_offset.cb_v = d_frame->offs_v; - - d_frame->dma_offset.cr_h = d_frame->offs_h; - d_frame->dma_offset.cr_v = d_frame->offs_v; + /* Prepare the DMA offset ratios for scaler. */ + fimc_prepare_dma_offset(ctx, &ctx->s_frame); + fimc_prepare_dma_offset(ctx, &ctx->d_frame); - if (!variant->pix_hoff && d_frame->fmt->planes_cnt == 3) { - d_frame->dma_offset.cb_h >>= 1; - d_frame->dma_offset.cb_v >>= 1; - d_frame->dma_offset.cr_h >>= 1; - d_frame->dma_offset.cr_v >>= 1; - } - - dbg("out offset: color= %d, y_h= %d, y_v= %d", - d_frame->fmt->color, - d_frame->dma_offset.y_h, d_frame->dma_offset.y_v); - - /* Prepare the input offset ratios for scaler. 
*/ - s_frame->dma_offset.y_h = s_frame->offs_h; - if (!variant->pix_hoff) - s_frame->dma_offset.y_h *= (s_frame->fmt->depth >> 3); - s_frame->dma_offset.y_v = s_frame->offs_v; - - s_frame->dma_offset.cb_h = s_frame->offs_h; - s_frame->dma_offset.cb_v = s_frame->offs_v; - - s_frame->dma_offset.cr_h = s_frame->offs_h; - s_frame->dma_offset.cr_v = s_frame->offs_v; - - if (!variant->pix_hoff && s_frame->fmt->planes_cnt == 3) { - s_frame->dma_offset.cb_h >>= 1; - s_frame->dma_offset.cb_v >>= 1; - s_frame->dma_offset.cr_h >>= 1; - s_frame->dma_offset.cr_v >>= 1; - } - - dbg("in offset: color= %d, y_h= %d, y_v= %d", - s_frame->fmt->color, s_frame->dma_offset.y_h, - s_frame->dma_offset.y_v); - - fimc_set_yuv_order(ctx); - - /* Check against the scaler ratio. */ if (s_frame->height > (SCALER_MAX_VRATIO * d_frame->height) || s_frame->width > (SCALER_MAX_HRATIO * d_frame->width)) { err("out of scaler range"); return -EINVAL; } + fimc_set_yuv_order(ctx); } /* Input DMA mode is not allowed when the scaler is disabled. */ @@ -822,7 +807,8 @@ static int fimc_m2m_s_fmt(struct file *file, void *priv, struct v4l2_format *f) } else { v4l2_err(&ctx->fimc_dev->m2m.v4l2_dev, "Wrong buffer/video queue type (%d)\n", f->type); - return -EINVAL; + ret = -EINVAL; + goto s_fmt_out; } pix = &f->fmt.pix; @@ -1414,8 +1400,10 @@ static int fimc_probe(struct platform_device *pdev) } fimc->work_queue = create_workqueue(dev_name(&fimc->pdev->dev)); - if (!fimc->work_queue) + if (!fimc->work_queue) { + ret = -ENOMEM; goto err_irq; + } ret = fimc_register_m2m_device(fimc); if (ret) @@ -1492,6 +1480,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = { }; static struct samsung_fimc_variant fimc01_variant_s5pv210 = { + .pix_hoff = 1, .has_inp_rot = 1, .has_out_rot = 1, .min_inp_pixsize = 16, @@ -1506,6 +1495,7 @@ static struct samsung_fimc_variant fimc01_variant_s5pv210 = { }; static struct samsung_fimc_variant fimc2_variant_s5pv210 = { + .pix_hoff = 1, .min_inp_pixsize = 16, .min_out_pixsize = 32, diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index ec697fcd406..bb8d83d8dda 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -4323,13 +4323,13 @@ struct saa7134_board saa7134_boards[] = { }, [SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM] = { /* Beholder Intl. Ltd. 
2008 */ - /*Dmitry Belimov <d.belimov@gmail.com> */ - .name = "Beholder BeholdTV Columbus TVFM", + /* Dmitry Belimov <d.belimov@gmail.com> */ + .name = "Beholder BeholdTV Columbus TV/FM", .audio_clock = 0x00187de7, .tuner_type = TUNER_ALPS_TSBE5_PAL, - .radio_type = UNSET, - .tuner_addr = ADDR_UNSET, - .radio_addr = ADDR_UNSET, + .radio_type = TUNER_TEA5767, + .tuner_addr = 0xc2 >> 1, + .radio_addr = 0xc0 >> 1, .tda9887_conf = TDA9887_PRESENT, .gpiomask = 0x000A8004, .inputs = {{ diff --git a/drivers/media/video/saa7164/saa7164-buffer.c b/drivers/media/video/saa7164/saa7164-buffer.c index 5713f3a4b76..ddd25d32723 100644 --- a/drivers/media/video/saa7164/saa7164-buffer.c +++ b/drivers/media/video/saa7164/saa7164-buffer.c @@ -136,10 +136,11 @@ ret: int saa7164_buffer_dealloc(struct saa7164_tsport *port, struct saa7164_buffer *buf) { - struct saa7164_dev *dev = port->dev; + struct saa7164_dev *dev; - if ((buf == 0) || (port == 0)) + if (!buf || !port) return SAA_ERR_BAD_PARAMETER; + dev = port->dev; dprintk(DBGLVL_BUF, "%s() deallocating buffer @ 0x%p\n", __func__, buf); diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index 8bdd940f32e..2ac85d8984f 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c @@ -486,6 +486,12 @@ static int uvc_parse_format(struct uvc_device *dev, max(frame->dwFrameInterval[0], frame->dwDefaultFrameInterval)); + if (dev->quirks & UVC_QUIRK_RESTRICT_FRAME_RATE) { + frame->bFrameIntervalType = 1; + frame->dwFrameInterval[0] = + frame->dwDefaultFrameInterval; + } + uvc_trace(UVC_TRACE_DESCR, "- %ux%u (%u.%u fps)\n", frame->wWidth, frame->wHeight, 10000000/frame->dwDefaultFrameInterval, @@ -2026,6 +2032,15 @@ static struct usb_device_id uvc_ids[] = { .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, .bInterfaceProtocol = 0 }, + /* Chicony CNF7129 (Asus EEE 100HE) */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x04f2, + .idProduct = 0xb071, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_RESTRICT_FRAME_RATE }, /* Alcor Micro AU3820 (Future Boy PC USB Webcam) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, @@ -2091,6 +2106,15 @@ static struct usb_device_id uvc_ids[] = { .bInterfaceProtocol = 0, .driver_info = UVC_QUIRK_PROBE_MINMAX | UVC_QUIRK_PROBE_DEF }, + /* IMC Networks (Medion Akoya) */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x13d3, + .idProduct = 0x5103, + .bInterfaceClass = USB_CLASS_VIDEO, + .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_STREAM_NO_FID }, /* Syntek (HP Spartan) */ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index bdacf3beabf..892e0e51916 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h @@ -182,6 +182,7 @@ struct uvc_xu_control { #define UVC_QUIRK_IGNORE_SELECTOR_UNIT 0x00000020 #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 #define UVC_QUIRK_PROBE_DEF 0x00000100 +#define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 /* Format flags */ #define UVC_FMT_FLAG_COMPRESSED 0x00000001 diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c index 073f01390cd..86294ed35c9 100644 --- a/drivers/media/video/v4l2-compat-ioctl32.c +++ 
b/drivers/media/video/v4l2-compat-ioctl32.c @@ -193,17 +193,24 @@ static int put_video_window32(struct video_window *kp, struct video_window32 __u struct video_code32 { char loadwhat[16]; /* name or tag of file being passed */ compat_int_t datasize; - unsigned char *data; + compat_uptr_t data; }; -static int get_microcode32(struct video_code *kp, struct video_code32 __user *up) +static struct video_code __user *get_microcode32(struct video_code32 *kp) { - if (!access_ok(VERIFY_READ, up, sizeof(struct video_code32)) || - copy_from_user(kp->loadwhat, up->loadwhat, sizeof(up->loadwhat)) || - get_user(kp->datasize, &up->datasize) || - copy_from_user(kp->data, up->data, up->datasize)) - return -EFAULT; - return 0; + struct video_code __user *up; + + up = compat_alloc_user_space(sizeof(*up)); + + /* + * NOTE! We don't actually care if these fail. If the + * user address is invalid, the native ioctl will do + * the error handling for us + */ + (void) copy_to_user(up->loadwhat, kp->loadwhat, sizeof(up->loadwhat)); + (void) put_user(kp->datasize, &up->datasize); + (void) put_user(compat_ptr(kp->data), &up->data); + return up; } #define VIDIOCGTUNER32 _IOWR('v', 4, struct video_tuner32) @@ -739,7 +746,7 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar struct video_tuner vt; struct video_buffer vb; struct video_window vw; - struct video_code vc; + struct video_code32 vc; struct video_audio va; #endif struct v4l2_format v2f; @@ -818,8 +825,11 @@ static long do_video_ioctl(struct file *file, unsigned int cmd, unsigned long ar break; case VIDIOCSMICROCODE: - err = get_microcode32(&karg.vc, up); - compatible_arg = 0; + /* Copy the 32-bit "video_code32" to kernel space */ + if (copy_from_user(&karg.vc, up, sizeof(karg.vc))) + return -EFAULT; + /* Convert the 32-bit version to a 64-bit version in user space */ + up = get_microcode32(&karg.vc); break; case VIDIOCSFREQ: diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c index 372b87efcd0..6ff9e4bac3e 100644 --- a/drivers/media/video/videobuf-dma-contig.c +++ b/drivers/media/video/videobuf-dma-contig.c @@ -393,8 +393,10 @@ void videobuf_dma_contig_free(struct videobuf_queue *q, } /* read() method */ - dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); - mem->vaddr = NULL; + if (mem->vaddr) { + dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle); + mem->vaddr = NULL; + } } EXPORT_SYMBOL_GPL(videobuf_dma_contig_free); diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c index 06f9a9c2a39..2ad0bc252b0 100644 --- a/drivers/media/video/videobuf-dma-sg.c +++ b/drivers/media/video/videobuf-dma-sg.c @@ -94,7 +94,7 @@ err: * must free the memory. 
*/ static struct scatterlist *videobuf_pages_to_sg(struct page **pages, - int nr_pages, int offset) + int nr_pages, int offset, size_t size) { struct scatterlist *sglist; int i; @@ -110,12 +110,14 @@ static struct scatterlist *videobuf_pages_to_sg(struct page **pages, /* DMA to highmem pages might not work */ goto highmem; sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset); + size -= PAGE_SIZE - offset; for (i = 1; i < nr_pages; i++) { if (NULL == pages[i]) goto nopage; if (PageHighMem(pages[i])) goto highmem; - sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0); + sg_set_page(&sglist[i], pages[i], min(PAGE_SIZE, size), 0); + size -= min(PAGE_SIZE, size); } return sglist; @@ -170,7 +172,8 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, first = (data & PAGE_MASK) >> PAGE_SHIFT; last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT; - dma->offset = data & ~PAGE_MASK; + dma->offset = data & ~PAGE_MASK; + dma->size = size; dma->nr_pages = last-first+1; dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); if (NULL == dma->pages) @@ -252,7 +255,7 @@ int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma) if (dma->pages) { dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, - dma->offset); + dma->offset, dma->size); } if (dma->vaddr) { dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index d3f1a087ece..02362eccc58 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c @@ -18,11 +18,12 @@ #include <linux/kthread.h> #include <linux/delay.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/memstick.h> #define DRIVER_NAME "mspro_block" +static DEFINE_MUTEX(mspro_block_mutex); static int major; module_param(major, int, 0644); @@ -180,7 +181,7 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode) struct mspro_block_data *msb = disk->private_data; int rc = -ENXIO; - lock_kernel(); + mutex_lock(&mspro_block_mutex); mutex_lock(&mspro_block_disk_lock); if (msb && msb->card) { @@ -192,7 +193,7 @@ static int mspro_block_bd_open(struct block_device *bdev, fmode_t mode) } mutex_unlock(&mspro_block_disk_lock); - unlock_kernel(); + mutex_unlock(&mspro_block_mutex); return rc; } @@ -225,9 +226,9 @@ static int mspro_block_disk_release(struct gendisk *disk) static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode) { int ret; - lock_kernel(); + mutex_lock(&mspro_block_mutex); ret = mspro_block_disk_release(disk); - unlock_kernel(); + mutex_unlock(&mspro_block_mutex); return ret; } diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index d8ddfdf8be1..a3856ed90ae 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c @@ -54,7 +54,7 @@ #include <linux/pci.h> #include <linux/delay.h> /* for mdelay */ #include <linux/miscdevice.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/compat.h> #include <asm/io.h> @@ -83,6 +83,7 @@ MODULE_VERSION(my_VERSION); /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/ +static DEFINE_MUTEX(mpctl_mutex); static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS; @@ -601,12 +602,12 @@ mptctl_fasync(int fd, struct file *filep, int mode) MPT_ADAPTER *ioc; int ret; - lock_kernel(); + mutex_lock(&mpctl_mutex); list_for_each_entry(ioc, &ioc_list, list) 
ioc->aen_event_read_flag=0; ret = fasync_helper(fd, filep, mode, &async_queue); - unlock_kernel(); + mutex_unlock(&mpctl_mutex); return ret; } @@ -698,9 +699,9 @@ static long mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&mpctl_mutex); ret = __mptctl_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&mpctl_mutex); return ret; } @@ -2926,7 +2927,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd, static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&mpctl_mutex); switch (cmd) { case MPTIOCINFO: case MPTIOCINFO1: @@ -2951,7 +2952,7 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a ret = -ENOIOCTLCMD; break; } - unlock_kernel(); + mutex_unlock(&mpctl_mutex); return ret; } diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index f0f1e667000..f87a9d405a5 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -53,7 +53,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/i2o.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/mempool.h> @@ -69,6 +69,7 @@ #define OSM_VERSION "1.325" #define OSM_DESCRIPTION "I2O Block Device OSM" +static DEFINE_MUTEX(i2o_block_mutex); static struct i2o_driver i2o_block_driver; /* global Block OSM request mempool */ @@ -578,7 +579,7 @@ static int i2o_block_open(struct block_device *bdev, fmode_t mode) if (!dev->i2o_dev) return -ENODEV; - lock_kernel(); + mutex_lock(&i2o_block_mutex); if (dev->power > 0x1f) i2o_block_device_power(dev, 0x02); @@ -587,7 +588,7 @@ static int i2o_block_open(struct block_device *bdev, fmode_t mode) i2o_block_device_lock(dev->i2o_dev, -1); osm_debug("Ready.\n"); - unlock_kernel(); + mutex_unlock(&i2o_block_mutex); return 0; }; @@ -618,7 +619,7 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode) if (!dev->i2o_dev) return 0; - lock_kernel(); + mutex_lock(&i2o_block_mutex); i2o_block_device_flush(dev->i2o_dev); i2o_block_device_unlock(dev->i2o_dev, -1); @@ -629,7 +630,7 @@ static int i2o_block_release(struct gendisk *disk, fmode_t mode) operation = 0x24; i2o_block_device_power(dev, operation); - unlock_kernel(); + mutex_unlock(&i2o_block_mutex); return 0; } @@ -664,7 +665,7 @@ static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode, if (!capable(CAP_SYS_ADMIN)) return -EPERM; - lock_kernel(); + mutex_lock(&i2o_block_mutex); switch (cmd) { case BLKI2OGRSTRAT: ret = put_user(dev->rcache, (int __user *)arg); @@ -688,7 +689,7 @@ static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode, ret = 0; break; } - unlock_kernel(); + mutex_unlock(&i2o_block_mutex); return ret; }; diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c index 068ba0785bb..7d3cc575c36 100644 --- a/drivers/message/i2o/i2o_config.c +++ b/drivers/message/i2o/i2o_config.c @@ -31,7 +31,7 @@ */ #include <linux/miscdevice.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/compat.h> #include <linux/slab.h> @@ -41,6 +41,7 @@ #define SG_TABLESIZE 30 +static DEFINE_MUTEX(i2o_cfg_mutex); static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long); static spinlock_t i2o_config_lock; @@ -741,7 +742,7 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&i2o_cfg_mutex); switch (cmd) { case I2OGETIOPS: ret = i2o_cfg_ioctl(file, cmd, arg); @@ 
-753,7 +754,7 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd, ret = -ENOIOCTLCMD; break; } - unlock_kernel(); + mutex_unlock(&i2o_cfg_mutex); return ret; } @@ -981,7 +982,7 @@ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&i2o_cfg_mutex); switch (cmd) { case I2OGETIOPS: ret = i2o_cfg_getiops(arg); @@ -1037,7 +1038,7 @@ static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg) osm_debug("unknown ioctl called!\n"); ret = -EINVAL; } - unlock_kernel(); + mutex_unlock(&i2o_cfg_mutex); return ret; } @@ -1051,7 +1052,7 @@ static int cfg_open(struct inode *inode, struct file *file) if (!tmp) return -ENOMEM; - lock_kernel(); + mutex_lock(&i2o_cfg_mutex); file->private_data = (void *)(i2o_cfg_info_id++); tmp->fp = file; tmp->fasync = NULL; @@ -1065,7 +1066,7 @@ static int cfg_open(struct inode *inode, struct file *file) spin_lock_irqsave(&i2o_config_lock, flags); open_files = tmp; spin_unlock_irqrestore(&i2o_config_lock, flags); - unlock_kernel(); + mutex_unlock(&i2o_cfg_mutex); return 0; } @@ -1076,14 +1077,14 @@ static int cfg_fasync(int fd, struct file *fp, int on) struct i2o_cfg_info *p; int ret = -EBADF; - lock_kernel(); + mutex_lock(&i2o_cfg_mutex); for (p = open_files; p; p = p->next) if (p->q_id == id) break; if (p) ret = fasync_helper(fd, fp, on, &p->fasync); - unlock_kernel(); + mutex_unlock(&i2o_cfg_mutex); return ret; } @@ -1093,7 +1094,7 @@ static int cfg_release(struct inode *inode, struct file *file) struct i2o_cfg_info *p, **q; unsigned long flags; - lock_kernel(); + mutex_lock(&i2o_cfg_mutex); spin_lock_irqsave(&i2o_config_lock, flags); for (q = &open_files; (p = *q) != NULL; q = &p->next) { if (p->q_id == id) { @@ -1103,7 +1104,7 @@ static int cfg_release(struct inode *inode, struct file *file) } } spin_unlock_irqrestore(&i2o_config_lock, flags); - unlock_kernel(); + mutex_unlock(&i2o_cfg_mutex); return 0; } diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 66379b41390..b048ecc56db 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -583,6 +583,7 @@ static ssize_t ab3100_get_set_reg(struct file *file, static const struct file_operations ab3100_get_set_reg_fops = { .open = ab3100_get_set_reg_open_file, .write = ab3100_get_set_reg, + .llseek = noop_llseek, }; static struct dentry *ab3100_dir; diff --git a/drivers/mfd/ab8500-spi.c b/drivers/mfd/ab8500-spi.c index e1c8b62b086..01b6d584442 100644 --- a/drivers/mfd/ab8500-spi.c +++ b/drivers/mfd/ab8500-spi.c @@ -83,6 +83,11 @@ static int __devinit ab8500_spi_probe(struct spi_device *spi) struct ab8500 *ab8500; int ret; + spi->bits_per_word = 24; + ret = spi_setup(spi); + if (ret < 0) + return ret; + ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL); if (!ab8500) return -ENOMEM; diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 04028a9ee08..428377a5a6f 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c @@ -429,24 +429,25 @@ static void max8925_irq_sync_unlock(unsigned int irq) irq_tsc = cache_tsc; for (i = 0; i < ARRAY_SIZE(max8925_irqs); i++) { irq_data = &max8925_irqs[i]; + /* 1 -- disable, 0 -- enable */ switch (irq_data->mask_reg) { case MAX8925_CHG_IRQ1_MASK: - irq_chg[0] &= irq_data->enable; + irq_chg[0] &= ~irq_data->enable; break; case MAX8925_CHG_IRQ2_MASK: - irq_chg[1] &= irq_data->enable; + irq_chg[1] &= ~irq_data->enable; break; case MAX8925_ON_OFF_IRQ1_MASK: - irq_on[0] &= irq_data->enable; + irq_on[0] &= ~irq_data->enable; 
break; case MAX8925_ON_OFF_IRQ2_MASK: - irq_on[1] &= irq_data->enable; + irq_on[1] &= ~irq_data->enable; break; case MAX8925_RTC_IRQ_MASK: - irq_rtc &= irq_data->enable; + irq_rtc &= ~irq_data->enable; break; case MAX8925_TSC_IRQ_MASK: - irq_tsc &= irq_data->enable; + irq_tsc &= ~irq_data->enable; break; default: dev_err(chip->dev, "wrong IRQ\n"); diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 097f24d8bce..b9fda7018ce 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -78,7 +78,7 @@ struct sih { u8 irq_lines; /* number of supported irq lines */ /* SIR ignored -- set interrupt, for testing only */ - struct irq_data { + struct sih_irq_data { u8 isr_offset; u8 imr_offset; } mask[2]; @@ -810,7 +810,7 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end) twl4030_irq_chip = dummy_irq_chip; twl4030_irq_chip.name = "twl4030"; - twl4030_sih_irq_chip.ack = dummy_irq_chip.ack; + twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack; for (i = irq_base; i < irq_end; i++) { set_irq_chip_and_handler(i, &twl4030_irq_chip, diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 7dabe4dbd37..294183b6260 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -394,8 +394,13 @@ static int wm831x_irq_set_type(unsigned int irq, unsigned int type) irq = irq - wm831x->irq_base; - if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) - return -EINVAL; + if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { + /* Ignore internal-only IRQs */ + if (irq >= 0 && irq < WM831X_NUM_IRQS) + return 0; + else + return -EINVAL; + } switch (type) { case IRQ_TYPE_EDGE_BOTH: diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 0b591b65824..b7433126074 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -368,7 +368,7 @@ config VMWARE_BALLOON If unsure, say N. To compile this driver as a module, choose M here: the - module will be called vmware_balloon. + module will be called vmw_balloon. config ARM_CHARLCD bool "ARM Ltd. 
Character LCD Driver" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 255a80dc9d7..42eab95cde2 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -33,5 +33,5 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ obj-$(CONFIG_HMC6352) += hmc6352.o obj-y += eeprom/ obj-y += cb710/ -obj-$(CONFIG_VMWARE_BALLOON) += vmware_balloon.o +obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o diff --git a/drivers/misc/bh1780gli.c b/drivers/misc/bh1780gli.c index 714c6b48731..d5f3a3fd231 100644 --- a/drivers/misc/bh1780gli.c +++ b/drivers/misc/bh1780gli.c @@ -190,7 +190,6 @@ static int __devexit bh1780_remove(struct i2c_client *client) ddata = i2c_get_clientdata(client); sysfs_remove_group(&client->dev.kobj, &bh1780_attr_group); - i2c_set_clientdata(client, NULL); kfree(ddata); return 0; diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c index 557a8c2a733..69c1f2fca14 100644 --- a/drivers/misc/hpilo.c +++ b/drivers/misc/hpilo.c @@ -640,6 +640,7 @@ static const struct file_operations ilo_fops = { .poll = ilo_poll, .open = ilo_open, .release = ilo_close, + .llseek = noop_llseek, }; static irqreturn_t ilo_isr(int irq, void *data) diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index 8844a3f4538..af2497ae5fe 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -584,6 +584,7 @@ static const struct file_operations command_fops = { .release = command_file_close, .read = command_file_read, .write = command_file_write, + .llseek = generic_file_llseek, }; static const struct file_operations event_fops = { @@ -591,6 +592,7 @@ static const struct file_operations event_fops = { .release = event_file_close, .read = event_file_read, .write = event_file_write, + .llseek = generic_file_llseek, }; static const struct file_operations r_heartbeat_fops = { @@ -598,6 +600,7 @@ static const struct file_operations r_heartbeat_fops = { .release = r_heartbeat_file_close, .read = r_heartbeat_file_read, .write = r_heartbeat_file_write, + .llseek = generic_file_llseek, }; static const struct file_operations remote_settings_fops = { @@ -605,6 +608,7 @@ static const struct file_operations remote_settings_fops = { .release = remote_settings_file_close, .read = remote_settings_file_read, .write = remote_settings_file_write, + .llseek = generic_file_llseek, }; diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c index e9eda471f6e..62fbaec4820 100644 --- a/drivers/misc/iwmc3200top/debugfs.c +++ b/drivers/misc/iwmc3200top/debugfs.c @@ -71,6 +71,7 @@ ssize_t iwmct_dbgfs_##name##_write(struct file *file, \ static const struct file_operations iwmct_dbgfs_##name##_ops = { \ .read = iwmct_dbgfs_##name##_read, \ .open = iwmct_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ @@ -78,6 +79,7 @@ ssize_t iwmct_dbgfs_##name##_write(struct file *file, \ static const struct file_operations iwmct_dbgfs_##name##_ops = { \ .write = iwmct_dbgfs_##name##_write, \ .open = iwmct_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ }; #define DEBUGFS_READ_WRITE_FILE_OPS(name) \ @@ -87,6 +89,7 @@ ssize_t iwmct_dbgfs_##name##_write(struct file *file, \ .write = iwmct_dbgfs_##name##_write, \ .read = iwmct_dbgfs_##name##_read, \ .open = iwmct_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ }; diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c index ef34de7a802..343b5d8ea69 100644 --- a/drivers/misc/lkdtm.c +++ 
b/drivers/misc/lkdtm.c @@ -575,30 +575,39 @@ struct crash_entry { static const struct crash_entry crash_entries[] = { {"DIRECT", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = direct_entry} }, {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = int_hardware_entry} }, {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = int_hw_irq_en} }, {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = int_tasklet_entry} }, {"FS_DEVRW", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = fs_devrw_entry} }, {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = mem_swapout_entry} }, {"TIMERADD", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = timeradd_entry} }, {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = scsi_dispatch_cmd_entry} }, {"IDE_CORE_CP", {.read = lkdtm_debugfs_read, + .llseek = generic_file_llseek, .open = lkdtm_debugfs_open, .write = ide_core_cp_entry} }, }; diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 75ee0d3f6f4..4197a3cb26b 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/phantom.h> #include <linux/sched.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/atomic.h> #include <asm/io.h> @@ -38,6 +38,7 @@ #define PHB_RUNNING 1 #define PHB_NOT_OH 2 +static DEFINE_MUTEX(phantom_mutex); static struct class *phantom_class; static int phantom_major; @@ -215,17 +216,17 @@ static int phantom_open(struct inode *inode, struct file *file) struct phantom_device *dev = container_of(inode->i_cdev, struct phantom_device, cdev); - lock_kernel(); + mutex_lock(&phantom_mutex); nonseekable_open(inode, file); if (mutex_lock_interruptible(&dev->open_lock)) { - unlock_kernel(); + mutex_unlock(&phantom_mutex); return -ERESTARTSYS; } if (dev->opened) { mutex_unlock(&dev->open_lock); - unlock_kernel(); + mutex_unlock(&phantom_mutex); return -EINVAL; } @@ -236,7 +237,7 @@ static int phantom_open(struct inode *inode, struct file *file) atomic_set(&dev->counter, 0); dev->opened++; mutex_unlock(&dev->open_lock); - unlock_kernel(); + mutex_unlock(&phantom_mutex); return 0; } @@ -279,6 +280,7 @@ static const struct file_operations phantom_file_ops = { .unlocked_ioctl = phantom_ioctl, .compat_ioctl = phantom_compat_ioctl, .poll = phantom_poll, + .llseek = no_llseek, }; static irqreturn_t phantom_isr(int irq, void *data) diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c index cb3b4d22847..28852dfa310 100644 --- a/drivers/misc/sgi-gru/grufile.c +++ b/drivers/misc/sgi-gru/grufile.c @@ -587,6 +587,7 @@ static const struct file_operations gru_fops = { .owner = THIS_MODULE, .unlocked_ioctl = gru_file_unlocked_ioctl, .mmap = gru_file_mmap, + .llseek = noop_llseek, }; static struct miscdevice gru_miscdev = { diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmw_balloon.c index 2a1e804a71a..2a1e804a71a 100644 --- a/drivers/misc/vmware_balloon.c +++ b/drivers/misc/vmw_balloon.c diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index d545f79f600..00073b7c036 100644 --- 
a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -29,7 +29,6 @@ #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> @@ -51,6 +50,7 @@ MODULE_ALIAS("mmc:block"); #define MMC_SHIFT 3 #define MMC_NUM_MINORS (256 >> MMC_SHIFT) +static DEFINE_MUTEX(block_mutex); static DECLARE_BITMAP(dev_use, MMC_NUM_MINORS); /* @@ -108,7 +108,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode) struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); int ret = -ENXIO; - lock_kernel(); + mutex_lock(&block_mutex); if (md) { if (md->usage == 2) check_disk_change(bdev); @@ -119,7 +119,7 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode) ret = -EROFS; } } - unlock_kernel(); + mutex_unlock(&block_mutex); return ret; } @@ -128,9 +128,9 @@ static int mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; - lock_kernel(); + mutex_lock(&block_mutex); mmc_blk_put(md); - unlock_kernel(); + mutex_unlock(&block_mutex); return 0; } diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index e876678176b..9c0b42bfe08 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -128,7 +128,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock mq->req = NULL; blk_queue_prep_rq(mq->queue, mmc_prep_request); - blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); if (mmc_can_erase(card)) { queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 5db49b124ff..09eee6df065 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1631,6 +1631,19 @@ int mmc_suspend_host(struct mmc_host *host) if (host->bus_ops && !host->bus_dead) { if (host->bus_ops->suspend) err = host->bus_ops->suspend(host); + if (err == -ENOSYS || !host->bus_ops->resume) { + /* + * We simply "remove" the card in this case. + * It will be redetected on resume. 
+ */ + if (host->bus_ops->remove) + host->bus_ops->remove(host); + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_release_host(host); + host->pm_flags = 0; + err = 0; + } } mmc_bus_put(host); diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 53cb380c098..46bc6d7551a 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -245,6 +245,7 @@ static const struct file_operations mmc_dbg_ext_csd_fops = { .open = mmc_ext_csd_open, .read = mmc_ext_csd_read, .release = mmc_ext_csd_release, + .llseek = default_llseek, }; void mmc_add_card_debugfs(struct mmc_card *card) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 840b301b567..f2e02d7d9f3 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -41,23 +41,35 @@ static unsigned int fmax = 515633; * @clkreg: default value for MCICLOCK register * @clkreg_enable: enable value for MMCICLOCK register * @datalength_bits: number of bits in the MMCIDATALENGTH register + * @fifosize: number of bytes that can be written when MMCI_TXFIFOEMPTY + * is asserted (likewise for RX) + * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY + * is asserted (likewise for RX) */ struct variant_data { unsigned int clkreg; unsigned int clkreg_enable; unsigned int datalength_bits; + unsigned int fifosize; + unsigned int fifohalfsize; }; static struct variant_data variant_arm = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, .datalength_bits = 16, }; static struct variant_data variant_u300 = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, .clkreg_enable = 1 << 13, /* HWFCEN */ .datalength_bits = 16, }; static struct variant_data variant_ux500 = { + .fifosize = 30 * 4, + .fifohalfsize = 8 * 4, .clkreg = MCI_CLK_ENABLE, .clkreg_enable = 1 << 14, /* HWFCEN */ .datalength_bits = 24, @@ -138,6 +150,7 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; unsigned int datactrl, timeout, irqmask; unsigned long long clks; void __iomem *base; @@ -173,7 +186,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) * If we have less than a FIFOSIZE of bytes to transfer, * trigger a PIO interrupt as soon as any data is available. */ - if (host->size < MCI_FIFOSIZE) + if (host->size < variant->fifosize) irqmask |= MCI_RXDATAAVLBLMASK; } else { /* @@ -332,13 +345,15 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int remain, u32 status) { + struct variant_data *variant = host->variant; void __iomem *base = host->base; char *ptr = buffer; do { unsigned int count, maxcnt; - maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE : MCI_FIFOHALFSIZE; + maxcnt = status & MCI_TXFIFOEMPTY ? + variant->fifosize : variant->fifohalfsize; count = min(remain, maxcnt); writesl(base + MMCIFIFO, ptr, count >> 2); @@ -362,6 +377,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; void __iomem *base = host->base; unsigned long flags; u32 status; @@ -420,7 +436,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) * If we're nearing the end of the read, switch to * "any data available" mode. 
*/ - if (status & MCI_RXACTIVE && host->size < MCI_FIFOSIZE) + if (status & MCI_RXACTIVE && host->size < variant->fifosize) writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1); /* @@ -564,18 +580,23 @@ static int mmci_get_ro(struct mmc_host *mmc) if (host->gpio_wp == -ENOSYS) return -ENOSYS; - return gpio_get_value(host->gpio_wp); + return gpio_get_value_cansleep(host->gpio_wp); } static int mmci_get_cd(struct mmc_host *mmc) { struct mmci_host *host = mmc_priv(mmc); + struct mmci_platform_data *plat = host->plat; unsigned int status; - if (host->gpio_cd == -ENOSYS) - status = host->plat->status(mmc_dev(host->mmc)); - else - status = !gpio_get_value(host->gpio_cd); + if (host->gpio_cd == -ENOSYS) { + if (!plat->status) + return 1; /* Assume always present */ + + status = plat->status(mmc_dev(host->mmc)); + } else + status = !!gpio_get_value_cansleep(host->gpio_cd) + ^ plat->cd_invert; /* * Use positive logic throughout - status is zero for no card, @@ -584,6 +605,15 @@ static int mmci_get_cd(struct mmc_host *mmc) return status; } +static irqreturn_t mmci_cd_irq(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + + mmc_detect_change(host->mmc, msecs_to_jiffies(500)); + + return IRQ_HANDLED; +} + static const struct mmc_host_ops mmci_ops = { .request = mmci_request, .set_ios = mmci_set_ios, @@ -620,6 +650,7 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) host->gpio_wp = -ENOSYS; host->gpio_cd = -ENOSYS; + host->gpio_cd_irq = -1; host->hw_designer = amba_manf(dev); host->hw_revision = amba_rev(dev); @@ -699,7 +730,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) if (host->vcc == NULL) mmc->ocr_avail = plat->ocr_mask; mmc->caps = plat->capabilities; - mmc->caps |= MMC_CAP_NEEDS_POLL; /* * We can do SGIO @@ -744,6 +774,12 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) host->gpio_cd = plat->gpio_cd; else if (ret != -ENOSYS) goto err_gpio_cd; + + ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), + mmci_cd_irq, 0, + DRIVER_NAME " (cd)", host); + if (ret >= 0) + host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); } if (gpio_is_valid(plat->gpio_wp)) { ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); @@ -755,6 +791,10 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) goto err_gpio_wp; } + if ((host->plat->status || host->gpio_cd != -ENOSYS) + && host->gpio_cd_irq < 0) + mmc->caps |= MMC_CAP_NEEDS_POLL; + ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); if (ret) goto unmap; @@ -781,6 +821,8 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) if (host->gpio_wp != -ENOSYS) gpio_free(host->gpio_wp); err_gpio_wp: + if (host->gpio_cd_irq >= 0) + free_irq(host->gpio_cd_irq, host); if (host->gpio_cd != -ENOSYS) gpio_free(host->gpio_cd); err_gpio_cd: @@ -819,6 +861,8 @@ static int __devexit mmci_remove(struct amba_device *dev) if (host->gpio_wp != -ENOSYS) gpio_free(host->gpio_wp); + if (host->gpio_cd_irq >= 0) + free_irq(host->gpio_cd_irq, host); if (host->gpio_cd != -ENOSYS) gpio_free(host->gpio_cd); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 68970cfb81e..4ae887fc018 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -54,10 +54,16 @@ #define MCI_DPSM_MODE (1 << 2) #define MCI_DPSM_DMAENABLE (1 << 3) #define MCI_DPSM_BLOCKSIZE (1 << 4) -#define MCI_DPSM_RWSTART (1 << 8) -#define MCI_DPSM_RWSTOP (1 << 9) -#define MCI_DPSM_RWMOD (1 << 10) -#define MCI_DPSM_SDIOEN 
(1 << 11) +/* Control register extensions in the ST Micro U300 and Ux500 versions */ +#define MCI_ST_DPSM_RWSTART (1 << 8) +#define MCI_ST_DPSM_RWSTOP (1 << 9) +#define MCI_ST_DPSM_RWMOD (1 << 10) +#define MCI_ST_DPSM_SDIOEN (1 << 11) +/* Control register extensions in the ST Micro Ux500 versions */ +#define MCI_ST_DPSM_DMAREQCTL (1 << 12) +#define MCI_ST_DPSM_DBOOTMODEEN (1 << 13) +#define MCI_ST_DPSM_BUSYMODE (1 << 14) +#define MCI_ST_DPSM_DDRMODE (1 << 15) #define MMCIDATACNT 0x030 #define MMCISTATUS 0x034 @@ -133,13 +139,6 @@ MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK) -/* - * The size of the FIFO in bytes. - */ -#define MCI_FIFOSIZE (16*4) - -#define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2) - #define NR_SG 16 struct clk; @@ -154,6 +153,7 @@ struct mmci_host { struct clk *clk; int gpio_cd; int gpio_wp; + int gpio_cd_irq; unsigned int data_xfered; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 71ad4163b95..aacb862ecc8 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -241,8 +241,10 @@ static struct sdhci_ops sdhci_s3c_ops = { static void sdhci_s3c_notify_change(struct platform_device *dev, int state) { struct sdhci_host *host = platform_get_drvdata(dev); + unsigned long flags; + if (host) { - spin_lock(&host->lock); + spin_lock_irqsave(&host->lock, flags); if (state) { dev_dbg(&dev->dev, "card inserted.\n"); host->flags &= ~SDHCI_DEVICE_DEAD; @@ -253,7 +255,7 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state) host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } tasklet_schedule(&host->card_tasklet); - spin_unlock(&host->lock); + spin_unlock_irqrestore(&host->lock, flags); } } @@ -481,8 +483,10 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev) sdhci_remove_host(host, 1); for (ptr = 0; ptr < 3; ptr++) { - clk_disable(sc->clk_bus[ptr]); - clk_put(sc->clk_bus[ptr]); + if (sc->clk_bus[ptr]) { + clk_disable(sc->clk_bus[ptr]); + clk_put(sc->clk_bus[ptr]); + } } clk_disable(sc->clk_io); clk_put(sc->clk_io); diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index 7aa65bb2af4..f472c2714eb 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -30,7 +30,6 @@ #include <linux/ioport.h> #include <linux/scatterlist.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <linux/io.h> @@ -536,9 +535,7 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link) #endif static struct pcmcia_driver sdricoh_driver = { - .drv = { - .name = DRIVER_NAME, - }, + .name = DRIVER_NAME, .probe = sdricoh_pcmcia_probe, .remove = sdricoh_pcmcia_detach, .id_table = pcmcia_ids, diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c index e9ca5ba7d9d..57a1acfe22c 100644 --- a/drivers/mtd/maps/pcmciamtd.c +++ b/drivers/mtd/maps/pcmciamtd.c @@ -16,7 +16,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -101,7 +100,7 @@ MODULE_PARM_DESC(mem_type, "Set Memory type (0=Flash, 1=RAM, 2=ROM, default=0)") static caddr_t remap_window(struct map_info *map, unsigned long to) { struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; - window_handle_t win = (window_handle_t)map->map_priv_2; + struct resource *win = (struct resource *) map->map_priv_2; unsigned int offset; int ret; @@ -316,30 +315,19 @@ static void pcmciamtd_set_vpp(struct map_info *map, int on) { struct 
pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; struct pcmcia_device *link = dev->p_dev; - modconf_t mod; - int ret; - - mod.Attributes = CONF_VPP1_CHANGE_VALID | CONF_VPP2_CHANGE_VALID; - mod.Vcc = 0; - mod.Vpp1 = mod.Vpp2 = on ? dev->vpp : 0; DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp); - ret = pcmcia_modify_configuration(link, &mod); + pcmcia_fixup_vpp(link, on ? dev->vpp : 0); } -/* After a card is removed, pcmciamtd_release() will unregister the - * device, and release the PCMCIA configuration. If the device is - * still open, this will be postponed until it is closed. - */ - static void pcmciamtd_release(struct pcmcia_device *link) { struct pcmciamtd_dev *dev = link->priv; DEBUG(3, "link = 0x%p", link); - if (link->win) { + if (link->resource[2]->end) { if(dev->win_base) { iounmap(dev->win_base); dev->win_base = NULL; @@ -482,18 +470,12 @@ static void card_settings(struct pcmciamtd_dev *dev, struct pcmcia_device *p_dev } -/* pcmciamtd_config() is scheduled to run after a CARD_INSERTION event - * is received, to configure the PCMCIA socket, and to make the - * MTD device available to the system. - */ - static int pcmciamtd_config(struct pcmcia_device *link) { struct pcmciamtd_dev *dev = link->priv; struct mtd_info *mtd = NULL; - win_req_t req; int ret; - int i; + int i, j = 0; static char *probes[] = { "jedec_probe", "cfi_probe" }; int new_name = 0; @@ -520,28 +502,34 @@ static int pcmciamtd_config(struct pcmcia_device *link) * smaller windows until we succeed */ - req.Attributes = WIN_MEMORY_TYPE_CM | WIN_ENABLE; - req.Attributes |= (dev->pcmcia_map.bankwidth == 1) ? WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16; - req.Base = 0; - req.AccessSpeed = mem_speed; - link->win = (window_handle_t)link; - req.Size = (force_size) ? force_size << 20 : MAX_PCMCIA_ADDR; + link->resource[2]->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE; + link->resource[2]->flags |= (dev->pcmcia_map.bankwidth == 1) ? + WIN_DATA_WIDTH_8 : WIN_DATA_WIDTH_16; + link->resource[2]->start = 0; + link->resource[2]->end = (force_size) ? force_size << 20 : + MAX_PCMCIA_ADDR; dev->win_size = 0; do { int ret; - DEBUG(2, "requesting window with size = %dKiB memspeed = %d", - req.Size >> 10, req.AccessSpeed); - ret = pcmcia_request_window(link, &req, &link->win); + DEBUG(2, "requesting window with size = %luKiB memspeed = %d", + (unsigned long) resource_size(link->resource[2]) >> 10, + mem_speed); + ret = pcmcia_request_window(link, link->resource[2], mem_speed); DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size); if(ret) { - req.Size >>= 1; + j++; + link->resource[2]->start = 0; + link->resource[2]->end = (force_size) ? 
+ force_size << 20 : MAX_PCMCIA_ADDR; + link->resource[2]->end >>= j; } else { - DEBUG(2, "Got window of size %dKiB", req.Size >> 10); - dev->win_size = req.Size; + DEBUG(2, "Got window of size %luKiB", (unsigned long) + resource_size(link->resource[2]) >> 10); + dev->win_size = resource_size(link->resource[2]); break; } - } while(req.Size >= 0x1000); + } while (link->resource[2]->end >= 0x1000); DEBUG(2, "dev->win_size = %d", dev->win_size); @@ -553,33 +541,31 @@ static int pcmciamtd_config(struct pcmcia_device *link) DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10); /* Get write protect status */ - DEBUG(2, "window handle = 0x%8.8lx", (unsigned long)link->win); - dev->win_base = ioremap(req.Base, req.Size); + dev->win_base = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); if(!dev->win_base) { - dev_err(&dev->p_dev->dev, "ioremap(%lu, %u) failed\n", - req.Base, req.Size); + dev_err(&dev->p_dev->dev, "ioremap(%pR) failed\n", + link->resource[2]); pcmciamtd_release(link); return -ENODEV; } - DEBUG(1, "mapped window dev = %p req.base = 0x%lx base = %p size = 0x%x", - dev, req.Base, dev->win_base, req.Size); + DEBUG(1, "mapped window dev = %p @ %pR, base = %p", + dev, link->resource[2], dev->win_base); dev->offset = 0; dev->pcmcia_map.map_priv_1 = (unsigned long)dev; - dev->pcmcia_map.map_priv_2 = (unsigned long)link->win; + dev->pcmcia_map.map_priv_2 = (unsigned long)link->resource[2]; dev->vpp = (vpp) ? vpp : link->socket->socket.Vpp; - link->conf.Attributes = 0; if(setvpp == 2) { - link->conf.Vpp = dev->vpp; + link->vpp = dev->vpp; } else { - link->conf.Vpp = 0; + link->vpp = 0; } - link->conf.IntType = INT_MEMORY; - link->conf.ConfigIndex = 0; + link->config_index = 0; DEBUG(2, "Setting Configuration"); - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret != 0) { if (dev->win_base) { iounmap(dev->win_base); @@ -680,12 +666,6 @@ static int pcmciamtd_resume(struct pcmcia_device *dev) } -/* This deletes a driver "instance". The device is de-registered - * with Card Services. If it has been released, all local data - * structures are freed. Otherwise, the structures will be freed - * when the device is released. - */ - static void pcmciamtd_detach(struct pcmcia_device *link) { struct pcmciamtd_dev *dev = link->priv; @@ -703,11 +683,6 @@ static void pcmciamtd_detach(struct pcmcia_device *link) } -/* pcmciamtd_attach() creates an "instance" of the driver, allocating - * local data structures for one device. The device is registered - * with Card Services. 
- */ - static int pcmciamtd_probe(struct pcmcia_device *link) { struct pcmciamtd_dev *dev; @@ -720,9 +695,6 @@ static int pcmciamtd_probe(struct pcmcia_device *link) dev->p_dev = link; link->priv = dev; - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY; - return pcmciamtd_config(link); } @@ -757,9 +729,7 @@ static struct pcmcia_device_id pcmciamtd_ids[] = { MODULE_DEVICE_TABLE(pcmcia, pcmciamtd_ids); static struct pcmcia_driver pcmciamtd_driver = { - .drv = { - .name = "pcmciamtd" - }, + .name = "pcmciamtd", .probe = pcmciamtd_probe, .remove = pcmciamtd_detach, .owner = THIS_MODULE, @@ -771,8 +741,6 @@ static struct pcmcia_driver pcmciamtd_driver = { static int __init init_pcmciamtd(void) { - info(DRIVER_DESC); - if(bankwidth && bankwidth != 1 && bankwidth != 2) { info("bad bankwidth (%d), using default", bankwidth); bankwidth = 2; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 62e68707b07..50ab431b24e 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -29,7 +29,6 @@ #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/spinlock.h> -#include <linux/smp_lock.h> #include <linux/hdreg.h> #include <linux/init.h> #include <linux/mutex.h> @@ -38,6 +37,7 @@ #include "mtdcore.h" +static DEFINE_MUTEX(mtd_blkdevs_mutex); static LIST_HEAD(blktrans_majors); static DEFINE_MUTEX(blktrans_ref_mutex); @@ -181,7 +181,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) if (!dev) return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/ - lock_kernel(); + mutex_lock(&mtd_blkdevs_mutex); mutex_lock(&dev->lock); if (!dev->mtd) { @@ -198,7 +198,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode) unlock: mutex_unlock(&dev->lock); blktrans_dev_put(dev); - unlock_kernel(); + mutex_unlock(&mtd_blkdevs_mutex); return ret; } @@ -210,7 +210,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode) if (!dev) return ret; - lock_kernel(); + mutex_lock(&mtd_blkdevs_mutex); mutex_lock(&dev->lock); /* Release one reference, we sure its not the last one here*/ @@ -223,7 +223,7 @@ static int blktrans_release(struct gendisk *disk, fmode_t mode) unlock: mutex_unlock(&dev->lock); blktrans_dev_put(dev); - unlock_kernel(); + mutex_unlock(&mtd_blkdevs_mutex); return ret; } @@ -256,7 +256,7 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, if (!dev) return ret; - lock_kernel(); + mutex_lock(&mtd_blkdevs_mutex); mutex_lock(&dev->lock); if (!dev->mtd) @@ -271,7 +271,7 @@ static int blktrans_ioctl(struct block_device *bdev, fmode_t mode, } unlock: mutex_unlock(&dev->lock); - unlock_kernel(); + mutex_unlock(&mtd_blkdevs_mutex); blktrans_dev_put(dev); return ret; } diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index a825002123c..5ef45487b65 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -26,7 +26,7 @@ #include <linux/module.h> #include <linux/slab.h> #include <linux/sched.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/backing-dev.h> #include <linux/compat.h> #include <linux/mount.h> @@ -37,6 +37,7 @@ #include <asm/uaccess.h> #define MTD_INODE_FS_MAGIC 0x11307854 +static DEFINE_MUTEX(mtd_mutex); static struct vfsmount *mtd_inode_mnt __read_mostly; /* @@ -90,7 +91,7 @@ static int mtd_open(struct inode *inode, struct file *file) if ((file->f_mode & FMODE_WRITE) && (minor & 1)) return -EACCES; - lock_kernel(); + mutex_lock(&mtd_mutex); mtd = get_mtd_device(NULL, devnum); if (IS_ERR(mtd)) { @@ -138,7 +139,7 @@ static int mtd_open(struct inode *inode, 
struct file *file) file->private_data = mfi; out: - unlock_kernel(); + mutex_unlock(&mtd_mutex); return ret; } /* mtd_open */ @@ -866,9 +867,9 @@ static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg) { int ret; - lock_kernel(); + mutex_lock(&mtd_mutex); ret = mtd_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&mtd_mutex); return ret; } @@ -892,7 +893,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd, void __user *argp = compat_ptr(arg); int ret = 0; - lock_kernel(); + mutex_lock(&mtd_mutex); switch (cmd) { case MEMWRITEOOB32: @@ -927,7 +928,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd, ret = mtd_ioctl(file, cmd, (unsigned long)argp); } - unlock_kernel(); + mutex_unlock(&mtd_mutex); return ret; } diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index b2828e84d24..214b03afdd4 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -30,6 +30,8 @@ #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/irq.h> +#include <linux/completion.h> #include <asm/mach/flash.h> #include <mach/mxc_nand.h> @@ -151,7 +153,7 @@ struct mxc_nand_host { int irq; int eccsize; - wait_queue_head_t irq_waitq; + struct completion op_completion; uint8_t *data_buf; unsigned int buf_start; @@ -164,6 +166,7 @@ struct mxc_nand_host { void (*send_read_id)(struct mxc_nand_host *); uint16_t (*get_dev_status)(struct mxc_nand_host *); int (*check_int)(struct mxc_nand_host *); + void (*irq_control)(struct mxc_nand_host *, int); }; /* OOB placement block for use with hardware ecc generation */ @@ -216,9 +219,12 @@ static irqreturn_t mxc_nfc_irq(int irq, void *dev_id) { struct mxc_nand_host *host = dev_id; - disable_irq_nosync(irq); + if (!host->check_int(host)) + return IRQ_NONE; - wake_up(&host->irq_waitq); + host->irq_control(host, 0); + + complete(&host->op_completion); return IRQ_HANDLED; } @@ -245,11 +251,54 @@ static int check_int_v1_v2(struct mxc_nand_host *host) if (!(tmp & NFC_V1_V2_CONFIG2_INT)) return 0; - writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); + if (!cpu_is_mx21()) + writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); return 1; } +/* + * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit + * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the + * driver can enable/disable the irq line rather than simply masking the + * interrupts. + */ +static void irq_control_mx21(struct mxc_nand_host *host, int activate) +{ + if (activate) + enable_irq(host->irq); + else + disable_irq_nosync(host->irq); +} + +static void irq_control_v1_v2(struct mxc_nand_host *host, int activate) +{ + uint16_t tmp; + + tmp = readw(NFC_V1_V2_CONFIG1); + + if (activate) + tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK; + else + tmp |= NFC_V1_V2_CONFIG1_INT_MSK; + + writew(tmp, NFC_V1_V2_CONFIG1); +} + +static void irq_control_v3(struct mxc_nand_host *host, int activate) +{ + uint32_t tmp; + + tmp = readl(NFC_V3_CONFIG2); + + if (activate) + tmp &= ~NFC_V3_CONFIG2_INT_MSK; + else + tmp |= NFC_V3_CONFIG2_INT_MSK; + + writel(tmp, NFC_V3_CONFIG2); +} + /* This function polls the NANDFC to wait for the basic operation to * complete by checking the INT bit of config2 register. 
*/ @@ -259,10 +308,9 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq) if (useirq) { if (!host->check_int(host)) { - - enable_irq(host->irq); - - wait_event(host->irq_waitq, host->check_int(host)); + INIT_COMPLETION(host->op_completion); + host->irq_control(host, 1); + wait_for_completion(&host->op_completion); } } else { while (max_retries-- > 0) { @@ -799,6 +847,7 @@ static void preset_v3(struct mtd_info *mtd) NFC_V3_CONFIG2_2CMD_PHASES | NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) | NFC_V3_CONFIG2_ST_CMD(0x70) | + NFC_V3_CONFIG2_INT_MSK | NFC_V3_CONFIG2_NUM_ADDR_PHASE0; if (chip->ecc.mode == NAND_ECC_HW) @@ -1024,6 +1073,10 @@ static int __init mxcnd_probe(struct platform_device *pdev) host->send_read_id = send_read_id_v1_v2; host->get_dev_status = get_dev_status_v1_v2; host->check_int = check_int_v1_v2; + if (cpu_is_mx21()) + host->irq_control = irq_control_mx21; + else + host->irq_control = irq_control_v1_v2; } if (nfc_is_v21()) { @@ -1062,6 +1115,7 @@ static int __init mxcnd_probe(struct platform_device *pdev) host->send_read_id = send_read_id_v3; host->check_int = check_int_v3; host->get_dev_status = get_dev_status_v3; + host->irq_control = irq_control_v3; oob_smallpage = &nandv2_hw_eccoob_smallpage; oob_largepage = &nandv2_hw_eccoob_largepage; } else @@ -1093,14 +1147,34 @@ static int __init mxcnd_probe(struct platform_device *pdev) this->options |= NAND_USE_FLASH_BBT; } - init_waitqueue_head(&host->irq_waitq); + init_completion(&host->op_completion); host->irq = platform_get_irq(pdev, 0); + /* + * mask the interrupt. For i.MX21 explicitly call + * irq_control_v1_v2 to use the mask bit. We can't call + * disable_irq_nosync() for an interrupt we do not own yet. + */ + if (cpu_is_mx21()) + irq_control_v1_v2(host, 0); + else + host->irq_control(host, 0); + err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); if (err) goto eirq; + host->irq_control(host, 0); + + /* + * Now that the interrupt is disabled, make sure the interrupt + * mask bit is cleared on i.MX21. Otherwise we can't read + * the interrupt status bit on this machine. + */ + if (cpu_is_mx21()) + irq_control_v1_v2(host, 1); + /* first scan to find the device and get the page size */ if (nand_scan_ident(mtd, 1, NULL)) { err = -ENXIO; diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 133d51528f8..513e0a76a4a 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -413,7 +413,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, prefetch_status = gpmc_read_status(GPMC_PREFETCH_COUNT); } while (prefetch_status); /* disable and stop the PFPW engine */ - gpmc_prefetch_reset(); + gpmc_prefetch_reset(info->gpmc_cs); dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); return 0; diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig index f702a163d8d..3cf193fb5e0 100644 --- a/drivers/mtd/ubi/Kconfig +++ b/drivers/mtd/ubi/Kconfig @@ -1,9 +1,5 @@ -menu "UBI - Unsorted block images" - depends on MTD - -config MTD_UBI - tristate "Enable UBI" - depends on MTD +menuconfig MTD_UBI + tristate "Enable UBI - Unsorted block images" select CRC32 help UBI is a software layer above MTD layer which admits of LVM-like @@ -12,11 +8,12 @@ config MTD_UBI capabilities. Please, consult the MTD web site for more details (www.linux-mtd.infradead.org). 
+if MTD_UBI + config MTD_UBI_WL_THRESHOLD int "UBI wear-leveling threshold" default 4096 range 2 65536 - depends on MTD_UBI help This parameter defines the maximum difference between the highest erase counter value and the lowest erase counter value of eraseblocks @@ -34,7 +31,6 @@ config MTD_UBI_BEB_RESERVE int "Percentage of reserved eraseblocks for bad eraseblocks handling" default 1 range 0 25 - depends on MTD_UBI help If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI reserves some amount of physical eraseblocks to handle new bad @@ -48,8 +44,6 @@ config MTD_UBI_BEB_RESERVE config MTD_UBI_GLUEBI tristate "MTD devices emulation driver (gluebi)" - default n - depends on MTD_UBI help This option enables gluebi - an additional driver which emulates MTD devices on top of UBI volumes: for each UBI volumes an MTD device is @@ -59,4 +53,5 @@ config MTD_UBI_GLUEBI software. source "drivers/mtd/ubi/Kconfig.debug" -endmenu + +endif # MTD_UBI diff --git a/drivers/mtd/ubi/Kconfig.debug b/drivers/mtd/ubi/Kconfig.debug index 61f6e5e4045..fad4adc0fe2 100644 --- a/drivers/mtd/ubi/Kconfig.debug +++ b/drivers/mtd/ubi/Kconfig.debug @@ -1,94 +1,73 @@ comment "UBI debugging options" - depends on MTD_UBI config MTD_UBI_DEBUG bool "UBI debugging" depends on SYSFS - depends on MTD_UBI select DEBUG_FS select KALLSYMS_ALL if KALLSYMS && DEBUG_KERNEL help This option enables UBI debugging. +if MTD_UBI_DEBUG + config MTD_UBI_DEBUG_MSG bool "UBI debugging messages" - depends on MTD_UBI_DEBUG - default n help This option enables UBI debugging messages. config MTD_UBI_DEBUG_PARANOID bool "Extra self-checks" - default n - depends on MTD_UBI_DEBUG help This option enables extra checks in UBI code. Note this slows UBI down significantly. config MTD_UBI_DEBUG_DISABLE_BGT bool "Do not enable the UBI background thread" - depends on MTD_UBI_DEBUG - default n help This option switches the background thread off by default. The thread may be also be enabled/disabled via UBI sysfs. config MTD_UBI_DEBUG_EMULATE_BITFLIPS bool "Emulate flash bit-flips" - depends on MTD_UBI_DEBUG - default n help This option emulates bit-flips with probability 1/50, which in turn causes scrubbing. Useful for debugging and stressing UBI. config MTD_UBI_DEBUG_EMULATE_WRITE_FAILURES bool "Emulate flash write failures" - depends on MTD_UBI_DEBUG - default n help This option emulates write failures with probability 1/100. Useful for debugging and testing how UBI handlines errors. config MTD_UBI_DEBUG_EMULATE_ERASE_FAILURES bool "Emulate flash erase failures" - depends on MTD_UBI_DEBUG - default n help This option emulates erase failures with probability 1/100. Useful for debugging and testing how UBI handlines errors. -menu "Additional UBI debugging messages" - depends on MTD_UBI_DEBUG +comment "Additional UBI debugging messages" config MTD_UBI_DEBUG_MSG_BLD bool "Additional UBI initialization and build messages" - default n - depends on MTD_UBI_DEBUG help This option enables detailed UBI initialization and device build debugging messages. config MTD_UBI_DEBUG_MSG_EBA bool "Eraseblock association unit messages" - default n - depends on MTD_UBI_DEBUG help This option enables debugging messages from the UBI eraseblock association unit. config MTD_UBI_DEBUG_MSG_WL bool "Wear-leveling unit messages" - default n - depends on MTD_UBI_DEBUG help This option enables debugging messages from the UBI wear-leveling unit. 
config MTD_UBI_DEBUG_MSG_IO bool "Input/output unit messages" - default n - depends on MTD_UBI_DEBUG help This option enables debugging messages from the UBI input/output unit. -endmenu # UBI debugging messages +endif # MTD_UBI_DEBUG diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 78ae89488a4..5ebe280225d 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -95,8 +95,8 @@ DEFINE_MUTEX(ubi_devices_mutex); static DEFINE_SPINLOCK(ubi_devices_lock); /* "Show" method for files in '/<sysfs>/class/ubi/' */ -static ssize_t ubi_version_show(struct class *class, struct class_attribute *attr, - char *buf) +static ssize_t ubi_version_show(struct class *class, + struct class_attribute *attr, char *buf) { return sprintf(buf, "%d\n", UBI_VERSION); } @@ -591,6 +591,7 @@ static int attach_by_scanning(struct ubi_device *ubi) ubi->bad_peb_count = si->bad_peb_count; ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count; + ubi->corr_peb_count = si->corr_peb_count; ubi->max_ec = si->max_ec; ubi->mean_ec = si->mean_ec; ubi_msg("max. sequence number: %llu", si->max_sqnum); @@ -972,6 +973,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20); ubi_msg("number of good PEBs: %d", ubi->good_peb_count); ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count); + ubi_msg("number of corrupted PEBs: %d", ubi->corr_peb_count); ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots); ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD); ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT); diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c index 3d2d1a69e9a..af9fb0ff821 100644 --- a/drivers/mtd/ubi/cdev.c +++ b/drivers/mtd/ubi/cdev.c @@ -1100,4 +1100,5 @@ const struct file_operations ubi_ctrl_cdev_operations = { .owner = THIS_MODULE, .unlocked_ioctl = ctrl_cdev_ioctl, .compat_ioctl = ctrl_cdev_compat_ioctl, + .llseek = noop_llseek, }; diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h index 17a10712972..9eca95074bc 100644 --- a/drivers/mtd/ubi/debug.h +++ b/drivers/mtd/ubi/debug.h @@ -57,6 +57,9 @@ void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len); +#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) \ + print_hex_dump(l, ps, pt, r, g, b, len, a) + #ifdef CONFIG_MTD_UBI_DEBUG_MSG /* General debugging messages */ #define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) @@ -172,6 +175,7 @@ static inline int ubi_dbg_is_erase_failure(void) #define ubi_dbg_dump_seb(seb, type) ({}) #define ubi_dbg_dump_mkvol_req(req) ({}) #define ubi_dbg_dump_flash(ubi, pnum, offset, len) ({}) +#define ubi_dbg_print_hex_dump(l, ps, pt, r, g, b, len, a) ({}) #define UBI_IO_DEBUG 0 #define DBG_DISABLE_BGT 0 diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c index fe74749e0da..4be67181501 100644 --- a/drivers/mtd/ubi/eba.c +++ b/drivers/mtd/ubi/eba.c @@ -418,7 +418,7 @@ retry: * may try to recover data. FIXME: but this is * not implemented. 
*/ - if (err == UBI_IO_BAD_HDR_READ || + if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_BAD_HDR) { ubi_warn("corrupted VID header at PEB " "%d, LEB %d:%d", pnum, vol_id, @@ -963,7 +963,7 @@ write_error: static int is_error_sane(int err) { if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR || - err == UBI_IO_BAD_HDR_READ || err == -ETIMEDOUT) + err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT) return 0; return 1; } @@ -1201,6 +1201,9 @@ static void print_rsvd_warning(struct ubi_device *ubi, ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d," " need %d", ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); + if (ubi->corr_peb_count) + ubi_warn("%d PEBs are corrupted and not used", + ubi->corr_peb_count); } /** @@ -1263,6 +1266,9 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) if (ubi->avail_pebs < EBA_RESERVED_PEBS) { ubi_err("no enough physical eraseblocks (%d, need %d)", ubi->avail_pebs, EBA_RESERVED_PEBS); + if (ubi->corr_peb_count) + ubi_err("%d PEBs are corrupted and not used", + ubi->corr_peb_count); err = -ENOSPC; goto out_free; } diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index 332f992f13d..c2960ac9f39 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -376,25 +376,6 @@ retry: return 0; } -/** - * check_pattern - check if buffer contains only a certain byte pattern. - * @buf: buffer to check - * @patt: the pattern to check - * @size: buffer size in bytes - * - * This function returns %1 in there are only @patt bytes in @buf, and %0 if - * something else was also found. - */ -static int check_pattern(const void *buf, uint8_t patt, int size) -{ - int i; - - for (i = 0; i < size; i++) - if (((const uint8_t *)buf)[i] != patt) - return 0; - return 1; -} - /* Patterns to write to a physical eraseblock when torturing it */ static uint8_t patterns[] = {0xa5, 0x5a, 0x0}; @@ -426,7 +407,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum) if (err) goto out; - err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size); + err = ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size); if (err == 0) { ubi_err("erased PEB %d, but a non-0xFF byte found", pnum); @@ -445,7 +426,8 @@ static int torture_peb(struct ubi_device *ubi, int pnum) if (err) goto out; - err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size); + err = ubi_check_pattern(ubi->peb_buf1, patterns[i], + ubi->peb_size); if (err == 0) { ubi_err("pattern %x checking failed for PEB %d", patterns[i], pnum); @@ -517,7 +499,7 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) * In this case we probably anyway have garbage in this PEB. */ err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); - if (err1 == UBI_IO_BAD_HDR_READ || err1 == UBI_IO_BAD_HDR) + if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) /* * The VID header is corrupted, so we can safely erase this * PEB and not afraid that it will be treated as a valid PEB in @@ -712,47 +694,47 @@ bad: * and corrected by the flash driver; this is harmless but may indicate that * this eraseblock may become bad soon (but may be not); * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error); - * o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty; + * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was + * a data integrity error (uncorrectable ECC error in case of NAND); + * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty) * o a negative error code in case of failure. 
*/ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, struct ubi_ec_hdr *ec_hdr, int verbose) { - int err, read_err = 0; + int err, read_err; uint32_t crc, magic, hdr_crc; dbg_io("read EC header from PEB %d", pnum); ubi_assert(pnum >= 0 && pnum < ubi->peb_count); - err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); - if (err) { - if (err != UBI_IO_BITFLIPS && err != -EBADMSG) - return err; + read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE); + if (read_err) { + if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) + return read_err; /* * We read all the data, but either a correctable bit-flip - * occurred, or MTD reported about some data integrity error, - * like an ECC error in case of NAND. The former is harmless, - * the later may mean that the read data is corrupted. But we - * have a CRC check-sum and we will detect this. If the EC - * header is still OK, we just report this as there was a - * bit-flip. + * occurred, or MTD reported a data integrity error + * (uncorrectable ECC error in case of NAND). The former is + * harmless, the latter may mean that the read data is + * corrupted. But we have a CRC check-sum and we will detect + * this. If the EC header is still OK, we just report this as + * there was a bit-flip, to force scrubbing. */ - if (err == -EBADMSG) - read_err = UBI_IO_BAD_HDR_READ; } magic = be32_to_cpu(ec_hdr->magic); if (magic != UBI_EC_HDR_MAGIC) { - if (read_err) - return read_err; + if (read_err == -EBADMSG) + return UBI_IO_BAD_HDR_EBADMSG; /* * The magic field is wrong. Let's check if we have read all * 0xFF. If yes, this physical eraseblock is assumed to be * empty. */ - if (check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { + if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { /* The physical eraseblock is supposedly empty */ if (verbose) ubi_warn("no EC header found at PEB %d, " @@ -760,7 +742,10 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, else if (UBI_IO_DEBUG) dbg_msg("no EC header found at PEB %d, " "only 0xFF bytes", pnum); - return UBI_IO_PEB_EMPTY; + if (!read_err) + return UBI_IO_FF; + else + return UBI_IO_FF_BITFLIPS; } /* @@ -788,7 +773,11 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, } else if (UBI_IO_DEBUG) dbg_msg("bad EC header CRC at PEB %d, calculated " "%#08x, read %#08x", pnum, crc, hdr_crc); - return read_err ?: UBI_IO_BAD_HDR; + + if (!read_err) + return UBI_IO_BAD_HDR; + else + return UBI_IO_BAD_HDR_EBADMSG; } /* And of course validate what has just been read from the media */ @@ -975,22 +964,16 @@ bad: * * This function reads the volume identifier header from physical eraseblock * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read - * volume identifier header. The following codes may be returned: + * volume identifier header. The error codes are the same as in + * 'ubi_io_read_ec_hdr()'. * - * o %0 if the CRC checksum is correct and the header was successfully read; - * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected - * and corrected by the flash driver; this is harmless but may indicate that - * this eraseblock may become bad soon; - * o %UBI_IO_BAD_HDR if the volume identifier header is corrupted (a CRC - * error detected); - * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID - * header there); - * o a negative error code in case of failure. + * Note, the implementation of this function is also very similar to + * 'ubi_io_read_ec_hdr()', so refer to the commentaries in 'ubi_io_read_ec_hdr()'. 
*/ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, struct ubi_vid_hdr *vid_hdr, int verbose) { - int err, read_err = 0; + int err, read_err; uint32_t crc, magic, hdr_crc; void *p; @@ -998,48 +981,29 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, ubi_assert(pnum >= 0 && pnum < ubi->peb_count); p = (char *)vid_hdr - ubi->vid_hdr_shift; - err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, + read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset, ubi->vid_hdr_alsize); - if (err) { - if (err != UBI_IO_BITFLIPS && err != -EBADMSG) - return err; - - /* - * We read all the data, but either a correctable bit-flip - * occurred, or MTD reported about some data integrity error, - * like an ECC error in case of NAND. The former is harmless, - * the later may mean the read data is corrupted. But we have a - * CRC check-sum and we will identify this. If the VID header is - * still OK, we just report this as there was a bit-flip. - */ - if (err == -EBADMSG) - read_err = UBI_IO_BAD_HDR_READ; - } + if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG) + return read_err; magic = be32_to_cpu(vid_hdr->magic); if (magic != UBI_VID_HDR_MAGIC) { - if (read_err) - return read_err; + if (read_err == -EBADMSG) + return UBI_IO_BAD_HDR_EBADMSG; - /* - * If we have read all 0xFF bytes, the VID header probably does - * not exist and the physical eraseblock is assumed to be free. - */ - if (check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { - /* The physical eraseblock is supposedly free */ + if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { if (verbose) ubi_warn("no VID header found at PEB %d, " "only 0xFF bytes", pnum); else if (UBI_IO_DEBUG) dbg_msg("no VID header found at PEB %d, " "only 0xFF bytes", pnum); - return UBI_IO_PEB_FREE; + if (!read_err) + return UBI_IO_FF; + else + return UBI_IO_FF_BITFLIPS; } - /* - * This is not a valid VID header, and these are not 0xFF - * bytes. Report that the header is corrupted. - */ if (verbose) { ubi_warn("bad magic number at PEB %d: %08x instead of " "%08x", pnum, magic, UBI_VID_HDR_MAGIC); @@ -1061,20 +1025,18 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, } else if (UBI_IO_DEBUG) dbg_msg("bad CRC at PEB %d, calculated %#08x, " "read %#08x", pnum, crc, hdr_crc); - return read_err ?: UBI_IO_BAD_HDR; + if (!read_err) + return UBI_IO_BAD_HDR; + else + return UBI_IO_BAD_HDR_EBADMSG; } - /* Validate the VID header that we have just read */ err = validate_vid_hdr(ubi, vid_hdr); if (err) { ubi_err("validation failed for PEB %d", pnum); return -EINVAL; } - /* - * If there was a read error (%-EBADMSG), but the header CRC is still - * OK, report about a bit-flip to force scrubbing on this PEB. - */ return read_err ? UBI_IO_BITFLIPS : 0; } @@ -1383,7 +1345,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len) goto error; } - err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); + err = ubi_check_pattern(ubi->dbg_peb_buf, 0xFF, len); if (err == 0) { ubi_err("flash region at PEB %d:%d, length %d does not " "contain all 0xFF bytes", pnum, offset, len); diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c index 22ad3140294..ff2a65c37f6 100644 --- a/drivers/mtd/ubi/misc.c +++ b/drivers/mtd/ubi/misc.c @@ -103,3 +103,22 @@ void ubi_calculate_reserved(struct ubi_device *ubi) if (ubi->beb_rsvd_level < MIN_RESEVED_PEBS) ubi->beb_rsvd_level = MIN_RESEVED_PEBS; } + +/** + * ubi_check_pattern - check if buffer contains only a certain byte pattern. 
+ * @buf: buffer to check + * @patt: the pattern to check + * @size: buffer size in bytes + * + * This function returns %1 if there are only @patt bytes in @buf, and %0 if + * something else was also found. + */ +int ubi_check_pattern(const void *buf, uint8_t patt, int size) +{ + int i; + + for (i = 0; i < size; i++) + if (((const uint8_t *)buf)[i] != patt) + return 0; + return 1; +} diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 69b52e9c948..3c631863bf4 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -29,7 +29,7 @@ * objects which are kept in volume RB-tree with root at the @volumes field. * The RB-tree is indexed by the volume ID. * - * Found logical eraseblocks are represented by &struct ubi_scan_leb objects. + * Scanned logical eraseblocks are represented by &struct ubi_scan_leb objects. * These objects are kept in per-volume RB-trees with the root at the * corresponding &struct ubi_scan_volume object. To put it differently, we keep * an RB-tree of per-volume objects and each of these objects is the root of @@ -38,6 +38,33 @@ * Corrupted physical eraseblocks are put to the @corr list, free physical * eraseblocks are put to the @free list and the physical eraseblock to be * erased are put to the @erase list. + * + * UBI tries to distinguish between 2 types of corruptions. + * 1. Corruptions caused by power cuts. These are harmless and expected + * corruptions and UBI tries to handle them gracefully, without printing too + * many warnings and error messages. The idea is that we do not lose + * important data in these cases - we may lose only the data which was being + * written to the media just before the power cut happened, and the upper + * layers (e.g., UBIFS) are supposed to handle these situations. UBI puts + * these PEBs to the head of the @erase list and they are scheduled for + * erasure. + * + * 2. Unexpected corruptions which are not caused by power cuts. During + * scanning, such PEBs are put to the @corr list and UBI preserves them. + * Obviously, this lessens the amount of available PEBs, and if at some + * point UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly + * informs about such PEBs every time the MTD device is attached. + * + * However, it is difficult to reliably distinguish between these types of + * corruptions and UBI's strategy is as follows. UBI assumes (2.) if the VID + * header is corrupted and the data area does not contain all 0xFFs, and there + * were no bit-flips or integrity errors while reading the data area. Otherwise + * UBI assumes (1.). The assumptions are: + * o if the data area contains only 0xFFs, there is no data, and it is safe + * to just erase this PEB. + * o if the data area has bit-flips and data integrity errors (ECC errors on + * NAND), it is probably a PEB which was being erased when a power cut + * happened. */ #include <linux/err.h> @@ -62,26 +89,26 @@ static struct ubi_vid_hdr *vidh; * @si: scanning information * @pnum: physical eraseblock number to add * @ec: erase counter of the physical eraseblock + * @to_head: if not zero, add to the head of the list * @list: the list to add to * - * This function adds physical eraseblock @pnum to free, erase, corrupted or - * alien lists. Returns zero in case of success and a negative error code in - * case of failure. + * This function adds physical eraseblock @pnum to free, erase, or alien lists. + * If @to_head is not zero, PEB will be added to the head of the list, which + * basically means it will be processed first later. 
E.g., we add corrupted + * PEBs (corrupted due to power cuts) to the head of the erase list to make + * sure we erase them first and get rid of corruptions ASAP. This function + * returns zero in case of success and a negative error code in case of + * failure. */ -static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, +static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, int to_head, struct list_head *list) { struct ubi_scan_leb *seb; if (list == &si->free) { dbg_bld("add to free: PEB %d, EC %d", pnum, ec); - si->free_peb_count += 1; } else if (list == &si->erase) { dbg_bld("add to erase: PEB %d, EC %d", pnum, ec); - si->erase_peb_count += 1; - } else if (list == &si->corr) { - dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); - si->corr_peb_count += 1; } else if (list == &si->alien) { dbg_bld("add to alien: PEB %d, EC %d", pnum, ec); si->alien_peb_count += 1; @@ -94,7 +121,37 @@ static int add_to_list(struct ubi_scan_info *si, int pnum, int ec, seb->pnum = pnum; seb->ec = ec; - list_add_tail(&seb->u.list, list); + if (to_head) + list_add(&seb->u.list, list); + else + list_add_tail(&seb->u.list, list); + return 0; +} + +/** + * add_corrupted - add a corrupted physical eraseblock. + * @si: scanning information + * @pnum: physical eraseblock number to add + * @ec: erase counter of the physical eraseblock + * + * This function adds corrupted physical eraseblock @pnum to the 'corr' list. + * The corruption was presumably not caused by a power cut. Returns zero in + * case of success and a negative error code in case of failure. + */ +static int add_corrupted(struct ubi_scan_info *si, int pnum, int ec) +{ + struct ubi_scan_leb *seb; + + dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec); + + seb = kmalloc(sizeof(struct ubi_scan_leb), GFP_KERNEL); + if (!seb) + return -ENOMEM; + + si->corr_peb_count += 1; + seb->pnum = pnum; + seb->ec = ec; + list_add(&seb->u.list, &si->corr); return 0; } @@ -258,8 +315,8 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, * created before sequence numbers support has been added. At * that times we used 32-bit LEB versions stored in logical * eraseblocks. That was before UBI got into mainline. We do not - * support these images anymore. Well, those images will work - * still work, but only if no unclean reboots happened. + * support these images anymore. Well, those images still work, + * but only if no unclean reboots happened. 
*/ ubi_err("unsupported on-flash UBI format\n"); return -EINVAL; @@ -285,19 +342,25 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, return 1; } } else { - pnum = seb->pnum; + if (!seb->copy_flag) { + /* It is not a copy, so it is newer */ + dbg_bld("first PEB %d is newer, copy_flag is unset", + pnum); + return bitflips << 1; + } vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); if (!vh) return -ENOMEM; + pnum = seb->pnum; err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); if (err) { if (err == UBI_IO_BITFLIPS) bitflips = 1; else { dbg_err("VID of PEB %d header is bad, but it " - "was OK earlier", pnum); + "was OK earlier, err %d", pnum, err); if (err > 0) err = -EIO; @@ -305,14 +368,6 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb, } } - if (!vh->copy_flag) { - /* It is not a copy, so it is newer */ - dbg_bld("first PEB %d is newer, copy_flag is unset", - pnum); - err = bitflips << 1; - goto out_free_vidh; - } - vid_hdr = vh; } @@ -463,18 +518,15 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, if (err) return err; - if (cmp_res & 4) - err = add_to_list(si, seb->pnum, seb->ec, - &si->corr); - else - err = add_to_list(si, seb->pnum, seb->ec, - &si->erase); + err = add_to_list(si, seb->pnum, seb->ec, cmp_res & 4, + &si->erase); if (err) return err; seb->ec = ec; seb->pnum = pnum; seb->scrub = ((cmp_res & 2) || bitflips); + seb->copy_flag = vid_hdr->copy_flag; seb->sqnum = sqnum; if (sv->highest_lnum == lnum) @@ -487,10 +539,8 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, * This logical eraseblock is older than the one found * previously. */ - if (cmp_res & 4) - return add_to_list(si, pnum, ec, &si->corr); - else - return add_to_list(si, pnum, ec, &si->erase); + return add_to_list(si, pnum, ec, cmp_res & 4, + &si->erase); } } @@ -510,8 +560,9 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, seb->ec = ec; seb->pnum = pnum; seb->lnum = lnum; - seb->sqnum = sqnum; seb->scrub = bitflips; + seb->copy_flag = vid_hdr->copy_flag; + seb->sqnum = sqnum; if (sv->highest_lnum <= lnum) { sv->highest_lnum = lnum; @@ -521,7 +572,6 @@ int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si, sv->leb_count += 1; rb_link_node(&seb->u.rb, parent, p); rb_insert_color(&seb->u.rb, &sv->root); - si->used_peb_count += 1; return 0; } @@ -668,8 +718,8 @@ out_free: struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, struct ubi_scan_info *si) { - int err = 0, i; - struct ubi_scan_leb *seb; + int err = 0; + struct ubi_scan_leb *seb, *tmp_seb; if (!list_empty(&si->free)) { seb = list_entry(si->free.next, struct ubi_scan_leb, u.list); @@ -678,38 +728,86 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi, return seb; } - for (i = 0; i < 2; i++) { - struct list_head *head; - struct ubi_scan_leb *tmp_seb; + /* + * We try to erase the first physical eraseblock from the erase list + * and pick it if we succeed, or try to erase the next one if not. And + * so forth. We don't want to take care about bad eraseblocks here - + * they'll be handled later. 
+ */ + list_for_each_entry_safe(seb, tmp_seb, &si->erase, u.list) { + if (seb->ec == UBI_SCAN_UNKNOWN_EC) + seb->ec = si->mean_ec; - if (i == 0) - head = &si->erase; - else - head = &si->corr; + err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1); + if (err) + continue; + seb->ec += 1; + list_del(&seb->u.list); + dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec); + return seb; + } + + ubi_err("no free eraseblocks"); + return ERR_PTR(-ENOSPC); +} + +/** + * check_corruption - check the data area of PEB. + * @ubi: UBI device description object + * @vid_hdr: the (corrupted) VID header of this PEB + * @pnum: the physical eraseblock number to check + * + * This is a helper function which is used to distinguish between VID header + * corruptions caused by power cuts and other reasons. If the PEB contains only + * 0xFF bytes in the data area, the VID header is most probably corrupted + * because of a power cut (%0 is returned in this case). Otherwise, it was + * probably corrupted for some other reasons (%1 is returned in this case). A + * negative error code is returned if a read error occurred. + * + * If the corruption reason was a power cut, UBI can safely erase this PEB. + * Otherwise, it should preserve it to avoid possibly destroying important + * information. + */ +static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, + int pnum) +{ + int err; + + mutex_lock(&ubi->buf_mutex); + memset(ubi->peb_buf1, 0x00, ubi->leb_size); + + err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start, + ubi->leb_size); + if (err == UBI_IO_BITFLIPS || err == -EBADMSG) { /* - * We try to erase the first physical eraseblock from the @head - * list and pick it if we succeed, or try to erase the - * next one if not. And so forth. We don't want to take care - * about bad eraseblocks here - they'll be handled later. + * Bit-flips or integrity errors while reading the data area. + * It is difficult to say for sure what type of corruption + * this is, but presumably a power cut happened while this PEB was + * erased, so it became unstable and corrupted, and should be + * erased. 
*/ - list_for_each_entry_safe(seb, tmp_seb, head, u.list) { - if (seb->ec == UBI_SCAN_UNKNOWN_EC) - seb->ec = si->mean_ec; + return 0; + } - err = ubi_scan_erase_peb(ubi, si, seb->pnum, seb->ec+1); - if (err) - continue; + if (err) + return err; - seb->ec += 1; - list_del(&seb->u.list); - dbg_bld("return PEB %d, EC %d", seb->pnum, seb->ec); - return seb; - } + if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) { + mutex_unlock(&ubi->buf_mutex); + return 0; } - ubi_err("no eraseblocks found"); - return ERR_PTR(-ENOSPC); + ubi_err("PEB %d contains corrupted VID header, and the data does not " + "contain all 0xFF, this may be a non-UBI PEB or a severe VID " + "header corruption which requires manual inspection", pnum); + ubi_dbg_dump_vid_hdr(vid_hdr); + dbg_msg("hexdump of PEB %d offset %d, length %d", + pnum, ubi->leb_start, ubi->leb_size); + ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, + ubi->peb_buf1, ubi->leb_size, 1); + mutex_unlock(&ubi->buf_mutex); + return 1; } /** @@ -725,7 +823,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) { long long uninitialized_var(ec); - int err, bitflips = 0, vol_id, ec_corr = 0; + int err, bitflips = 0, vol_id, ec_err = 0; dbg_bld("scan PEB %d", pnum); @@ -746,22 +844,37 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (err < 0) return err; - else if (err == UBI_IO_BITFLIPS) + switch (err) { + case 0: + break; + case UBI_IO_BITFLIPS: bitflips = 1; - else if (err == UBI_IO_PEB_EMPTY) - return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, &si->erase); - else if (err == UBI_IO_BAD_HDR_READ || err == UBI_IO_BAD_HDR) { + break; + case UBI_IO_FF: + si->empty_peb_count += 1; + return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 0, + &si->erase); + case UBI_IO_FF_BITFLIPS: + si->empty_peb_count += 1; + return add_to_list(si, pnum, UBI_SCAN_UNKNOWN_EC, 1, + &si->erase); + case UBI_IO_BAD_HDR_EBADMSG: + case UBI_IO_BAD_HDR: /* * We have to also look at the VID header, possibly it is not * corrupted. Set %bitflips flag in order to make this PEB be * moved and EC be re-created. */ - ec_corr = err; + ec_err = err; ec = UBI_SCAN_UNKNOWN_EC; bitflips = 1; + break; + default: + ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err); + return -EINVAL; } - if (!ec_corr) { + if (!ec_err) { int image_seq; /* Make sure UBI version is OK */ @@ -814,24 +927,67 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0); if (err < 0) return err; - else if (err == UBI_IO_BITFLIPS) + switch (err) { + case 0: + break; + case UBI_IO_BITFLIPS: bitflips = 1; - else if (err == UBI_IO_BAD_HDR_READ || err == UBI_IO_BAD_HDR || - (err == UBI_IO_PEB_FREE && ec_corr)) { - /* VID header is corrupted */ - if (err == UBI_IO_BAD_HDR_READ || - ec_corr == UBI_IO_BAD_HDR_READ) - si->read_err_count += 1; - err = add_to_list(si, pnum, ec, &si->corr); + break; + case UBI_IO_BAD_HDR_EBADMSG: + if (ec_err == UBI_IO_BAD_HDR_EBADMSG) + /* + * Both EC and VID headers are corrupted and were read + * with data integrity error, probably this is a bad + * PEB, but it is not marked as bad yet. This may also + * be a result of power cut during erasure. + */ + si->maybe_bad_peb_count += 1; + case UBI_IO_BAD_HDR: + if (ec_err) + /* + * Both headers are corrupted. There is a possibility + * that this is a valid UBI PEB which has a corresponding + * LEB, but the headers are corrupted. 
However, it is + * impossible to distinguish it from a PEB which just + * contains garbage because of a power cut during erase + * operation. So we just schedule this PEB for erasure. + */ + err = 0; + else + /* + * The EC was OK, but the VID header is corrupted. We + * have to check what is in the data area. + */ + err = check_corruption(ubi, vidh, pnum); + + if (err < 0) + return err; + else if (!err) + /* This corruption is caused by a power cut */ + err = add_to_list(si, pnum, ec, 1, &si->erase); + else + /* This is an unexpected corruption */ + err = add_corrupted(si, pnum, ec); if (err) return err; goto adjust_mean_ec; - } else if (err == UBI_IO_PEB_FREE) { - /* No VID header - the physical eraseblock is free */ - err = add_to_list(si, pnum, ec, &si->free); + case UBI_IO_FF_BITFLIPS: + err = add_to_list(si, pnum, ec, 1, &si->erase); if (err) return err; goto adjust_mean_ec; + case UBI_IO_FF: + if (ec_err) + err = add_to_list(si, pnum, ec, 1, &si->erase); + else + err = add_to_list(si, pnum, ec, 0, &si->free); + if (err) + return err; + goto adjust_mean_ec; + default: + ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d", + err); + return -EINVAL; } vol_id = be32_to_cpu(vidh->vol_id); @@ -843,7 +999,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, case UBI_COMPAT_DELETE: ubi_msg("\"delete\" compatible internal volume %d:%d" " found, will remove it", vol_id, lnum); - err = add_to_list(si, pnum, ec, &si->erase); + err = add_to_list(si, pnum, ec, 1, &si->erase); if (err) return err; return 0; @@ -858,7 +1014,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, case UBI_COMPAT_PRESERVE: ubi_msg("\"preserve\" compatible internal volume %d:%d" " found", vol_id, lnum); - err = add_to_list(si, pnum, ec, &si->alien); + err = add_to_list(si, pnum, ec, 0, &si->alien); if (err) return err; return 0; @@ -870,7 +1026,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, } } - if (ec_corr) + if (ec_err) ubi_warn("valid VID header but corrupted EC header at PEB %d", pnum); err = ubi_scan_add_used(ubi, si, pnum, ec, vidh, bitflips); @@ -878,7 +1034,7 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, return err; adjust_mean_ec: - if (!ec_corr) { + if (!ec_err) { si->ec_sum += ec; si->ec_count += 1; if (ec > si->max_ec) @@ -904,19 +1060,20 @@ adjust_mean_ec: static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si) { struct ubi_scan_leb *seb; - int max_corr; + int max_corr, peb_count; - max_corr = ubi->peb_count - si->bad_peb_count - si->alien_peb_count; - max_corr = max_corr / 20 ?: 8; + peb_count = ubi->peb_count - si->bad_peb_count - si->alien_peb_count; + max_corr = peb_count / 20 ?: 8; /* - * Few corrupted PEBs are not a problem and may be just a result of + * Few corrupted PEBs is not a problem and may be just a result of * unclean reboots. However, many of them may indicate some problems * with the flash HW or driver. 
*/ - if (si->corr_peb_count >= 8) { - ubi_warn("%d PEBs are corrupted", si->corr_peb_count); - printk(KERN_WARNING "corrupted PEBs are:"); + if (si->corr_peb_count) { + ubi_err("%d PEBs are corrupted and preserved", + si->corr_peb_count); + printk(KERN_ERR "Corrupted PEBs are:"); list_for_each_entry(seb, &si->corr, u.list) printk(KERN_CONT " %d", seb->pnum); printk(KERN_CONT "\n"); @@ -931,41 +1088,35 @@ static int check_what_we_have(struct ubi_device *ubi, struct ubi_scan_info *si) } } - if (si->free_peb_count + si->used_peb_count + - si->alien_peb_count == 0) { - /* No UBI-formatted eraseblocks were found */ - if (si->corr_peb_count == si->read_err_count && - si->corr_peb_count < 8) { - /* No or just few corrupted PEBs, and all of them had a - * read error. We assume that those are bad PEBs, which - * were just not marked as bad so far. - * - * This piece of code basically tries to distinguish - * between the following 2 situations: - * - * 1. Flash is empty, but there are few bad PEBs, which - * are not marked as bad so far, and which were read - * with error. We want to go ahead and format this - * flash. While formating, the faulty PEBs will - * probably be marked as bad. - * - * 2. Flash probably contains non-UBI data and we do - * not want to format it and destroy possibly needed - * data (e.g., consider the case when the bootloader - * MTD partition was accidentally fed to UBI). - */ + if (si->empty_peb_count + si->maybe_bad_peb_count == peb_count) { + /* + * All PEBs are empty, or almost all - a couple PEBs look like + * they may be bad PEBs which were not marked as bad yet. + * + * This piece of code basically tries to distinguish between + * the following situations: + * + * 1. Flash is empty, but there are few bad PEBs, which are not + * marked as bad so far, and which were read with error. We + * want to go ahead and format this flash. While formatting, + * the faulty PEBs will probably be marked as bad. + * + * 2. Flash contains non-UBI data and we do not want to format + * it and destroy possibly important information. 
+ */ + if (si->maybe_bad_peb_count <= 2) { si->is_empty = 1; ubi_msg("empty MTD device detected"); - get_random_bytes(&ubi->image_seq, sizeof(ubi->image_seq)); + get_random_bytes(&ubi->image_seq, + sizeof(ubi->image_seq)); } else { - ubi_err("MTD device possibly contains non-UBI data, " - "refusing it"); + ubi_err("MTD device is not UBI-formatted and possibly " + "contains non-UBI data - refusing it"); return -EINVAL; } + } - if (si->corr_peb_count > 0) - ubi_msg("corrupted PEBs will be formatted"); return 0; } diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h index 2576a8d1532..a3264f0bef2 100644 --- a/drivers/mtd/ubi/scan.h +++ b/drivers/mtd/ubi/scan.h @@ -30,6 +30,7 @@ * @pnum: physical eraseblock number * @lnum: logical eraseblock number * @scrub: if this physical eraseblock needs scrubbing + * @copy_flag: this LEB is a copy (@copy_flag is set in VID header of this LEB) * @sqnum: sequence number * @u: unions RB-tree or @list links * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects @@ -42,7 +43,8 @@ struct ubi_scan_leb { int ec; int pnum; int lnum; - int scrub; + unsigned int scrub:1; + unsigned int copy_flag:1; unsigned long long sqnum; union { struct rb_node rb; @@ -91,14 +93,13 @@ struct ubi_scan_volume { * @erase: list of physical eraseblocks which have to be erased * @alien: list of physical eraseblocks which should not be used by UBI (e.g., * those belonging to "preserve"-compatible internal volumes) - * @used_peb_count: count of used PEBs * @corr_peb_count: count of PEBs in the @corr list - * @read_err_count: count of PEBs read with error (%UBI_IO_BAD_HDR_READ was - * returned) - * @free_peb_count: count of PEBs in the @free list - * @erase_peb_count: count of PEBs in the @erase list + * @empty_peb_count: count of PEBs which are presumably empty (contain only + * 0xFF bytes) * @alien_peb_count: count of PEBs in the @alien list * @bad_peb_count: count of bad physical eraseblocks + * @maybe_bad_peb_count: count of bad physical eraseblocks which are not marked + * as bad yet, but which look like bad * @vols_found: number of volumes found during scanning * @highest_vol_id: highest volume ID * @is_empty: flag indicating whether the MTD device is empty or not @@ -119,13 +120,11 @@ struct ubi_scan_info { struct list_head free; struct list_head erase; struct list_head alien; - int used_peb_count; int corr_peb_count; - int read_err_count; - int free_peb_count; - int erase_peb_count; + int empty_peb_count; int alien_peb_count; int bad_peb_count; + int maybe_bad_peb_count; int vols_found; int highest_vol_id; int is_empty; diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 0359e0cce48..0b0149c41fe 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h @@ -85,21 +85,26 @@ /* * Error codes returned by the I/O sub-system. * - * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only - * %0xFF bytes - * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. 
it contains only a - valid erase counter header, and the rest are %0xFF bytes + * UBI_IO_FF: the read region of flash contains only 0xFFs + * UBI_IO_FF_BITFLIPS: the same as %UBI_IO_FF, but also there was a data + * integrity error reported by the MTD driver + * (uncorrectable ECC error in case of NAND) * UBI_IO_BAD_HDR: the EC or VID header is corrupted (bad magic or CRC) - * UBI_IO_BAD_HDR_READ: the same as %UBI_IO_BAD_HDR, but also there was a read - * error reported by the flash driver + * UBI_IO_BAD_HDR_EBADMSG: the same as %UBI_IO_BAD_HDR, but also there was a + * data integrity error reported by the MTD driver + * (uncorrectable ECC error in case of NAND) * UBI_IO_BITFLIPS: bit-flips were detected and corrected + * + * Note, it is probably better to have bit-flip and ebadmsg as flags which can + * be or'ed with other error codes. But this is a big change because there are + * many callers, so it is not worth the risk of introducing a bug */ enum { - UBI_IO_PEB_EMPTY = 1, - UBI_IO_PEB_FREE, + UBI_IO_FF = 1, + UBI_IO_FF_BITFLIPS, UBI_IO_BAD_HDR, - UBI_IO_BAD_HDR_READ, - UBI_IO_BITFLIPS + UBI_IO_BAD_HDR_EBADMSG, + UBI_IO_BITFLIPS, }; /* @@ -356,6 +361,8 @@ struct ubi_wl_entry; * @peb_size: physical eraseblock size * @bad_peb_count: count of bad physical eraseblocks * @good_peb_count: count of good physical eraseblocks + * @corr_peb_count: count of corrupted physical eraseblocks (preserved and not + * used by UBI) * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks * @min_io_size: minimal input/output unit size of the underlying MTD device @@ -442,6 +449,7 @@ struct ubi_device { int peb_size; int bad_peb_count; int good_peb_count; + int corr_peb_count; int erroneous_peb_count; int max_erroneous; int min_io_size; @@ -506,6 +514,7 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); int ubi_check_volume(struct ubi_device *ubi, int vol_id); void ubi_calculate_reserved(struct ubi_device *ubi); +int ubi_check_pattern(const void *buf, uint8_t patt, int size); /* eba.c */ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index e42afab9a9f..c47620dfc72 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -261,6 +261,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) /* Reserve physical eraseblocks */ if (vol->reserved_pebs > ubi->avail_pebs) { dbg_err("not enough PEBs, only %d available", ubi->avail_pebs); + if (ubi->corr_peb_count) + dbg_err("%d PEBs are corrupted and not used", + ubi->corr_peb_count); err = -ENOSPC; goto out_unlock; } @@ -527,6 +530,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) if (pebs > ubi->avail_pebs) { dbg_err("not enough PEBs: requested %d, available %d", pebs, ubi->avail_pebs); + if (ubi->corr_peb_count) + dbg_err("%d PEBs are corrupted and not used", + ubi->corr_peb_count); spin_unlock(&ubi->volumes_lock); err = -ENOSPC; goto out_free; diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 14c10bed94e..fcdb7f65fe0 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c @@ -366,7 +366,7 @@ write_error: * Probably this physical eraseblock went bad, try to pick * another one. 
*/ - list_add_tail(&new_seb->u.list, &si->corr); + list_add(&new_seb->u.list, &si->erase); goto retry; } kfree(new_seb); @@ -662,9 +662,13 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, ubi->vol_count += 1; vol->ubi = ubi; - if (reserved_pebs > ubi->avail_pebs) + if (reserved_pebs > ubi->avail_pebs) { ubi_err("not enough PEBs, required %d, available %d", reserved_pebs, ubi->avail_pebs); + if (ubi->corr_peb_count) + ubi_err("%d PEBs are corrupted and not used", + ubi->corr_peb_count); + } ubi->rsvd_pebs += reserved_pebs; ubi->avail_pebs -= reserved_pebs; @@ -837,7 +841,7 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si) return PTR_ERR(ubi->vtbl); } - ubi->avail_pebs = ubi->good_peb_count; + ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count; /* * The layout volume is OK, initialize the corresponding in-RAM data diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 97a435672ea..655bbbe415d 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -745,7 +745,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0); if (err && err != UBI_IO_BITFLIPS) { - if (err == UBI_IO_PEB_FREE) { + if (err == UBI_IO_FF) { /* * We are trying to move PEB without a VID header. UBI * always write VID headers shortly after the PEB was @@ -759,6 +759,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, dbg_wl("PEB %d has no VID header", e1->pnum); protect = 1; goto out_not_moved; + } else if (err == UBI_IO_FF_BITFLIPS) { + /* + * The same situation as %UBI_IO_FF, but bit-flips were + * detected. It is better to schedule this PEB for + * scrubbing. + */ + dbg_wl("PEB %d has no VID header but has bit-flips", + e1->pnum); + scrubbing = 1; + goto out_not_moved; } ubi_err("error %d while reading VID header from PEB %d", @@ -1468,22 +1478,6 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) ubi->lookuptbl[e->pnum] = e; } - list_for_each_entry(seb, &si->corr, u.list) { - cond_resched(); - - e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); - if (!e) - goto out_free; - - e->pnum = seb->pnum; - e->ec = seb->ec; - ubi->lookuptbl[e->pnum] = e; - if (schedule_erase(ubi, e, 0)) { - kmem_cache_free(ubi_wl_entry_slab, e); - goto out_free; - } - } - ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) { ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) { cond_resched(); @@ -1510,6 +1504,9 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) if (ubi->avail_pebs < WL_RESERVED_PEBS) { ubi_err("no enough physical eraseblocks (%d, need %d)", ubi->avail_pebs, WL_RESERVED_PEBS); + if (ubi->corr_peb_count) + ubi_err("%d PEBs are corrupted and not used", + ubi->corr_peb_count); goto out_free; } ubi->avail_pebs -= WL_RESERVED_PEBS; diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c index 70705d1306b..eca55c52bdf 100644 --- a/drivers/net/3c527.c +++ b/drivers/net/3c527.c @@ -522,7 +522,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot) lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */ lp->rx_len = lp->exec_box->data[11]; /* Receive list count */ - init_MUTEX_LOCKED(&lp->cmd_mutex); + sema_init(&lp->cmd_mutex, 0); init_completion(&lp->execution_cmd); init_completion(&lp->xceiver_cmd); diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 85671adae45..179871d9e71 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -635,6 +635,9 @@ struct vortex_private 
{ must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ + /* {get|set}_wol operations are already serialized by rtnl. + * no additional locking is required for the enable_wol and acpi_set_WOL() + */ int drv_flags; u16 status_enable; u16 intr_enable; @@ -2939,28 +2942,31 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); - spin_lock_irq(&vp->lock); + if (!VORTEX_PCI(vp)) + return; + wol->supported = WAKE_MAGIC; wol->wolopts = 0; if (vp->enable_wol) wol->wolopts |= WAKE_MAGIC; - spin_unlock_irq(&vp->lock); } static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct vortex_private *vp = netdev_priv(dev); + + if (!VORTEX_PCI(vp)) + return -EOPNOTSUPP; + if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; - spin_lock_irq(&vp->lock); if (wol->wolopts & WAKE_MAGIC) vp->enable_wol = 1; else vp->enable_wol = 0; acpi_set_WOL(dev); - spin_unlock_irq(&vp->lock); return 0; } @@ -3202,6 +3208,9 @@ static void acpi_set_WOL(struct net_device *dev) return; } + if (VORTEX_PCI(vp)->current_state < PCI_D3hot) + return; + /* Change the power state to D3; RxEnable doesn't take effect. */ pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot); } diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 2cc81a54cbf..77efe462b92 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2,6 +2,9 @@ # Network device configuration # +config HAVE_NET_MACB + bool + menuconfig NETDEVICES default y if UML depends on NET @@ -221,7 +224,7 @@ config MII config MACB tristate "Atmel MACB support" - depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 || ARCH_AT91SAM9G20 || ARCH_AT91SAM9G45 || ARCH_AT91CAP9 + depends on HAVE_NET_MACB select PHYLIB help The Atmel MACB ethernet interface is found on many AT32 and AT91 @@ -2428,7 +2431,7 @@ config UGETH_TX_ON_DEMAND config MV643XX_ETH tristate "Marvell Discovery (643XX) and Orion ethernet support" - depends on MV64X60 || PPC32 || PLAT_ORION + depends on (MV64X60 || PPC32 || PLAT_ORION) && INET select INET_LRO select PHYLIB help @@ -2803,7 +2806,7 @@ config NIU config PASEMI_MAC tristate "PA Semi 1/10Gbit MAC" - depends on PPC_PASEMI && PCI + depends on PPC_PASEMI && PCI && INET select PHYLIB select INET_LRO help diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 0a0e0cd81a2..20f97e7017c 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -3,6 +3,7 @@ # config ATALK tristate "Appletalk protocol support" + depends on BKL # waiting to be removed from net/appletalk/ddp.c select LLC ---help--- AppleTalk is the protocol that Apple computers can use to communicate diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 63b9ba0cc67..c73be284831 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -1251,6 +1251,12 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter) rrd_ring->desc = NULL; rrd_ring->dma = 0; + + adapter->cmb.dma = 0; + adapter->cmb.cmb = NULL; + + adapter->smb.dma = 0; + adapter->smb.smb = NULL; } static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) @@ -2847,10 +2853,11 @@ static int atl1_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3cold, 0); atl1_reset_hw(&adapter->hw); - adapter->cmb.cmb->int_stats = 0; - if (netif_running(netdev)) + if (netif_running(netdev)) { + adapter->cmb.cmb->int_stats = 0; atl1_up(adapter); + } 
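The vortex_get_wol()/vortex_set_wol() hunks above drop the per-device spin_lock_irq() pair, relying on the fact that the ethtool {get,set}_wol operations are already serialized by the rtnl lock, and instead add a check for non-PCI (EISA) boards. A hedged sketch of what a lock-free WoL handler pair of this shape can look like, with placeholder foo_* names:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv {
	unsigned int enable_wol:1;
};

/* ethtool ioctls run under rtnl, so no extra driver locking is needed */
static void foo_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct foo_priv *fp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	wol->wolopts = fp->enable_wol ? WAKE_MAGIC : 0;
}

static int foo_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct foo_priv *fp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;		/* only magic-packet wake is supported */

	fp->enable_wol = !!(wol->wolopts & WAKE_MAGIC);
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_wol = foo_get_wol,
	.set_wol = foo_set_wol,
};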
netif_device_attach(netdev); return 0; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 1e620e287ae..efeffdf9e5f 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -2170,8 +2170,6 @@ static int __devinit b44_init_one(struct ssb_device *sdev, dev->irq = sdev->irq; SET_ETHTOOL_OPS(dev, &b44_ethtool_ops); - netif_carrier_off(dev); - err = ssb_bus_powerup(sdev->bus, 0); if (err) { dev_err(sdev->dev, @@ -2213,6 +2211,8 @@ static int __devinit b44_init_one(struct ssb_device *sdev, goto err_out_powerdown; } + netif_carrier_off(dev); + ssb_set_drvdata(sdev, dev); /* Chip reset provides power to the b44 MAC & PCI cores, which diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 012613fde3f..03d063554b7 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -38,6 +38,7 @@ #include <asm/blackfin.h> #include <asm/cacheflush.h> #include <asm/portmux.h> +#include <mach/pll.h> #include "bfin_mac.h" diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 822f586d72a..0ddf4c66afe 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac if (!(dev->flags & IFF_MASTER)) goto out; + if (!pskb_may_pull(skb, sizeof(struct lacpdu))) + goto out; + read_lock(&bond->lock); slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), orig_dev); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c746b331771..26bb118c453 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct goto out; } + if (!pskb_may_pull(skb, arp_hdr_len(bond_dev))) + goto out; + if (skb->len < sizeof(struct arp_pkt)) { pr_debug("Packet is too small to be an ARP\n"); goto out; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 3b16f62d560..e953c6ad6e6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5164,6 +5164,15 @@ int bond_create(struct net *net, const char *name) res = dev_alloc_name(bond_dev, "bond%d"); if (res < 0) goto out; + } else { + /* + * If we're given a name to register + * we need to ensure that its not already + * registered + */ + res = -EEXIST; + if (__dev_get_by_name(net, name) != NULL) + goto out; } res = register_netdevice(bond_dev); diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index f5058ff2b21..8427533fe31 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -240,13 +240,15 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, static const struct file_operations dbgfs_state_fops = { .open = dbgfs_open, .read = dbgfs_state, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static const struct file_operations dbgfs_frame_fops = { .open = dbgfs_open, .read = dbgfs_frame, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static inline void dev_debugfs_add(struct cfspi *cfspi) diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index ad19585d960..f208712c0b9 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) case CHELSIO_GET_QSET_NUM:{ struct ch_reg edata; + memset(&edata, 0, sizeof(struct ch_reg)); + edata.cmd = 
CHELSIO_GET_QSET_NUM; edata.val = pi->nqsets; if (copy_to_user(useraddr, &edata, sizeof(edata))) diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c327527fbbc..e2bf10d90ad 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -2026,6 +2026,7 @@ static const struct file_operations mem_debugfs_fops = { .owner = THIS_MODULE, .open = mem_open, .read = mem_read, + .llseek = default_llseek, }; static void __devinit add_debugfs_mem(struct adapter *adap, const char *name, diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index 66ed08f726f..ba302a5c2c3 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -57,6 +57,7 @@ enum e1e_registers { E1000_SCTL = 0x00024, /* SerDes Control - RW */ E1000_FCAL = 0x00028, /* Flow Control Address Low - RW */ E1000_FCAH = 0x0002C, /* Flow Control Address High -RW */ + E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */ E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ E1000_FCT = 0x00030, /* Flow Control Type - RW */ E1000_VET = 0x00038, /* VLAN Ether Type - RW */ diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 63930d12711..57b5435599a 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -105,6 +105,10 @@ #define E1000_FEXTNVM_SW_CONFIG 1 #define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL #define E1000_ICH_RAR_ENTRIES 7 @@ -125,6 +129,7 @@ /* SMBus Address Phy Register */ #define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F #define HV_SMB_ADDR_PEC_EN 0x0200 #define HV_SMB_ADDR_VALID 0x0080 @@ -237,6 +242,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) { @@ -272,7 +279,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) { struct e1000_phy_info *phy = &hw->phy; - u32 ctrl; + u32 ctrl, fwsm; s32 ret_val = 0; phy->addr = 1; @@ -294,7 +301,8 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) * disabled, then toggle the LANPHYPC Value bit to force * the interconnect to PCIe mode. 
*/ - if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + fwsm = er32(FWSM); + if (!(fwsm & E1000_ICH_FWSM_FW_VALID)) { ctrl = er32(CTRL); ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; @@ -303,6 +311,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; ew32(CTRL, ctrl); msleep(50); + + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if (hw->mac.type == e1000_pch2lan) + e1000_gate_hw_phy_config_ich8lan(hw, true); } /* @@ -315,6 +330,13 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) if (ret_val) goto out; + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + msleep(10); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + phy->id = e1000_phy_unknown; ret_val = e1000e_get_phy_id(hw); if (ret_val) @@ -561,13 +583,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter) if (mac->type == e1000_ich8lan) e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); - /* Disable PHY configuration by hardware, config by software */ - if (mac->type == e1000_pch2lan) { - u32 extcnf_ctrl = er32(EXTCNF_CTRL); - - extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; - ew32(EXTCNF_CTRL, extcnf_ctrl); - } + /* Gate automatic PHY configuration by hardware on managed 82579 */ + if ((mac->type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); return 0; } @@ -652,6 +671,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) goto out; } + if (hw->mac.type == e1000_pch2lan) { + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + goto out; + } + /* * Check if there was DownShift, must be checked * immediately after link-up @@ -895,6 +920,34 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) } /** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. 
+ * + **/ +static s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = er32(STRAP); + s32 ret_val = 0; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + goto out; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); + +out: + return ret_val; +} + +/** * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration * @hw: pointer to the HW structure * @@ -903,7 +956,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) **/ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) { - struct e1000_adapter *adapter = hw->adapter; struct e1000_phy_info *phy = &hw->phy; u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; s32 ret_val = 0; @@ -921,7 +973,8 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) if (phy->type != e1000_phy_igp_3) return ret_val; - if (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) { + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; break; } @@ -961,21 +1014,16 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; - if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && - ((hw->mac.type == e1000_pchlan) || - (hw->mac.type == e1000_pch2lan))) { + if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && + (hw->mac.type == e1000_pchlan)) || + (hw->mac.type == e1000_pch2lan)) { /* * HW configures the SMBus address and LEDs when the * OEM and LCD Write Enable bits are set in the NVM. * When both NVM bits are cleared, SW will configure * them instead. 
*/ - data = er32(STRAP); - data &= E1000_STRAP_SMBUS_ADDRESS_MASK; - reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT; - reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; - ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, - reg_data); + ret_val = e1000_write_smbus_addr(hw); if (ret_val) goto out; @@ -1440,10 +1488,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) goto out; /* Enable jumbo frame workaround in the PHY */ - e1e_rphy(hw, PHY_REG(769, 20), &data); - ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); - if (ret_val) - goto out; e1e_rphy(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); data |= (0x37 << 5); @@ -1452,7 +1496,6 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) goto out; e1e_rphy(hw, PHY_REG(769, 16), &data); data &= ~(1 << 13); - data |= (1 << 12); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) goto out; @@ -1477,7 +1520,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) mac_reg = er32(RCTL); mac_reg &= ~E1000_RCTL_SECRC; - ew32(FFLT_DBG, mac_reg); + ew32(RCTL, mac_reg); ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_CTRL_OFFSET, @@ -1503,17 +1546,12 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) goto out; /* Write PHY register values back to h/w defaults */ - e1e_rphy(hw, PHY_REG(769, 20), &data); - ret_val = e1e_wphy(hw, PHY_REG(769, 20), data & ~(1 << 14)); - if (ret_val) - goto out; e1e_rphy(hw, PHY_REG(769, 23), &data); data &= ~(0x7F << 5); ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); if (ret_val) goto out; e1e_rphy(hw, PHY_REG(769, 16), &data); - data &= ~(1 << 12); data |= (1 << 13); ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); if (ret_val) @@ -1559,6 +1597,69 @@ out: } /** + * e1000_k1_gig_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts + **/ +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 status_reg = 0; + u32 mac_reg; + + if (hw->mac.type != e1000_pch2lan) + goto out; + + /* Set K1 beacon duration based on 1Gbps speed or otherwise */ + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); + if (ret_val) + goto out; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + + if (status_reg & HV_M_STATUS_SPEED_1000) + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + else + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; + + ew32(FEXTNVM4, mac_reg); + } + +out: + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. 
+ **/ +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + if (hw->mac.type != e1000_pch2lan) + return; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + ew32(EXTCNF_CTRL, extcnf_ctrl); + return; +} + +/** * e1000_lan_init_done_ich8lan - Check for PHY config completion * @hw: pointer to the HW structure * @@ -1602,6 +1703,9 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) if (e1000_check_reset_block(hw)) goto out; + /* Allow time for h/w to get to quiescent state after reset */ + msleep(10); + /* Perform any necessary post-reset workarounds */ switch (hw->mac.type) { case e1000_pchlan: @@ -1630,6 +1734,13 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) /* Configure the LCD with the OEM bits in NVM */ ret_val = e1000_oem_bits_config_ich8lan(hw, true); + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + msleep(10); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + out: return ret_val; } @@ -1646,6 +1757,11 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) { s32 ret_val = 0; + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) goto out; @@ -2910,6 +3026,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) * external PHY is reset. */ ctrl |= E1000_CTRL_PHY_RST; + + /* + * Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); } ret_val = e1000_acquire_swflag_ich8lan(hw); e_dbg("Issuing a global reset to ich8lan\n"); @@ -3460,13 +3584,20 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) { u32 phy_ctrl; + s32 ret_val; phy_ctrl = er32(PHY_CTRL); phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE; ew32(PHY_CTRL, phy_ctrl); - if (hw->mac.type >= e1000_pchlan) - e1000_phy_hw_reset_ich8lan(hw); + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, true); + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } } /** diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 2b8ef44bd2b..e561d15c3eb 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -2704,6 +2704,16 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) u32 psrctl = 0; u32 pages = 0; + /* Workaround Si errata on 82579 - configure jumbo frame flow */ + if (hw->mac.type == e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + } + /* Program MC offset vector base */ rctl = er32(RCTL); rctl &= ~(3 << E1000_RCTL_MO_SHIFT); @@ -2744,16 +2754,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter) e1e_wphy(hw, 22, phy_data); } - /* Workaround Si errata on 82579 - configure jumbo frame flow */ - if (hw->mac.type == e1000_pch2lan) { - s32 ret_val; - - if (rctl & E1000_RCTL_LPE) - ret_val = 
e1000_lv_jumbo_workaround_ich8lan(hw, true); - else - ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); - } - /* Setup buffer sizes */ rctl &= ~E1000_RCTL_SZ_4096; rctl |= E1000_RCTL_BSEX; @@ -4833,6 +4833,15 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) return -EINVAL; } + /* Jumbo frame workaround on 82579 requires CRC be stripped */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && + (new_mtu > ETH_DATA_LEN)) { + e_err("Jumbo Frames not supported on 82579 when CRC " + "stripping is disabled.\n"); + return -EINVAL; + } + /* 82573 Errata 17 */ if (((adapter->hw.mac.type == e1000_82573) || (adapter->hw.mac.type == e1000_82574)) && diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index a333b42111b..6372610ed24 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -533,8 +533,15 @@ static inline void ehea_fill_skb(struct net_device *dev, int length = cqe->num_bytes_transfered - 4; /*remove CRC */ skb_put(skb, length); - skb->ip_summed = CHECKSUM_UNNECESSARY; skb->protocol = eth_type_trans(skb, dev); + + /* The packet was not an IPV4 packet so a complemented checksum was + calculated. The value is found in the Internet Checksum field. */ + if (cqe->status & EHEA_CQE_BLIND_CKSUM) { + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = csum_unfold(~cqe->inet_checksum_value); + } else + skb->ip_summed = CHECKSUM_UNNECESSARY; } static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array, diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h index f608a6c54af..38104734a3b 100644 --- a/drivers/net/ehea/ehea_qmr.h +++ b/drivers/net/ehea/ehea_qmr.h @@ -150,6 +150,7 @@ struct ehea_rwqe { #define EHEA_CQE_TYPE_RQ 0x60 #define EHEA_CQE_STAT_ERR_MASK 0x700F #define EHEA_CQE_STAT_FAT_ERR_MASK 0xF +#define EHEA_CQE_BLIND_CKSUM 0x8000 #define EHEA_CQE_STAT_ERR_TCP 0x4000 #define EHEA_CQE_STAT_ERR_IP 0x2000 #define EHEA_CQE_STAT_ERR_CRC 0x1000 diff --git a/drivers/net/eql.c b/drivers/net/eql.c index dda2c7944da..0cb1cf9cf4b 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp) equalizer_t *eql; master_config_t mc; + memset(&mc, 0, sizeof(master_config_t)); + if (eql_is_master(dev)) { eql = netdev_priv(dev); mc.max_slaves = eql->max_slaves; diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 768b840aeb6..cce32d43175 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -678,24 +678,37 @@ static int fec_enet_mii_probe(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); struct phy_device *phy_dev = NULL; - int ret; + char mdio_bus_id[MII_BUS_ID_SIZE]; + char phy_name[MII_BUS_ID_SIZE + 3]; + int phy_id; fep->phy_dev = NULL; - /* find the first phy */ - phy_dev = phy_find_first(fep->mii_bus); - if (!phy_dev) { - printk(KERN_ERR "%s: no PHY found\n", dev->name); - return -ENODEV; + /* check for attached phy */ + for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { + if ((fep->mii_bus->phy_mask & (1 << phy_id))) + continue; + if (fep->mii_bus->phy_map[phy_id] == NULL) + continue; + if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) + continue; + strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); + break; } - /* attach the mac to the phy */ - ret = phy_connect_direct(dev, phy_dev, - &fec_enet_adjust_link, 0, - PHY_INTERFACE_MODE_MII); - if (ret) { - printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); - return ret; + if 
(phy_id >= PHY_MAX_ADDR) { + printk(KERN_INFO "%s: no PHY, assuming direct connection " + "to switch\n", dev->name); + strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); + phy_id = 0; + } + + snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); + phy_dev = phy_connect(dev, phy_name, &fec_enet_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + if (IS_ERR(phy_dev)) { + printk(KERN_ERR "%s: could not attach to PHY\n", dev->name); + return PTR_ERR(phy_dev); } /* mask with MAC supported features */ @@ -738,7 +751,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->read = fec_enet_mdio_read; fep->mii_bus->write = fec_enet_mdio_write; fep->mii_bus->reset = fec_enet_mdio_reset; - snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); + snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1); fep->mii_bus->priv = fep; fep->mii_bus->parent = &pdev->dev; @@ -1311,6 +1324,9 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_mii_init; + /* Carrier starts down, phylib will bring it up */ + netif_carrier_off(ndev); + ret = register_netdev(ndev); if (ret) goto failed_register; diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 4b52c767ad0..3e5d0b6b651 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -608,7 +608,7 @@ static int sixpack_open(struct tty_struct *tty) spin_lock_init(&sp->lock); atomic_set(&sp->refcnt, 1); - init_MUTEX_LOCKED(&sp->dead_sem); + sema_init(&sp->dead_sem, 0); /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 66e88bd59ca..4c628393c8b 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -747,7 +747,7 @@ static int mkiss_open(struct tty_struct *tty) spin_lock_init(&ax->buflock); atomic_set(&ax->refcnt, 1); - init_MUTEX_LOCKED(&ax->dead_sem); + sema_init(&ax->dead_sem, 0); ax->tty = tty; tty->disc_data = ax; diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 3506fd6ad72..519e19e2395 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2928,7 +2928,7 @@ static int __devinit emac_probe(struct platform_device *ofdev, if (dev->emac_irq != NO_IRQ) irq_dispose_mapping(dev->emac_irq); err_free: - kfree(ndev); + free_netdev(ndev); err_gone: /* if we were on the bootlist, remove us as we won't show up and * wake up all waiters to notify them in case they were waiting @@ -2971,7 +2971,7 @@ static int __devexit emac_remove(struct platform_device *ofdev) if (dev->emac_irq != NO_IRQ) irq_dispose_mapping(dev->emac_irq); - kfree(dev->ndev); + free_netdev(dev->ndev); return 0; } diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c index 1b051dab7b2..51d74447f8f 100644 --- a/drivers/net/irda/sir_dev.c +++ b/drivers/net/irda/sir_dev.c @@ -909,7 +909,7 @@ struct sir_dev * sirdev_get_instance(const struct sir_driver *drv, const char *n dev->tx_skb = NULL; spin_lock_init(&dev->tx_lock); - init_MUTEX(&dev->fsm.sem); + sema_init(&dev->fsm.sem, 1); dev->drv = drv; dev->netdev = ndev; diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index cabae7bb1fc..b075a35b85d 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -1540,7 +1540,6 @@ netxen_process_rcv(struct netxen_adapter *adapter, if (pkt_offset) skb_pull(skb, pkt_offset); - skb->truesize = skb->len + sizeof(struct sk_buff); skb->protocol = eth_type_trans(skb, netdev); 
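The ibm_newemac hunks above change the probe error path and the remove path from kfree(ndev) to free_netdev(ndev): a struct net_device obtained from alloc_etherdev() owns additional embedded state that only free_netdev() releases, so kfree() on it is a leak. A minimal sketch of the usual allocate/register/error-unwind sequence, with hypothetical names:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct foo_priv {
	int dummy;
};

static int foo_create_netdev(void)
{
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(struct foo_priv));
	if (!ndev)
		return -ENOMEM;

	err = register_netdev(ndev);
	if (err)
		goto err_free;

	return 0;

err_free:
	free_netdev(ndev);	/* not kfree(): releases netdev-private state too */
	return err;
}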
napi_gro_receive(&sds_ring->napi, skb); @@ -1602,8 +1601,6 @@ netxen_process_lro(struct netxen_adapter *adapter, skb_put(skb, lro_length + data_offset); - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); - skb_pull(skb, l2_hdr_offset); skb->protocol = eth_type_trans(skb, netdev); diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index c683f77c6f4..ff824e11f0b 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c @@ -87,7 +87,6 @@ earlier 3Com products. #include <linux/bitops.h> #include <linux/mii.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> @@ -280,25 +279,15 @@ static int tc574_probe(struct pcmcia_device *link) spin_lock_init(&lp->window_lock); link->resource[0]->end = 32; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.ConfigIndex = 1; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; dev->netdev_ops = &el3_netdev_ops; SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); dev->watchdog_timeo = TX_TIMEOUT; return tc574_config(link); -} /* tc574_attach */ - -/* - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -*/ +} static void tc574_detach(struct pcmcia_device *link) { @@ -313,12 +302,6 @@ static void tc574_detach(struct pcmcia_device *link) free_netdev(dev); } /* tc574_detach */ -/* - tc574_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. -*/ - static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; static int tc574_config(struct pcmcia_device *link) @@ -352,7 +335,7 @@ static int tc574_config(struct pcmcia_device *link) if (ret) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -465,12 +448,6 @@ failed: } /* tc574_config */ -/* - After a card is removed, tc574_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. -*/ - static void tc574_release(struct pcmcia_device *link) { pcmcia_disable_device(link); @@ -1198,9 +1175,7 @@ MODULE_DEVICE_TABLE(pcmcia, tc574_ids); static struct pcmcia_driver tc574_driver = { .owner = THIS_MODULE, - .drv = { - .name = "3c574_cs", - }, + .name = "3c574_cs", .probe = tc574_probe, .remove = tc574_detach, .id_table = tc574_ids, diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c index 61f9cf2100f..a07e2229533 100644 --- a/drivers/net/pcmcia/3c589_cs.c +++ b/drivers/net/pcmcia/3c589_cs.c @@ -41,7 +41,6 @@ #include <linux/bitops.h> #include <linux/jiffies.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> @@ -176,14 +175,6 @@ static const struct ethtool_ops netdev_ethtool_ops; static void tc589_detach(struct pcmcia_device *p_dev); -/*====================================================================== - - tc589_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. 
- -======================================================================*/ - static const struct net_device_ops el3_netdev_ops = { .ndo_open = el3_open, .ndo_stop = el3_close, @@ -216,9 +207,8 @@ static int tc589_probe(struct pcmcia_device *link) link->resource[0]->end = 16; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.ConfigIndex = 1; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; dev->netdev_ops = &el3_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; @@ -226,16 +216,7 @@ static int tc589_probe(struct pcmcia_device *link) SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); return tc589_config(link); -} /* tc589_attach */ - -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ +} static void tc589_detach(struct pcmcia_device *link) { @@ -250,14 +231,6 @@ static void tc589_detach(struct pcmcia_device *link) free_netdev(dev); } /* tc589_detach */ -/*====================================================================== - - tc589_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. - -======================================================================*/ - static int tc589_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -294,7 +267,7 @@ static int tc589_config(struct pcmcia_device *link) if (ret) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -352,14 +325,6 @@ failed: return -ENODEV; } /* tc589_config */ -/*====================================================================== - - After a card is removed, tc589_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void tc589_release(struct pcmcia_device *link) { pcmcia_disable_device(link); @@ -955,9 +920,7 @@ MODULE_DEVICE_TABLE(pcmcia, tc589_ids); static struct pcmcia_driver tc589_driver = { .owner = THIS_MODULE, - .drv = { - .name = "3c589_cs", - }, + .name = "3c589_cs", .probe = tc589_probe, .remove = tc589_detach, .id_table = tc589_ids, diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 5f05ffb240c..9e8b28b271a 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -39,7 +39,6 @@ #include <linux/mii.h> #include "../8390.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -140,14 +139,6 @@ static const struct net_device_ops axnet_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -/*====================================================================== - - axnet_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. 
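The 3c574_cs and 3c589_cs hunks above (and the axnet_cs ones that follow) are part of a tree-wide pcmcia API cleanup: the old link->conf block (Attributes, IntType, ConfigIndex, Present) becomes plain fields on struct pcmcia_device, pcmcia_request_configuration(link, &link->conf) becomes pcmcia_enable_device(link), and struct pcmcia_driver gains a direct .name instead of the embedded .drv. A condensed sketch of the new-style config path for a simple 16-bit I/O card (foo_* names are placeholders):

#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int foo_cs_config(struct pcmcia_device *link)
{
	int ret;

	link->config_flags |= CONF_ENABLE_IRQ;	/* was conf.Attributes/IntType */
	link->config_index = 1;			/* was conf.ConfigIndex */

	link->resource[0]->end = 16;
	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
	ret = pcmcia_request_io(link);
	if (ret)
		return ret;

	return pcmcia_enable_device(link);	/* was pcmcia_request_configuration() */
}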
- -======================================================================*/ - static int axnet_probe(struct pcmcia_device *link) { axnet_dev_t *info; @@ -166,8 +157,7 @@ static int axnet_probe(struct pcmcia_device *link) info = PRIV(dev); info->p_dev = link; link->priv = dev; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ; dev->netdev_ops = &axnet_netdev_ops; @@ -177,15 +167,6 @@ static int axnet_probe(struct pcmcia_device *link) return axnet_config(link); } /* axnet_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void axnet_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -231,7 +212,7 @@ static int get_prom(struct pcmcia_device *link) }; /* Not much of a test, but the alternatives are messy */ - if (link->conf.ConfigBase != 0x03c0) + if (link->config_base != 0x03c0) return 0; axnet_reset_8390(dev); @@ -248,14 +229,6 @@ static int get_prom(struct pcmcia_device *link) return 1; } /* get_prom */ -/*====================================================================== - - axnet_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. - -======================================================================*/ - static int try_io_port(struct pcmcia_device *link) { int j, ret; @@ -286,35 +259,16 @@ static int try_io_port(struct pcmcia_device *link) } } -static int axnet_configcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int axnet_configcheck(struct pcmcia_device *p_dev, void *priv_data) { - int i; - cistpl_io_t *io = &cfg->io; + if (p_dev->config_index == 0) + return -EINVAL; - if (cfg->index == 0 || cfg->io.nwin == 0) + p_dev->config_index = 0x05; + if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) return -ENODEV; - p_dev->conf.ConfigIndex = 0x05; - /* For multifunction cards, by convention, we configure the - network function with window 0, and serial with window 1 */ - if (io->nwin > 1) { - i = (io->win[1].len > io->win[0].len); - p_dev->resource[1]->start = io->win[1-i].base; - p_dev->resource[1]->end = io->win[1-i].len; - } else { - i = p_dev->resource[1]->end = 0; - } - p_dev->resource[0]->start = io->win[i].base; - p_dev->resource[0]->end = io->win[i].len; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) - return try_io_port(p_dev); - - return -ENODEV; + return try_io_port(p_dev); } static int axnet_config(struct pcmcia_device *link) @@ -326,20 +280,19 @@ static int axnet_config(struct pcmcia_device *link) dev_dbg(&link->dev, "axnet_config(0x%p)\n", link); /* don't trust the CIS on this; Linksys got it wrong */ - link->conf.Present = 0x63; + link->config_regs = 0x63; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, axnet_configcheck, NULL); if (ret != 0) goto failed; if (!link->irq) goto failed; + + if (resource_size(link->resource[1]) == 8) + link->config_flags |= CONF_ENABLE_SPKR; - if (resource_size(link->resource[1]) == 
8) { - link->conf.Attributes |= CONF_ENABLE_SPKR; - link->conf.Status = CCSR_AUDIO_ENA; - } - - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -414,14 +367,6 @@ failed: return -ENODEV; } /* axnet_config */ -/*====================================================================== - - After a card is removed, axnet_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void axnet_release(struct pcmcia_device *link) { pcmcia_disable_device(link); @@ -783,9 +728,7 @@ MODULE_DEVICE_TABLE(pcmcia, axnet_ids); static struct pcmcia_driver axnet_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "axnet_cs", - }, + .name = "axnet_cs", .probe = axnet_probe, .remove = axnet_detach, .id_table = axnet_ids, diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c index 3c400cfa82a..b706a724947 100644 --- a/drivers/net/pcmcia/com20020_cs.c +++ b/drivers/net/pcmcia/com20020_cs.c @@ -43,7 +43,6 @@ #include <linux/arcdevice.h> #include <linux/com20020.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -123,14 +122,6 @@ typedef struct com20020_dev_t { struct net_device *dev; } com20020_dev_t; -/*====================================================================== - - com20020_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - -======================================================================*/ - static int com20020_probe(struct pcmcia_device *p_dev) { com20020_dev_t *info; @@ -160,8 +151,7 @@ static int com20020_probe(struct pcmcia_device *p_dev) p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; p_dev->resource[0]->end = 16; - p_dev->conf.Attributes = CONF_ENABLE_IRQ; - p_dev->conf.IntType = INT_MEMORY_AND_IO; + p_dev->config_flags |= CONF_ENABLE_IRQ; info->dev = dev; p_dev->priv = info; @@ -174,15 +164,6 @@ fail_alloc_info: return -ENOMEM; } /* com20020_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void com20020_detach(struct pcmcia_device *link) { struct com20020_dev_t *info = link->priv; @@ -221,14 +202,6 @@ static void com20020_detach(struct pcmcia_device *link) } /* com20020_detach */ -/*====================================================================== - - com20020_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. 
- -======================================================================*/ - static int com20020_config(struct pcmcia_device *link) { struct arcnet_local *lp; @@ -282,7 +255,7 @@ static int com20020_config(struct pcmcia_device *link) dev->irq = link->irq; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -316,14 +289,6 @@ failed: return -ENODEV; } /* com20020_config */ -/*====================================================================== - - After a card is removed, com20020_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void com20020_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "com20020_release\n"); @@ -366,9 +331,7 @@ MODULE_DEVICE_TABLE(pcmcia, com20020_ids); static struct pcmcia_driver com20020_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "com20020_cs", - }, + .name = "com20020_cs", .probe = com20020_probe, .remove = com20020_detach, .id_table = com20020_ids, diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 98fffb03ecd..1c327598bbe 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c @@ -49,7 +49,6 @@ #include <linux/ioport.h> #include <linux/crc32.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -252,8 +251,7 @@ static int fmvj18x_probe(struct pcmcia_device *link) link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; /* General socket configuration */ - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ; dev->netdev_ops = &fjn_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; @@ -313,7 +311,7 @@ static int ungermann_try_io_port(struct pcmcia_device *link) ret = pcmcia_request_io(link); if (ret == 0) { /* calculate ConfigIndex value */ - link->conf.ConfigIndex = + link->config_index = ((link->resource[0]->start & 0x0f0) >> 3) | 0x22; return ret; } @@ -321,11 +319,7 @@ static int ungermann_try_io_port(struct pcmcia_device *link) return ret; /* RequestIO failed */ } -static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int fmvj18x_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { return 0; /* strange, but that's what the code did already before... 
*/ } @@ -362,28 +356,28 @@ static int fmvj18x_config(struct pcmcia_device *link) link->card_id == PRODID_TDK_NP9610 || link->card_id == PRODID_TDK_MN3200) { /* MultiFunction Card */ - link->conf.ConfigBase = 0x800; - link->conf.ConfigIndex = 0x47; + link->config_base = 0x800; + link->config_index = 0x47; link->resource[1]->end = 8; } break; case MANFID_NEC: cardtype = NEC; /* MultiFunction Card */ - link->conf.ConfigBase = 0x800; - link->conf.ConfigIndex = 0x47; + link->config_base = 0x800; + link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_KME: cardtype = KME; /* MultiFunction Card */ - link->conf.ConfigBase = 0x800; - link->conf.ConfigIndex = 0x47; + link->config_base = 0x800; + link->config_index = 0x47; link->resource[1]->end = 8; break; case MANFID_CONTEC: cardtype = CONTEC; break; case MANFID_FUJITSU: - if (link->conf.ConfigBase == 0x0fe0) + if (link->config_base == 0x0fe0) cardtype = MBH10302; else if (link->card_id == PRODID_FUJITSU_MBH10302) /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), @@ -403,10 +397,10 @@ static int fmvj18x_config(struct pcmcia_device *link) case MANFID_FUJITSU: if (link->card_id == PRODID_FUJITSU_MBH10304) { cardtype = XXX10304; /* MBH10304 with buggy CIS */ - link->conf.ConfigIndex = 0x20; + link->config_index = 0x20; } else { cardtype = MBH10302; /* NextCom NC5310, etc. */ - link->conf.ConfigIndex = 1; + link->config_index = 1; } break; case MANFID_UNGERMANN: @@ -414,7 +408,7 @@ static int fmvj18x_config(struct pcmcia_device *link) break; default: cardtype = MBH10302; - link->conf.ConfigIndex = 1; + link->config_index = 1; } } @@ -432,7 +426,7 @@ static int fmvj18x_config(struct pcmcia_device *link) ret = pcmcia_request_irq(link, fjn_interrupt); if (ret) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -544,20 +538,18 @@ failed: static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) { - win_req_t req; u_char __iomem *base; int i, j; /* Allocate a small memory window */ - req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; - req.Base = 0; req.Size = 0; - req.AccessSpeed = 0; - i = pcmcia_request_window(link, &req, &link->win); + link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = 0; link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); if (i != 0) return -1; - base = ioremap(req.Base, req.Size); - pcmcia_map_mem_page(link, link->win, 0); + base = ioremap(link->resource[2]->start, resource_size(link->resource[2])); + pcmcia_map_mem_page(link, link->resource[2], 0); /* * MBH10304 CISTPL_FUNCE_LAN_NODE_ID format @@ -582,7 +574,7 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) } iounmap(base); - j = pcmcia_release_window(link, link->win); + j = pcmcia_release_window(link, link->resource[2]); return (i != 0x200) ? 
0 : -1; } /* fmvj18x_get_hwinfo */ @@ -590,27 +582,26 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id) static int fmvj18x_setup_mfc(struct pcmcia_device *link) { - win_req_t req; int i; struct net_device *dev = link->priv; unsigned int ioaddr; local_info_t *lp = netdev_priv(dev); /* Allocate a small memory window */ - req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; - req.Base = 0; req.Size = 0; - req.AccessSpeed = 0; - i = pcmcia_request_window(link, &req, &link->win); + link->resource[3]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[3]->start = link->resource[3]->end = 0; + i = pcmcia_request_window(link, link->resource[3], 0); if (i != 0) return -1; - lp->base = ioremap(req.Base, req.Size); + lp->base = ioremap(link->resource[3]->start, + resource_size(link->resource[3])); if (lp->base == NULL) { printk(KERN_NOTICE "fmvj18x_cs: ioremap failed\n"); return -1; } - i = pcmcia_map_mem_page(link, link->win, 0); + i = pcmcia_map_mem_page(link, link->resource[3], 0); if (i != 0) { iounmap(lp->base); lp->base = NULL; @@ -638,7 +629,6 @@ static void fmvj18x_release(struct pcmcia_device *link) struct net_device *dev = link->priv; local_info_t *lp = netdev_priv(dev); u_char __iomem *tmp; - int j; dev_dbg(&link->dev, "fmvj18x_release\n"); @@ -646,7 +636,6 @@ static void fmvj18x_release(struct pcmcia_device *link) tmp = lp->base; lp->base = NULL; /* set NULL before iounmap */ iounmap(tmp); - j = pcmcia_release_window(link, link->win); } pcmcia_disable_device(link); @@ -708,9 +697,7 @@ MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids); static struct pcmcia_driver fmvj18x_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "fmvj18x_cs", - }, + .name = "fmvj18x_cs", .probe = fmvj18x_probe, .remove = fmvj18x_detach, .id_table = fmvj18x_ids, diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c index b0d06a3d962..bf7dff96d88 100644 --- a/drivers/net/pcmcia/ibmtr_cs.c +++ b/drivers/net/pcmcia/ibmtr_cs.c @@ -57,7 +57,6 @@ #include <linux/trdevice.h> #include <linux/ibmtr.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -102,9 +101,8 @@ static void ibmtr_detach(struct pcmcia_device *p_dev); typedef struct ibmtr_dev_t { struct pcmcia_device *p_dev; - struct net_device *dev; - window_handle_t sram_win_handle; - struct tok_info *ti; + struct net_device *dev; + struct tok_info *ti; } ibmtr_dev_t; static void netdev_get_drvinfo(struct net_device *dev, @@ -123,14 +121,6 @@ static irqreturn_t ibmtr_interrupt(int irq, void *dev_id) { return tok_interrupt(irq, dev); }; -/*====================================================================== - - ibmtr_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. 
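The fmvj18x hunks above (and the ibmtr and pcnet hunks below) convert PCMCIA memory windows from the win_req_t/window_handle_t pair to the generic link->resource[2..5] slots: the driver fills in flags/start/end on the resource, passes it to pcmcia_request_window() together with the access speed, maps a card offset with pcmcia_map_mem_page(), and ioremaps resource_size() bytes. A hedged sketch of the new idiom (error cleanup via pcmcia_release_window() is omitted for brevity):

#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *foo_map_attr_window(struct pcmcia_device *link)
{
	struct resource *win = link->resource[2];

	win->flags |= WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE;
	win->start = 0;
	win->end = 0;				/* let the core choose base and size */

	if (pcmcia_request_window(link, win, 0))	/* 0 = no access-speed request */
		return NULL;
	if (pcmcia_map_mem_page(link, win, 0))		/* map card offset 0 */
		return NULL;

	return ioremap(win->start, resource_size(win));
}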
- -======================================================================*/ - static int __devinit ibmtr_attach(struct pcmcia_device *link) { ibmtr_dev_t *info; @@ -153,9 +143,8 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link) link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; link->resource[0]->end = 4; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.Present = PRESENT_OPTION; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_regs = PRESENT_OPTION; info->dev = dev; @@ -164,15 +153,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link) return ibmtr_config(link); } /* ibmtr_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void ibmtr_detach(struct pcmcia_device *link) { struct ibmtr_dev_t *info = link->priv; @@ -197,26 +177,17 @@ static void ibmtr_detach(struct pcmcia_device *link) kfree(info); } /* ibmtr_detach */ -/*====================================================================== - - ibmtr_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - token-ring device available to the system. - -======================================================================*/ - static int __devinit ibmtr_config(struct pcmcia_device *link) { ibmtr_dev_t *info = link->priv; struct net_device *dev = info->dev; struct tok_info *ti = netdev_priv(dev); - win_req_t req; int i, ret; dev_dbg(&link->dev, "ibmtr_config\n"); - link->conf.ConfigIndex = 0x61; link->io_lines = 16; + link->config_index = 0x61; /* Determine if this is PRIMARY or ALTERNATE. */ @@ -240,39 +211,39 @@ static int __devinit ibmtr_config(struct pcmcia_device *link) ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 
2 : dev->irq); /* Allocate the MMIO memory window */ - req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; - req.Attributes |= WIN_USE_WAIT; - req.Base = 0; - req.Size = 0x2000; - req.AccessSpeed = 250; - ret = pcmcia_request_window(link, &req, &link->win); + link->resource[2]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; + link->resource[2]->flags |= WIN_USE_WAIT; + link->resource[2]->start = 0; + link->resource[2]->end = 0x2000; + ret = pcmcia_request_window(link, link->resource[2], 250); if (ret) goto failed; - ret = pcmcia_map_mem_page(link, link->win, mmiobase); + ret = pcmcia_map_mem_page(link, link->resource[2], mmiobase); if (ret) goto failed; - ti->mmio = ioremap(req.Base, req.Size); + ti->mmio = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); /* Allocate the SRAM memory window */ - req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; - req.Attributes |= WIN_USE_WAIT; - req.Base = 0; - req.Size = sramsize * 1024; - req.AccessSpeed = 250; - ret = pcmcia_request_window(link, &req, &info->sram_win_handle); + link->resource[3]->flags = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; + link->resource[3]->flags |= WIN_USE_WAIT; + link->resource[3]->start = 0; + link->resource[3]->end = sramsize * 1024; + ret = pcmcia_request_window(link, link->resource[3], 250); if (ret) goto failed; - ret = pcmcia_map_mem_page(link, info->sram_win_handle, srambase); + ret = pcmcia_map_mem_page(link, link->resource[3], srambase); if (ret) goto failed; ti->sram_base = srambase >> 12; - ti->sram_virt = ioremap(req.Base, req.Size); - ti->sram_phys = req.Base; + ti->sram_virt = ioremap(link->resource[3]->start, + resource_size(link->resource[3])); + ti->sram_phys = link->resource[3]->start; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -301,14 +272,6 @@ failed: return -ENODEV; } /* ibmtr_config */ -/*====================================================================== - - After a card is removed, ibmtr_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void ibmtr_release(struct pcmcia_device *link) { ibmtr_dev_t *info = link->priv; @@ -316,7 +279,7 @@ static void ibmtr_release(struct pcmcia_device *link) dev_dbg(&link->dev, "ibmtr_release\n"); - if (link->win) { + if (link->resource[2]->end) { struct tok_info *ti = netdev_priv(dev); iounmap(ti->mmio); } @@ -398,9 +361,7 @@ MODULE_DEVICE_TABLE(pcmcia, ibmtr_ids); static struct pcmcia_driver ibmtr_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "ibmtr_cs", - }, + .name = "ibmtr_cs", .probe = ibmtr_attach, .remove = ibmtr_detach, .id_table = ibmtr_ids, diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c index 68f2deeb3ad..1eca4f5a6e7 100644 --- a/drivers/net/pcmcia/nmclan_cs.c +++ b/drivers/net/pcmcia/nmclan_cs.c @@ -146,7 +146,6 @@ Include Files #include <linux/ioport.h> #include <linux/bitops.h> -#include <pcmcia/cs.h> #include <pcmcia/cisreg.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -435,13 +434,6 @@ static const struct net_device_ops mace_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -/* ---------------------------------------------------------------------------- -nmclan_attach - Creates an "instance" of the driver, allocating local data - structures for one device. 
The device is registered with Card - Services. ----------------------------------------------------------------------------- */ - static int nmclan_probe(struct pcmcia_device *link) { mace_private *lp; @@ -460,10 +452,9 @@ static int nmclan_probe(struct pcmcia_device *link) spin_lock_init(&lp->bank_lock); link->resource[0]->end = 32; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.ConfigIndex = 1; - link->conf.Present = PRESENT_OPTION; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 1; + link->config_regs = PRESENT_OPTION; lp->tx_free_frames=AM2150_MAX_TX_FRAMES; @@ -474,14 +465,6 @@ static int nmclan_probe(struct pcmcia_device *link) return nmclan_config(link); } /* nmclan_attach */ -/* ---------------------------------------------------------------------------- -nmclan_detach - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. ----------------------------------------------------------------------------- */ - static void nmclan_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -625,13 +608,6 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr) return 0; } /* mace_init */ -/* ---------------------------------------------------------------------------- -nmclan_config - This routine is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. ----------------------------------------------------------------------------- */ - static int nmclan_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -650,7 +626,7 @@ static int nmclan_config(struct pcmcia_device *link) ret = pcmcia_request_exclusive_irq(link, mace_interrupt); if (ret) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -712,12 +688,6 @@ failed: return -ENODEV; } /* nmclan_config */ -/* ---------------------------------------------------------------------------- -nmclan_release - After a card is removed, nmclan_release() will unregister the - net device, and release the PCMCIA configuration. If the device - is still open, this will be postponed until it is closed. 
----------------------------------------------------------------------------- */ static void nmclan_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "nmclan_release\n"); @@ -1535,9 +1505,7 @@ MODULE_DEVICE_TABLE(pcmcia, nmclan_ids); static struct pcmcia_driver nmclan_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "nmclan_cs", - }, + .name = "nmclan_cs", .probe = nmclan_probe, .remove = nmclan_detach, .id_table = nmclan_ids, diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index f9b509a6b09..5d7d1d3088a 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -42,7 +42,6 @@ #include <linux/mii.h> #include "../8390.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -238,14 +237,6 @@ static const struct net_device_ops pcnet_netdev_ops = { #endif }; -/*====================================================================== - - pcnet_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - -======================================================================*/ - static int pcnet_probe(struct pcmcia_device *link) { pcnet_dev_t *info; @@ -260,23 +251,13 @@ static int pcnet_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = dev; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; dev->netdev_ops = &pcnet_netdev_ops; return pcnet_config(link); } /* pcnet_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void pcnet_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -300,22 +281,22 @@ static void pcnet_detach(struct pcmcia_device *link) static hw_info_t *get_hwinfo(struct pcmcia_device *link) { struct net_device *dev = link->priv; - win_req_t req; u_char __iomem *base, *virt; int i, j; /* Allocate a small memory window */ - req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; - req.Base = 0; req.Size = 0; - req.AccessSpeed = 0; - i = pcmcia_request_window(link, &req, &link->win); + link->resource[2]->flags |= WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = 0; link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); if (i != 0) return NULL; - virt = ioremap(req.Base, req.Size); + virt = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); for (i = 0; i < NR_INFO; i++) { - pcmcia_map_mem_page(link, link->win, hw_info[i].offset & ~(req.Size-1)); - base = &virt[hw_info[i].offset & (req.Size-1)]; + pcmcia_map_mem_page(link, link->resource[2], + hw_info[i].offset & ~(resource_size(link->resource[2])-1)); + base = &virt[hw_info[i].offset & (resource_size(link->resource[2])-1)]; if ((readb(base+0) == hw_info[i].a0) && (readb(base+2) == hw_info[i].a1) && (readb(base+4) == hw_info[i].a2)) { @@ -326,7 +307,7 @@ static hw_info_t *get_hwinfo(struct pcmcia_device *link) } iounmap(virt); - j = pcmcia_release_window(link, link->win); + j = pcmcia_release_window(link, link->resource[2]); return (i < NR_INFO) ? 
hw_info+i : NULL; } /* get_hwinfo */ @@ -421,7 +402,7 @@ static hw_info_t *get_ax88190(struct pcmcia_device *link) int i, j; /* Not much of a test, but the alternatives are messy */ - if (link->conf.ConfigBase != 0x03c0) + if (link->config_base != 0x03c0) return NULL; outb_p(0x01, ioaddr + EN0_DCFG); /* Set word-wide access. */ @@ -463,14 +444,6 @@ static hw_info_t *get_hwired(struct pcmcia_device *link) return &default_info; } /* get_hwired */ -/*====================================================================== - - pcnet_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. - -======================================================================*/ - static int try_io_port(struct pcmcia_device *link) { int j, ret; @@ -502,43 +475,22 @@ static int try_io_port(struct pcmcia_device *link) } } -static int pcnet_confcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int pcnet_confcheck(struct pcmcia_device *p_dev, void *priv_data) { int *priv = priv_data; int try = (*priv & 0x1); - int i; - cistpl_io_t *io = &cfg->io; - if (cfg->index == 0 || cfg->io.nwin == 0) - return -EINVAL; + *priv &= (p_dev->resource[2]->end >= 0x4000) ? 0x10 : ~0x10; - /* For multifunction cards, by convention, we configure the - network function with window 0, and serial with window 1 */ - if (io->nwin > 1) { - i = (io->win[1].len > io->win[0].len); - p_dev->resource[1]->start = io->win[1-i].base; - p_dev->resource[1]->end = io->win[1-i].len; - } else { - i = p_dev->resource[1]->end = 0; - } + if (p_dev->config_index == 0) + return -EINVAL; - *priv &= ((cfg->mem.nwin == 1) && - (cfg->mem.win[0].len >= 0x4000)) ? 0x10 : ~0x10; + if (p_dev->resource[0]->end + p_dev->resource[1]->end < 32) + return -EINVAL; - p_dev->resource[0]->start = io->win[i].base; - p_dev->resource[0]->end = io->win[i].len; - if (!try) - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - else + if (try) p_dev->io_lines = 16; - if (p_dev->resource[0]->end + p_dev->resource[1]->end >= 32) - return try_io_port(p_dev); - - return -EINVAL; + return try_io_port(p_dev); } static hw_info_t *pcnet_try_config(struct pcmcia_device *link, @@ -560,15 +512,14 @@ static hw_info_t *pcnet_try_config(struct pcmcia_device *link, if (!link->irq) return NULL; - if (resource_size(link->resource[1]) == 8) { - link->conf.Attributes |= CONF_ENABLE_SPKR; - link->conf.Status = CCSR_AUDIO_ENA; - } + if (resource_size(link->resource[1]) == 8) + link->config_flags |= CONF_ENABLE_SPKR; + if ((link->manf_id == MANFID_IBM) && (link->card_id == PRODID_IBM_HOME_AND_AWAY)) - link->conf.ConfigIndex |= 0x10; + link->config_index |= 0x10; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) return NULL; @@ -583,7 +534,7 @@ static hw_info_t *pcnet_try_config(struct pcmcia_device *link, } else dev->if_port = 0; - if ((link->conf.ConfigBase == 0x03c0) && + if ((link->config_base == 0x03c0) && (link->manf_id == 0x149) && (link->card_id == 0xc1ab)) { dev_info(&link->dev, "this is an AX88190 card - use axnet_cs instead.\n"); @@ -689,14 +640,6 @@ failed: return -ENODEV; } /* pcnet_config */ -/*====================================================================== - - After a card is removed, pcnet_release() will unregister the net - device, and release the PCMCIA configuration. 
If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void pcnet_release(struct pcmcia_device *link) { pcnet_dev_t *info = PRIV(link->priv); @@ -709,15 +652,6 @@ static void pcnet_release(struct pcmcia_device *link) pcmcia_disable_device(link); } -/*====================================================================== - - The card status event handler. Mostly, this schedules other - stuff to run after an event is received. A CARD_REMOVAL event - also sets some flags to discourage the net drivers from trying - to talk to the card any more. - -======================================================================*/ - static int pcnet_suspend(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -1486,7 +1420,6 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, { struct net_device *dev = link->priv; pcnet_dev_t *info = PRIV(dev); - win_req_t req; int i, window_size, offset, ret; window_size = (stop_pg - start_pg) << 8; @@ -1497,22 +1430,22 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, window_size = roundup_pow_of_two(window_size); /* Allocate a memory window */ - req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; - req.Attributes |= WIN_USE_WAIT; - req.Base = 0; req.Size = window_size; - req.AccessSpeed = mem_speed; - ret = pcmcia_request_window(link, &req, &link->win); + link->resource[3]->flags |= WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM|WIN_ENABLE; + link->resource[3]->flags |= WIN_USE_WAIT; + link->resource[3]->start = 0; link->resource[3]->end = window_size; + ret = pcmcia_request_window(link, link->resource[3], mem_speed); if (ret) goto failed; offset = (start_pg << 8) + cm_offset; offset -= offset % window_size; - ret = pcmcia_map_mem_page(link, link->win, offset); + ret = pcmcia_map_mem_page(link, link->resource[3], offset); if (ret) goto failed; /* Try scribbling on the buffer */ - info->base = ioremap(req.Base, window_size); + info->base = ioremap(link->resource[3]->start, + resource_size(link->resource[3])); for (i = 0; i < (TX_PAGES<<8); i += 2) __raw_writew((i>>1), info->base+offset+i); udelay(100); @@ -1521,19 +1454,20 @@ static int setup_shmem_window(struct pcmcia_device *link, int start_pg, pcnet_reset_8390(dev); if (i != (TX_PAGES<<8)) { iounmap(info->base); - pcmcia_release_window(link, link->win); - info->base = NULL; link->win = 0; + pcmcia_release_window(link, link->resource[3]); + info->base = NULL; goto failed; } ei_status.mem = info->base + offset; - ei_status.priv = req.Size; + ei_status.priv = resource_size(link->resource[3]); dev->mem_start = (u_long)ei_status.mem; - dev->mem_end = dev->mem_start + req.Size; + dev->mem_end = dev->mem_start + resource_size(link->resource[3]); ei_status.tx_start_page = start_pg; ei_status.rx_start_page = start_pg + TX_PAGES; - ei_status.stop_page = start_pg + ((req.Size - offset) >> 8); + ei_status.stop_page = start_pg + ( + (resource_size(link->resource[3]) - offset) >> 8); /* set up block i/o functions */ ei_status.get_8390_hdr = &shmem_get_8390_hdr; @@ -1772,9 +1706,7 @@ MODULE_FIRMWARE("cis/PE-200.cis"); MODULE_FIRMWARE("cis/tamarack.cis"); static struct pcmcia_driver pcnet_driver = { - .drv = { - .name = "pcnet_cs", - }, + .name = "pcnet_cs", .probe = pcnet_probe, .remove = pcnet_detach, .owner = THIS_MODULE, diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c index 377367d03b4..0af2fc8ec16 100644 --- 
a/drivers/net/pcmcia/smc91c92_cs.c +++ b/drivers/net/pcmcia/smc91c92_cs.c @@ -44,7 +44,6 @@ #include <linux/jiffies.h> #include <linux/firmware.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> @@ -300,14 +299,6 @@ static const struct net_device_ops smc_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -/*====================================================================== - - smc91c92_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - -======================================================================*/ - static int smc91c92_probe(struct pcmcia_device *link) { struct smc_private *smc; @@ -324,10 +315,6 @@ static int smc91c92_probe(struct pcmcia_device *link) link->priv = dev; spin_lock_init(&smc->lock); - link->resource[0]->end = 16; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; /* The SMC91c92-specific entries in the device structure. */ dev->netdev_ops = &smc_netdev_ops; @@ -343,15 +330,6 @@ static int smc91c92_probe(struct pcmcia_device *link) return smc91c92_config(link); } /* smc91c92_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void smc91c92_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -412,26 +390,28 @@ static int mhz_3288_power(struct pcmcia_device *link) mdelay(200); /* Now read and write the COR... */ - tmp = readb(smc->base + link->conf.ConfigBase + CISREG_COR); + tmp = readb(smc->base + link->config_base + CISREG_COR); udelay(5); - writeb(tmp, smc->base + link->conf.ConfigBase + CISREG_COR); + writeb(tmp, smc->base + link->config_base + CISREG_COR); return 0; } -static int mhz_mfc_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int mhz_mfc_config_check(struct pcmcia_device *p_dev, void *priv_data) { int k; - p_dev->resource[1]->start = cf->io.win[0].base; + p_dev->io_lines = 16; + p_dev->resource[1]->start = p_dev->resource[0]->start; + p_dev->resource[1]->end = 8; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; for (k = 0; k < 0x400; k += 0x10) { if (k & 0x80) continue; p_dev->resource[0]->start = k ^ 0x300; - p_dev->io_lines = 16; if (!pcmcia_request_io(p_dev)) return 0; } @@ -442,14 +422,11 @@ static int mhz_mfc_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; struct smc_private *smc = netdev_priv(dev); - win_req_t req; unsigned int offset; int i; - link->conf.Attributes |= CONF_ENABLE_SPKR; - link->conf.Status = CCSR_AUDIO_ENA; - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[1]->end = 8; + link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ | + CONF_AUTO_SET_IO; /* The Megahertz combo cards have modem-like CIS entries, so we have to explicitly try a bunch of port combinations. 
*/ @@ -459,16 +436,16 @@ static int mhz_mfc_config(struct pcmcia_device *link) dev->base_addr = link->resource[0]->start; /* Allocate a memory window, for accessing the ISR */ - req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; - req.Base = req.Size = 0; - req.AccessSpeed = 0; - i = pcmcia_request_window(link, &req, &link->win); + link->resource[2]->flags = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; + link->resource[2]->start = link->resource[2]->end = 0; + i = pcmcia_request_window(link, link->resource[2], 0); if (i != 0) return -ENODEV; - smc->base = ioremap(req.Base, req.Size); - offset = (smc->manfid == MANFID_MOTOROLA) ? link->conf.ConfigBase : 0; - i = pcmcia_map_mem_page(link, link->win, offset); + smc->base = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); + offset = (smc->manfid == MANFID_MOTOROLA) ? link->config_base : 0; + i = pcmcia_map_mem_page(link, link->resource[2], offset); if ((i == 0) && (smc->manfid == MANFID_MEGAHERTZ) && (smc->cardid == PRODID_MEGAHERTZ_EM3288)) @@ -591,14 +568,12 @@ static int mot_setup(struct pcmcia_device *link) /*====================================================================*/ -static int smc_configcheck(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int smc_configcheck(struct pcmcia_device *p_dev, void *priv_data) { - p_dev->resource[0]->start = cf->io.win[0].base; - p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK; + p_dev->resource[0]->end = 16; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + return pcmcia_request_io(p_dev); } @@ -607,7 +582,8 @@ static int smc_config(struct pcmcia_device *link) struct net_device *dev = link->priv; int i; - link->resource[0]->end = 16; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + i = pcmcia_loop_config(link, smc_configcheck, NULL); if (!i) dev->base_addr = link->resource[0]->start; @@ -640,15 +616,14 @@ static int osi_config(struct pcmcia_device *link) static const unsigned int com[4] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8 }; int i, j; - link->conf.Attributes |= CONF_ENABLE_SPKR; - link->conf.Status = CCSR_AUDIO_ENA; + link->config_flags |= CONF_ENABLE_SPKR | CONF_ENABLE_IRQ; link->resource[0]->end = 64; link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; link->resource[1]->end = 8; /* Enable Hard Decode, LAN, Modem */ - link->conf.ConfigIndex = 0x23; link->io_lines = 16; + link->config_index = 0x23; for (i = j = 0; j < 4; j++) { link->resource[1]->start = com[j]; @@ -658,7 +633,7 @@ static int osi_config(struct pcmcia_device *link) } if (i != 0) { /* Fallback: turn off hard decode */ - link->conf.ConfigIndex = 0x03; + link->config_index = 0x03; link->resource[1]->end = 0; i = pcmcia_request_io(link); } @@ -817,27 +792,16 @@ static int check_sig(struct pcmcia_device *link) } if (width) { - modconf_t mod = { - .Attributes = CONF_IO_CHANGE_WIDTH, - }; printk(KERN_INFO "smc91c92_cs: using 8-bit IO window.\n"); smc91c92_suspend(link); - pcmcia_modify_configuration(link, &mod); + pcmcia_fixup_iowidth(link); smc91c92_resume(link); return check_sig(link); } return -ENODEV; } -/*====================================================================== - - smc91c92_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. 
- -======================================================================*/ - static int smc91c92_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -869,7 +833,7 @@ static int smc91c92_config(struct pcmcia_device *link) i = pcmcia_request_irq(link, smc_interrupt); if (i) goto config_failed; - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i) goto config_failed; @@ -988,18 +952,10 @@ config_failed: return -ENODEV; } /* smc91c92_config */ -/*====================================================================== - - After a card is removed, smc91c92_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void smc91c92_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "smc91c92_release\n"); - if (link->win) { + if (link->resource[2]->end) { struct net_device *dev = link->priv; struct smc_private *smc = netdev_priv(dev); iounmap(smc->base); @@ -2101,9 +2057,7 @@ MODULE_DEVICE_TABLE(pcmcia, smc91c92_ids); static struct pcmcia_driver smc91c92_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "smc91c92_cs", - }, + .name = "smc91c92_cs", .probe = smc91c92_probe, .remove = smc91c92_detach, .id_table = smc91c92_ids, diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c index f5819526b5e..1fece617c06 100644 --- a/drivers/net/pcmcia/xirc2ps_cs.c +++ b/drivers/net/pcmcia/xirc2ps_cs.c @@ -82,7 +82,6 @@ #include <linux/bitops.h> #include <linux/mii.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> @@ -267,33 +266,11 @@ static unsigned mii_rd(unsigned int ioaddr, u_char phyaddr, u_char phyreg); static void mii_wr(unsigned int ioaddr, u_char phyaddr, u_char phyreg, unsigned data, int len); -/* - * The event() function is this driver's Card Services event handler. - * It will be called by Card Services when an appropriate card status - * event is received. The config() and release() entry points are - * used to configure or release a socket, in response to card insertion - * and ejection events. They are invoked from the event handler. - */ - static int has_ce2_string(struct pcmcia_device * link); static int xirc2ps_config(struct pcmcia_device * link); static void xirc2ps_release(struct pcmcia_device * link); - -/**************** - * The attach() and detach() entry points are used to create and destroy - * "instances" of the driver, where each instance represents everything - * needed to manage one actual PCMCIA card. - */ - static void xirc2ps_detach(struct pcmcia_device *p_dev); -/**************** - * You'll also need to prototype all the functions that will actually - * be used to talk to your device. See 'pcmem_cs' for a good example - * of a fully self-sufficient driver; the other drivers rely more or - * less on other parts of the kernel. - */ - static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); typedef struct local_info_t { @@ -501,16 +478,6 @@ static const struct net_device_ops netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -/**************** - * xirc2ps_attach() creates an "instance" of the driver, allocating - * local data structures for one device. The device is registered - * with Card Services. 
- * - * The dev_link structure is initialized, but we don't actually - * configure the card at this point -- we wait until we receive a - * card insertion event. - */ - static int xirc2ps_probe(struct pcmcia_device *link) { @@ -529,9 +496,7 @@ xirc2ps_probe(struct pcmcia_device *link) link->priv = dev; /* General socket configuration */ - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.ConfigIndex = 1; + link->config_index = 1; /* Fill in card specific entries */ dev->netdev_ops = &netdev_ops; @@ -542,13 +507,6 @@ xirc2ps_probe(struct pcmcia_device *link) return xirc2ps_config(link); } /* xirc2ps_attach */ -/**************** - * This deletes a driver "instance". The device is de-registered - * with Card Services. If it has been released, all local data - * structures are freed. Otherwise, the structures will be freed - * when the device is released. - */ - static void xirc2ps_detach(struct pcmcia_device *link) { @@ -667,44 +625,53 @@ has_ce2_string(struct pcmcia_device * p_dev) } static int -xirc2ps_config_modem(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +xirc2ps_config_modem(struct pcmcia_device *p_dev, void *priv_data) { unsigned int ioaddr; - if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) { - for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { - p_dev->resource[1]->start = cf->io.win[0].base; - p_dev->resource[0]->start = ioaddr; - if (!pcmcia_request_io(p_dev)) - return 0; - } + if ((p_dev->resource[0]->start & 0xf) == 8) + return -ENODEV; + + p_dev->resource[0]->end = 16; + p_dev->resource[1]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->io_lines = 10; + + p_dev->resource[1]->start = p_dev->resource[0]->start; + for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { + p_dev->resource[0]->start = ioaddr; + if (!pcmcia_request_io(p_dev)) + return 0; } return -ENODEV; } static int -xirc2ps_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +xirc2ps_config_check(struct pcmcia_device *p_dev, void *priv_data) { int *pass = priv_data; + resource_size_t tmp = p_dev->resource[1]->start; - if (cf->io.nwin > 0 && (cf->io.win[0].base & 0xf) == 8) { - p_dev->resource[1]->start = cf->io.win[0].base; - p_dev->resource[0]->start = p_dev->resource[1]->start - + (*pass ? (cf->index & 0x20 ? -24:8) - : (cf->index & 0x20 ? 8:-24)); - if (!pcmcia_request_io(p_dev)) - return 0; - } - return -ENODEV; + tmp += (*pass ? (p_dev->config_index & 0x20 ? -24 : 8) + : (p_dev->config_index & 0x20 ? 
8 : -24)); + + if ((p_dev->resource[0]->start & 0xf) == 8) + return -ENODEV; + + p_dev->resource[0]->end = 18; + p_dev->resource[1]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->io_lines = 10; + p_dev->resource[1]->start = p_dev->resource[0]->start; + p_dev->resource[0]->start = tmp; + return pcmcia_request_io(p_dev); } @@ -727,11 +694,6 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev, }; -/**************** - * xirc2ps_config() is scheduled to run after a CARD_INSERTION event - * is received, to configure the PCMCIA socket, and to make the - * ethernet device available to the system. - */ static int xirc2ps_config(struct pcmcia_device * link) { @@ -807,32 +769,24 @@ xirc2ps_config(struct pcmcia_device * link) goto failure; } - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; - link->io_lines = 10; if (local->modem) { int pass; + link->config_flags |= CONF_AUTO_SET_IO; - if (do_sound) { - link->conf.Attributes |= CONF_ENABLE_SPKR; - link->conf.Status |= CCSR_AUDIO_ENA; - } - link->resource[1]->end = 8; - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; if (local->dingo) { /* Take the Modem IO port from the CIS and scan for a free * Ethernet port */ - link->resource[0]->end = 16; /* no Mako stuff anymore */ if (!pcmcia_loop_config(link, xirc2ps_config_modem, NULL)) goto port_found; } else { - link->resource[0]->end = 18; /* We do 2 passes here: The first one uses the regular mapping and * the second tries again, thereby considering that the 32 ports are * mirrored every 32 bytes. Actually we use a mirrored port for * the Mako if (on the first pass) the COR bit 5 is set. */ for (pass=0; pass < 2; pass++) - if (!pcmcia_loop_config(link, xirc2ps_config_check, &pass)) + if (!pcmcia_loop_config(link, xirc2ps_config_check, + &pass)) goto port_found; /* if special option: * try to configure as Ethernet only. @@ -840,7 +794,9 @@ xirc2ps_config(struct pcmcia_device * link) } printk(KNOT_XIRC "no ports available\n"); } else { + link->io_lines = 10; link->resource[0]->end = 16; + link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { link->resource[0]->start = ioaddr; if (!(err = pcmcia_request_io(link))) @@ -861,16 +817,14 @@ xirc2ps_config(struct pcmcia_device * link) if ((err=pcmcia_request_irq(link, xirc2ps_interrupt))) goto config_error; - /**************** - * This actually configures the PCMCIA socket -- setting up - * the I/O windows and the interrupt mapping. - */ - if ((err=pcmcia_request_configuration(link, &link->conf))) + link->config_flags |= CONF_ENABLE_IRQ; + if (do_sound) + link->config_flags |= CONF_ENABLE_SPKR; + + if ((err = pcmcia_enable_device(link))) goto config_error; if (local->dingo) { - win_req_t req; - /* Reset the modem's BAR to the correct value * This is necessary because in the RequestConfiguration call, * the base address of the ethernet port (BasePort1) is written @@ -890,14 +844,14 @@ xirc2ps_config(struct pcmcia_device * link) * is at 0x0800. 
So we allocate a window into the attribute * memory and write direct to the CIS registers */ - req.Attributes = WIN_DATA_WIDTH_8|WIN_MEMORY_TYPE_AM|WIN_ENABLE; - req.Base = req.Size = 0; - req.AccessSpeed = 0; - if ((err = pcmcia_request_window(link, &req, &link->win))) + link->resource[2]->flags = WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | + WIN_ENABLE; + link->resource[2]->start = link->resource[2]->end = 0; + if ((err = pcmcia_request_window(link, link->resource[2], 0))) goto config_error; - local->dingo_ccr = ioremap(req.Base,0x1000) + 0x0800; - if ((err = pcmcia_map_mem_page(link, link->win, 0))) + local->dingo_ccr = ioremap(link->resource[2]->start, 0x1000) + 0x0800; + if ((err = pcmcia_map_mem_page(link, link->resource[2], 0))) goto config_error; /* Setup the CCRs; there are no infos in the CIS about the Ethernet @@ -978,17 +932,12 @@ xirc2ps_config(struct pcmcia_device * link) return -ENODEV; } /* xirc2ps_config */ -/**************** - * After a card is removed, xirc2ps_release() will unregister the net - * device, and release the PCMCIA configuration. If the device is - * still open, this will be postponed until it is closed. - */ static void xirc2ps_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "release\n"); - if (link->win) { + if (link->resource[2]->end) { struct net_device *dev = link->priv; local_info_t *local = netdev_priv(dev); if (local->dingo) @@ -1830,9 +1779,7 @@ MODULE_DEVICE_TABLE(pcmcia, xirc2ps_ids); static struct pcmcia_driver xirc2ps_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "xirc2ps_cs", - }, + .name = "xirc2ps_cs", .probe = xirc2ps_probe, .remove = xirc2ps_detach, .id_table = xirc2ps_ids, diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 6a6b8199a0d..6c58da2b882 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev) * may call phy routines that try to grab the same lock, and that may * lead to a deadlock. */ - if (phydev->attached_dev) + if (phydev->attached_dev && phydev->adjust_link) phy_stop_machine(phydev); if (!mdio_bus_phy_may_suspend(phydev)) @@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev) return ret; no_resume: - if (phydev->attached_dev) + if (phydev->attached_dev && phydev->adjust_link) phy_start_machine(phydev, NULL); return 0; diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index af50a530dae..78d70a6481b 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c @@ -184,7 +184,7 @@ ppp_asynctty_open(struct tty_struct *tty) tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap); atomic_set(&ap->refcnt, 1); - init_MUTEX_LOCKED(&ap->dead_sem); + sema_init(&ap->dead_sem, 0); ap->chan.private = ap; ap->chan.ops = &async_ops; diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 6695a51e09e..4bddb2afdd1 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -856,7 +856,8 @@ static const struct file_operations ppp_device_fops = { .poll = ppp_poll, .unlocked_ioctl = ppp_ioctl, .open = ppp_open, - .release = ppp_release + .release = ppp_release, + .llseek = noop_llseek, }; static __net_init int ppp_init_net(struct net *net) @@ -1314,8 +1315,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb) hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? 
MPHDRLEN_SSN: MPHDRLEN; i = 0; list_for_each_entry(pch, &ppp->channels, clist) { - navail += pch->avail = (pch->chan != NULL); - pch->speed = pch->chan->speed; + if (pch->chan) { + pch->avail = 1; + navail++; + pch->speed = pch->chan->speed; + } else { + pch->avail = 0; + } if (pch->avail) { if (skb_queue_empty(&pch->file.xq) || !pch->had_frag) { diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c index 75ba744b173..2c7cf0b6481 100644 --- a/drivers/net/qlcnic/qlcnic_init.c +++ b/drivers/net/qlcnic/qlcnic_init.c @@ -1316,7 +1316,7 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, return -ENOMEM; } - skb_reserve(skb, 2); + skb_reserve(skb, NET_IP_ALIGN); dma = pci_map_single(pdev, skb->data, rds_ring->dma_size, PCI_DMA_FROMDEVICE); @@ -1404,7 +1404,6 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter, if (pkt_offset) skb_pull(skb, pkt_offset); - skb->truesize = skb->len + sizeof(struct sk_buff); skb->protocol = eth_type_trans(skb, netdev); napi_gro_receive(&sds_ring->napi, skb); @@ -1466,8 +1465,6 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter, skb_put(skb, lro_length + data_offset); - skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb); - skb_pull(skb, l2_hdr_offset); skb->protocol = eth_type_trans(skb, netdev); @@ -1700,8 +1697,6 @@ qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, if (pkt_offset) skb_pull(skb, pkt_offset); - skb->truesize = skb->len + sizeof(struct sk_buff); - if (!qlcnic_check_loopback_buff(skb->data)) adapter->diag_cnt++; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 078bbf4e6f1..992db2fa136 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -1212,7 +1212,8 @@ static void rtl8169_update_counters(struct net_device *dev) if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) return; - counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr); + counters = dma_alloc_coherent(&tp->pci_dev->dev, sizeof(*counters), + &paddr, GFP_KERNEL); if (!counters) return; @@ -1233,7 +1234,8 @@ static void rtl8169_update_counters(struct net_device *dev) RTL_W32(CounterAddrLow, 0); RTL_W32(CounterAddrHigh, 0); - pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr); + dma_free_coherent(&tp->pci_dev->dev, sizeof(*counters), counters, + paddr); } static void rtl8169_get_ethtool_stats(struct net_device *dev, @@ -2934,7 +2936,7 @@ static const struct rtl_cfg_info { .hw_start = rtl_hw_start_8168, .region = 2, .align = 8, - .intr_event = SYSErr | LinkChg | RxOverflow | + .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | TxErr | TxOK | RxOK | RxErr, .napi_event = TxErr | TxOK | RxOK | RxOverflow, .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, @@ -3292,15 +3294,15 @@ static int rtl8169_open(struct net_device *dev) /* * Rx and Tx desscriptors needs 256 bytes alignment. - * pci_alloc_consistent provides more. + * dma_alloc_coherent provides more. 
*/ - tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, - &tp->TxPhyAddr); + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); if (!tp->TxDescArray) goto err_pm_runtime_put; - tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, - &tp->RxPhyAddr); + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); if (!tp->RxDescArray) goto err_free_tx_0; @@ -3334,12 +3336,12 @@ out: err_release_ring_2: rtl8169_rx_clear(tp); err_free_rx_1: - pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, - tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); tp->RxDescArray = NULL; err_free_tx_0: - pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, - tp->TxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); tp->TxDescArray = NULL; err_pm_runtime_put: pm_runtime_put_noidle(&pdev->dev); @@ -3975,7 +3977,7 @@ static void rtl8169_free_rx_skb(struct rtl8169_private *tp, { struct pci_dev *pdev = tp->pci_dev; - pci_unmap_single(pdev, le64_to_cpu(desc->addr), tp->rx_buf_sz, + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, PCI_DMA_FROMDEVICE); dev_kfree_skb(*sk_buff); *sk_buff = NULL; @@ -4000,7 +4002,7 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct net_device *dev, struct RxDesc *desc, int rx_buf_sz, - unsigned int align) + unsigned int align, gfp_t gfp) { struct sk_buff *skb; dma_addr_t mapping; @@ -4008,13 +4010,13 @@ static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev, pad = align ? align : NET_IP_ALIGN; - skb = netdev_alloc_skb(dev, rx_buf_sz + pad); + skb = __netdev_alloc_skb(dev, rx_buf_sz + pad, gfp); if (!skb) goto err_out; skb_reserve(skb, align ? 
((pad - 1) & (unsigned long)skb->data) : pad); - mapping = pci_map_single(pdev, skb->data, rx_buf_sz, + mapping = dma_map_single(&pdev->dev, skb->data, rx_buf_sz, PCI_DMA_FROMDEVICE); rtl8169_map_to_asic(desc, mapping, rx_buf_sz); @@ -4039,7 +4041,7 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp) } static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, - u32 start, u32 end) + u32 start, u32 end, gfp_t gfp) { u32 cur; @@ -4054,7 +4056,7 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev, skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev, tp->RxDescArray + i, - tp->rx_buf_sz, tp->align); + tp->rx_buf_sz, tp->align, gfp); if (!skb) break; @@ -4082,7 +4084,7 @@ static int rtl8169_init_ring(struct net_device *dev) memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); - if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC) + if (rtl8169_rx_fill(tp, dev, 0, NUM_RX_DESC, GFP_KERNEL) != NUM_RX_DESC) goto err_out; rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); @@ -4099,7 +4101,8 @@ static void rtl8169_unmap_tx_skb(struct pci_dev *pdev, struct ring_info *tx_skb, { unsigned int len = tx_skb->len; - pci_unmap_single(pdev, le64_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len, + PCI_DMA_TODEVICE); desc->opts1 = 0x00; desc->opts2 = 0x00; desc->addr = 0x00; @@ -4243,7 +4246,8 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, txd = tp->TxDescArray + entry; len = frag->size; addr = ((void *) page_address(frag->page)) + frag->page_offset; - mapping = pci_map_single(tp->pci_dev, addr, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&tp->pci_dev->dev, addr, len, + PCI_DMA_TODEVICE); /* anti gcc 2.95.3 bugware (sic) */ status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); @@ -4313,7 +4317,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->tx_skb[entry].skb = skb; } - mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len, + PCI_DMA_TODEVICE); tp->tx_skb[entry].len = len; txd->addr = cpu_to_le64(mapping); @@ -4477,8 +4482,8 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, if (!skb) goto out; - pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, pkt_size, + PCI_DMA_FROMDEVICE); skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); *sk_buff = skb; done = true; @@ -4549,11 +4554,11 @@ static int rtl8169_rx_interrupt(struct net_device *dev, rtl8169_rx_csum(skb, desc); if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) { - pci_dma_sync_single_for_device(pdev, addr, + dma_sync_single_for_device(&pdev->dev, addr, pkt_size, PCI_DMA_FROMDEVICE); rtl8169_mark_to_asic(desc, tp->rx_buf_sz); } else { - pci_unmap_single(pdev, addr, tp->rx_buf_sz, + dma_unmap_single(&pdev->dev, addr, tp->rx_buf_sz, PCI_DMA_FROMDEVICE); tp->Rx_skbuff[entry] = NULL; } @@ -4583,7 +4588,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev, count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; - delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); + delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, GFP_ATOMIC); if (!delta && count) netif_info(tp, intr, dev, "no Rx buffer allocated\n"); tp->dirty_rx += delta; @@ -4625,8 +4630,7 @@ static irqreturn_t rtl8169_interrupt(int 
irq, void *dev_instance) } /* Work around for rx fifo overflow */ - if (unlikely(status & RxFIFOOver) && - (tp->mac_version == RTL_GIGA_MAC_VER_11)) { + if (unlikely(status & RxFIFOOver)) { netif_stop_queue(dev); rtl8169_tx_timeout(dev); break; @@ -4770,10 +4774,10 @@ static int rtl8169_close(struct net_device *dev) free_irq(dev->irq, dev); - pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, - tp->RxPhyAddr); - pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, - tp->TxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); tp->TxDescArray = NULL; tp->RxDescArray = NULL; diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c index 07eb884ff98..44150f2f7bf 100644 --- a/drivers/net/rionet.c +++ b/drivers/net/rionet.c @@ -384,7 +384,7 @@ static void rionet_remove(struct rio_dev *rdev) free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0); unregister_netdev(ndev); - kfree(ndev); + free_netdev(ndev); list_for_each_entry_safe(peer, tmp, &rionet_peers, node) { list_del(&peer->node); diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c index cc4bd8c65f8..9265315baa0 100644 --- a/drivers/net/sgiseeq.c +++ b/drivers/net/sgiseeq.c @@ -804,7 +804,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev) err_out_free_page: free_page((unsigned long) sp->srings); err_out_free_dev: - kfree(dev); + free_netdev(dev); err_out: return err; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 40e5c46e757..465ae7e8450 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -43,6 +43,7 @@ #include <linux/seq_file.h> #include <linux/mii.h> #include <linux/slab.h> +#include <linux/dmi.h> #include <asm/irq.h> #include "skge.h" @@ -3868,6 +3869,8 @@ static void __devinit skge_show_addr(struct net_device *dev) netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); } +static int only_32bit_dma; + static int __devinit skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -3889,7 +3892,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, pci_set_master(pdev); - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { @@ -4147,8 +4150,21 @@ static struct pci_driver skge_driver = { .shutdown = skge_shutdown, }; +static struct dmi_system_id skge_32bit_dma_boards[] = { + { + .ident = "Gigabyte nForce boards", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), + DMI_MATCH(DMI_BOARD_NAME, "nForce"), + }, + }, + {} +}; + static int __init skge_init_module(void) { + if (dmi_check_system(skge_32bit_dma_boards)) + only_32bit_dma = 1; skge_debug_init(); return pci_register_driver(&skge_driver); } diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 10cf0cbc218..726df611ee1 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c @@ -72,6 +72,7 @@ static const char version[] = #include <linux/sched.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/crc32.h> diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 0909ae934ad..8150ba15411 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -58,6 +58,7 @@ 
MODULE_LICENSE("GPL"); MODULE_VERSION(SMSC_DRV_VERSION); +MODULE_ALIAS("platform:smsc911x"); #if USE_DEBUG > 0 static int debug = 16; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index bc3af78a869..1ec4b9e0239 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -4666,7 +4666,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) desc_idx, *post_ptr); drop_it_no_recycle: /* Other statistics kept track of by card. */ - tp->net_stats.rx_dropped++; + tp->rx_dropped++; goto next_pkt; } @@ -4726,7 +4726,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if (len > (tp->dev->mtu + ETH_HLEN) && skb->protocol != htons(ETH_P_8021Q)) { dev_kfree_skb(skb); - goto next_pkt; + goto drop_it_no_recycle; } if (desc->type_flags & RXD_FLAG_VLAN && @@ -9240,6 +9240,8 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, stats->rx_missed_errors = old_stats->rx_missed_errors + get_stat64(&hw_stats->rx_discards); + stats->rx_dropped = tp->rx_dropped; + return stats; } diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 4937bd19096..be7ff138a7f 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h @@ -2759,7 +2759,7 @@ struct tg3 { /* begin "everything else" cacheline(s) section */ - struct rtnl_link_stats64 net_stats; + unsigned long rx_dropped; struct rtnl_link_stats64 net_stats_prev; struct tg3_ethtool_stats estats; struct tg3_ethtool_stats estats_prev; diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 5efa57757a2..6888e3d4146 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -243,6 +243,7 @@ enum { NWayState = (1 << 14) | (1 << 13) | (1 << 12), NWayRestart = (1 << 12), NonselPortActive = (1 << 9), + SelPortActive = (1 << 8), LinkFailStatus = (1 << 2), NetCxnErr = (1 << 1), }; @@ -363,7 +364,9 @@ static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, }; /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; -static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, }; +static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; +/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */ +static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; @@ -1064,6 +1067,9 @@ static void de21041_media_timer (unsigned long data) unsigned int carrier; unsigned long flags; + /* clear port active bits */ + dw32(SIAStatus, NonselPortActive | SelPortActive); + carrier = (status & NetCxnErr) ? 
0 : 1; if (carrier) { @@ -1158,14 +1164,29 @@ no_link_yet: static void de_media_interrupt (struct de_private *de, u32 status) { if (status & LinkPass) { + /* Ignore if current media is AUI or BNC and we can't use TP */ + if ((de->media_type == DE_MEDIA_AUI || + de->media_type == DE_MEDIA_BNC) && + (de->media_lock || + !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))) + return; + /* If current media is not TP, change it to TP */ + if ((de->media_type == DE_MEDIA_AUI || + de->media_type == DE_MEDIA_BNC)) { + de->media_type = DE_MEDIA_TP_AUTO; + de_stop_rxtx(de); + de_set_media(de); + de_start_rxtx(de); + } de_link_up(de); mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); return; } BUG_ON(!(status & LinkFail)); - - if (netif_carrier_ok(de->dev)) { + /* Mark the link as down only if current media is TP */ + if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI && + de->media_type != DE_MEDIA_BNC) { de_link_down(de); mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); } @@ -1229,6 +1250,7 @@ static void de_adapter_sleep (struct de_private *de) if (de->de21040) return; + dw32(CSR13, 0); /* Reset phy */ pci_read_config_dword(de->pdev, PCIPM, &pmctl); pmctl |= PM_Sleep; pci_write_config_dword(de->pdev, PCIPM, pmctl); @@ -1574,12 +1596,15 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) return 0; /* nothing to change */ de_link_down(de); + mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); de_stop_rxtx(de); de->media_type = new_media; de->media_lock = media_lock; de->media_advertise = ecmd->advertising; de_set_media(de); + if (netif_running(de->dev)) + de_start_rxtx(de); return 0; } @@ -1911,8 +1936,14 @@ fill_defaults: for (i = 0; i < DE_MAX_MEDIA; i++) { if (de->media[i].csr13 == 0xffff) de->media[i].csr13 = t21041_csr13[i]; - if (de->media[i].csr14 == 0xffff) - de->media[i].csr14 = t21041_csr14[i]; + if (de->media[i].csr14 == 0xffff) { + /* autonegotiation is broken at least on some chip + revisions - rev. 
0x21 works, 0x11 does not */ + if (de->pdev->revision < 0x20) + de->media[i].csr14 = t21041_csr14_brk[i]; + else + de->media[i].csr14 = t21041_csr14[i]; + } if (de->media[i].csr15 == 0xffff) de->media[i].csr15 = t21041_csr15[i]; } @@ -2158,6 +2189,8 @@ static int de_resume (struct pci_dev *pdev) dev_err(&dev->dev, "pci_enable_device failed in resume\n"); goto out; } + pci_set_master(pdev); + de_init_rings(de); de_init_hw(de); out_attach: netif_device_attach(dev); diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 6efca66b876..1cd752f9a6e 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1652,6 +1652,8 @@ static int hso_get_count(struct hso_serial *serial, struct uart_icount cnow; struct hso_tiocmget *tiocmget = serial->tiocmget; + memset(&icount, 0, sizeof(struct serial_icounter_struct)); + if (!tiocmget) return -ENOENT; spin_lock_irq(&serial->serial_lock); diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 04c6cd4333f..10bafd59f9c 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -575,7 +575,7 @@ static int cosa_probe(int base, int irq, int dma) /* Initialize the chardev data structures */ mutex_init(&chan->rlock); - init_MUTEX(&chan->wsem); + sema_init(&chan->wsem, 1); /* Register the network interface */ if (!(chan->netdev = alloc_hdlcdev(chan))) { diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c index b1aec3e1892..9c70b5fa3f5 100644 --- a/drivers/net/wimax/i2400m/debugfs.c +++ b/drivers/net/wimax/i2400m/debugfs.c @@ -119,6 +119,7 @@ const struct file_operations i2400m_rx_stats_fops = { .open = i2400m_stats_open, .read = i2400m_rx_stats_read, .write = i2400m_rx_stats_write, + .llseek = default_llseek, }; @@ -171,6 +172,7 @@ const struct file_operations i2400m_tx_stats_fops = { .open = i2400m_stats_open, .read = i2400m_tx_stats_read, .write = i2400m_tx_stats_write, + .llseek = default_llseek, }; diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c index 8cc9e319f43..1737d1488b3 100644 --- a/drivers/net/wimax/i2400m/rx.c +++ b/drivers/net/wimax/i2400m/rx.c @@ -1244,16 +1244,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) int i, result; struct device *dev = i2400m_dev(i2400m); const struct i2400m_msg_hdr *msg_hdr; - size_t pl_itr, pl_size, skb_len; + size_t pl_itr, pl_size; unsigned long flags; - unsigned num_pls, single_last; + unsigned num_pls, single_last, skb_len; skb_len = skb->len; - d_fnstart(4, dev, "(i2400m %p skb %p [size %zu])\n", + d_fnstart(4, dev, "(i2400m %p skb %p [size %u])\n", i2400m, skb, skb_len); result = -EIO; msg_hdr = (void *) skb->data; - result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb->len); + result = i2400m_rx_msg_hdr_check(i2400m, msg_hdr, skb_len); if (result < 0) goto error_msg_hdr_check; result = -EIO; @@ -1261,10 +1261,10 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) pl_itr = sizeof(*msg_hdr) + /* Check payload descriptor(s) */ num_pls * sizeof(msg_hdr->pld[0]); pl_itr = ALIGN(pl_itr, I2400M_PL_ALIGN); - if (pl_itr > skb->len) { /* got all the payload descriptors? */ + if (pl_itr > skb_len) { /* got all the payload descriptors? */ dev_err(dev, "RX: HW BUG? 
message too short (%u bytes) for " "%u payload descriptors (%zu each, total %zu)\n", - skb->len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr); + skb_len, num_pls, sizeof(msg_hdr->pld[0]), pl_itr); goto error_pl_descr_short; } /* Walk each payload payload--check we really got it */ @@ -1272,7 +1272,7 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) /* work around old gcc warnings */ pl_size = i2400m_pld_size(&msg_hdr->pld[i]); result = i2400m_rx_pl_descr_check(i2400m, &msg_hdr->pld[i], - pl_itr, skb->len); + pl_itr, skb_len); if (result < 0) goto error_pl_descr_check; single_last = num_pls == 1 || i == num_pls - 1; @@ -1290,16 +1290,16 @@ int i2400m_rx(struct i2400m *i2400m, struct sk_buff *skb) if (i < i2400m->rx_pl_min) i2400m->rx_pl_min = i; i2400m->rx_num++; - i2400m->rx_size_acc += skb->len; - if (skb->len < i2400m->rx_size_min) - i2400m->rx_size_min = skb->len; - if (skb->len > i2400m->rx_size_max) - i2400m->rx_size_max = skb->len; + i2400m->rx_size_acc += skb_len; + if (skb_len < i2400m->rx_size_min) + i2400m->rx_size_min = skb_len; + if (skb_len > i2400m->rx_size_max) + i2400m->rx_size_max = skb_len; spin_unlock_irqrestore(&i2400m->rx_lock, flags); error_pl_descr_check: error_pl_descr_short: error_msg_hdr_check: - d_fnend(4, dev, "(i2400m %p skb %p [size %zu]) = %d\n", + d_fnend(4, dev, "(i2400m %p skb %p [size %u]) = %d\n", i2400m, skb, skb_len, result); return result; } diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 1d05445d4ba..ce77575e88b 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -4430,21 +4430,24 @@ static const struct file_operations proc_statsdelta_ops = { .owner = THIS_MODULE, .read = proc_read, .open = proc_statsdelta_open, - .release = proc_close + .release = proc_close, + .llseek = default_llseek, }; static const struct file_operations proc_stats_ops = { .owner = THIS_MODULE, .read = proc_read, .open = proc_stats_open, - .release = proc_close + .release = proc_close, + .llseek = default_llseek, }; static const struct file_operations proc_status_ops = { .owner = THIS_MODULE, .read = proc_read, .open = proc_status_open, - .release = proc_close + .release = proc_close, + .llseek = default_llseek, }; static const struct file_operations proc_SSID_ops = { @@ -4452,7 +4455,8 @@ static const struct file_operations proc_SSID_ops = { .read = proc_read, .write = proc_write, .open = proc_SSID_open, - .release = proc_close + .release = proc_close, + .llseek = default_llseek, }; static const struct file_operations proc_BSSList_ops = { @@ -4460,7 +4464,8 @@ static const struct file_operations proc_BSSList_ops = { .read = proc_read, .write = proc_write, .open = proc_BSSList_open, - .release = proc_close + .release = proc_close, + .llseek = default_llseek, }; static const struct file_operations proc_APList_ops = { @@ -4468,7 +4473,8 @@ static const struct file_operations proc_APList_ops = { .read = proc_read, .write = proc_write, .open = proc_APList_open, - .release = proc_close + .release = proc_close, + .llseek = default_llseek, }; static const struct file_operations proc_config_ops = { @@ -4476,7 +4482,8 @@ static const struct file_operations proc_config_ops = { .read = proc_read, .write = proc_write, .open = proc_config_open, - .release = proc_close + .release = proc_close, + .llseek = default_llseek, }; static const struct file_operations proc_wepkey_ops = { @@ -4484,7 +4491,8 @@ static const struct file_operations proc_wepkey_ops = { .read = proc_read, .write = proc_write, .open = proc_wepkey_open, - .release 
= proc_close + .release = proc_close, + .llseek = default_llseek, }; static struct proc_dir_entry *airo_entry; diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c index 9a121a5b787..df2484d4547 100644 --- a/drivers/net/wireless/airo_cs.c +++ b/drivers/net/wireless/airo_cs.c @@ -32,7 +32,6 @@ #include <linux/timer.h> #include <linux/netdevice.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -54,58 +53,21 @@ MODULE_SUPPORTED_DEVICE("Aironet 4500, 4800 and Cisco 340 PCMCIA cards"); /*====================================================================*/ -/* - The event() function is this driver's Card Services event handler. - It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card - insertion and ejection events. They are invoked from the airo_cs - event handler. -*/ - static int airo_config(struct pcmcia_device *link); static void airo_release(struct pcmcia_device *link); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static void airo_detach(struct pcmcia_device *p_dev); typedef struct local_info_t { struct net_device *eth_dev; } local_info_t; -/*====================================================================== - - airo_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - - ======================================================================*/ - static int airo_probe(struct pcmcia_device *p_dev) { local_info_t *local; dev_dbg(&p_dev->dev, "airo_attach()\n"); - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - p_dev->conf.Attributes = 0; - p_dev->conf.IntType = INT_MEMORY_AND_IO; - /* Allocate space for private device-specific data */ local = kzalloc(sizeof(local_info_t), GFP_KERNEL); if (!local) { @@ -117,15 +79,6 @@ static int airo_probe(struct pcmcia_device *p_dev) return airo_config(p_dev); } /* airo_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - - ======================================================================*/ - static void airo_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "airo_detach\n"); @@ -140,60 +93,12 @@ static void airo_detach(struct pcmcia_device *link) kfree(link->priv); } /* airo_detach */ -/*====================================================================== - - airo_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. 
- - ======================================================================*/ - -static int airo_cs_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int airo_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Does this card need audio output? */ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } - - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; - else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; - - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - } + if (p_dev->config_index == 0) + return -EINVAL; - /* This reserves IO space but doesn't actually enable it */ - if (pcmcia_request_io(p_dev) != 0) - return -ENODEV; - - /* If we got this far, we're cool! */ - return 0; + return pcmcia_request_io(p_dev); } @@ -206,20 +111,9 @@ static int airo_config(struct pcmcia_device *link) dev_dbg(&link->dev, "airo_config\n"); - /* - * In this loop, we scan the CIS for configuration table - * entries, each of which describes a valid card - * configuration, including voltage, IO window, memory window, - * and interrupt settings. - * - * We make no assumptions about the card to be configured: we - * use just the information available in the CIS. In an ideal - * world, this would work for any PCMCIA card, but it requires - * a complete and accurate CIS. In practice, a driver usually - * "knows" most of these things without consulting the CIS, - * and most client drivers will only use the CIS to fill in - * implementation-defined details. - */ + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | + CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, airo_cs_config_check, NULL); if (ret) goto failed; @@ -227,12 +121,7 @@ static int airo_config(struct pcmcia_device *link) if (!link->irq) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. 
- */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; ((local_info_t *)link->priv)->eth_dev = @@ -241,17 +130,6 @@ static int airo_config(struct pcmcia_device *link) if (!((local_info_t *)link->priv)->eth_dev) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x: ", - link->conf.ConfigIndex); - if (link->conf.Vpp) - printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); return 0; failed: @@ -259,14 +137,6 @@ static int airo_config(struct pcmcia_device *link) return -ENODEV; } /* airo_config */ -/*====================================================================== - - After a card is removed, airo_release() will unregister the - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - - ======================================================================*/ - static void airo_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "airo_release\n"); @@ -305,9 +175,7 @@ MODULE_DEVICE_TABLE(pcmcia, airo_ids); static struct pcmcia_driver airo_driver = { .owner = THIS_MODULE, - .drv = { - .name = "airo_cs", - }, + .name = "airo_cs", .probe = airo_probe, .remove = airo_detach, .id_table = airo_ids, @@ -315,12 +183,12 @@ static struct pcmcia_driver airo_driver = { .resume = airo_resume, }; -static int airo_cs_init(void) +static int __init airo_cs_init(void) { return pcmcia_register_driver(&airo_driver); } -static void airo_cs_cleanup(void) +static void __exit airo_cs_cleanup(void) { pcmcia_unregister_driver(&airo_driver); } diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 4cccc29964f..fb339c3852e 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -271,6 +271,7 @@ static const struct file_operations fops_beacon = { .write = write_file_beacon, .open = ath5k_debugfs_open, .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -290,6 +291,7 @@ static const struct file_operations fops_reset = { .write = write_file_reset, .open = ath5k_debugfs_open, .owner = THIS_MODULE, + .llseek = noop_llseek, }; @@ -369,6 +371,7 @@ static const struct file_operations fops_debug = { .write = write_file_debug, .open = ath5k_debugfs_open, .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -480,6 +483,7 @@ static const struct file_operations fops_antenna = { .write = write_file_antenna, .open = ath5k_debugfs_open, .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -591,6 +595,7 @@ static const struct file_operations fops_frameerrors = { .write = write_file_frameerrors, .open = ath5k_debugfs_open, .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -748,6 +753,7 @@ static const struct file_operations fops_ani = { .write = write_file_ani, .open = ath5k_debugfs_open, .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -811,6 +817,7 @@ static const struct file_operations fops_queue = { .write = write_file_queue, .open = ath5k_debugfs_open, .owner = THIS_MODULE, + .llseek = default_llseek, }; diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index cc648b6ae31..a3d95cca8f0 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -543,7 +543,7 @@ static u8 
ath9k_hw_chan_2_clockrate_mhz(struct ath_hw *ah) if (conf_is_ht40(conf)) return clockrate * 2; - return clockrate * 2; + return clockrate; } static int32_t ath9k_hw_ani_get_listen_time(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index 54aae931424..cf500bf25ad 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -71,7 +71,8 @@ static const struct file_operations fops_debug = { .read = read_file_debug, .write = write_file_debug, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; #endif @@ -116,7 +117,8 @@ static const struct file_operations fops_tx_chainmask = { .read = read_file_tx_chainmask, .write = write_file_tx_chainmask, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -158,7 +160,8 @@ static const struct file_operations fops_rx_chainmask = { .read = read_file_rx_chainmask, .write = write_file_rx_chainmask, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -259,7 +262,8 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf, static const struct file_operations fops_dma = { .read = read_file_dma, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; @@ -375,7 +379,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf, static const struct file_operations fops_interrupt = { .read = read_file_interrupt, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; void ath_debug_stat_rc(struct ath_softc *sc, int final_rate) @@ -464,7 +469,8 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf, static const struct file_operations fops_rcstat = { .read = read_file_rcstat, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static const char * ath_wiphy_state_str(enum ath_wiphy_state state) @@ -623,7 +629,8 @@ static const struct file_operations fops_wiphy = { .read = read_file_wiphy, .write = write_file_wiphy, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; #define PR(str, elem) \ @@ -702,7 +709,8 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, static const struct file_operations fops_xmit = { .read = read_file_xmit, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static ssize_t read_file_recv(struct file *file, char __user *user_buf, @@ -814,7 +822,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) static const struct file_operations fops_recv = { .read = read_file_recv, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static ssize_t read_file_regidx(struct file *file, char __user *user_buf, @@ -852,7 +861,8 @@ static const struct file_operations fops_regidx = { .read = read_file_regidx, .write = write_file_regidx, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static ssize_t read_file_regval(struct file *file, char __user *user_buf, @@ -894,7 +904,8 @@ static const struct file_operations fops_regval = { .read = read_file_regval, .write = write_file_regval, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = 
THIS_MODULE, + .llseek = default_llseek, }; int ath9k_init_debug(struct ath_hw *ah) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 7d09b4b17bb..bc2ca7d898e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -536,7 +536,8 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf, static const struct file_operations fops_tgt_stats = { .read = read_file_tgt_stats, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static ssize_t read_file_xmit(struct file *file, char __user *user_buf, @@ -584,7 +585,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf, static const struct file_operations fops_xmit = { .read = read_file_xmit, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; static ssize_t read_file_recv(struct file *file, char __user *user_buf, @@ -613,7 +615,8 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf, static const struct file_operations fops_recv = { .read = read_file_recv, .open = ath9k_debugfs_open, - .owner = THIS_MODULE + .owner = THIS_MODULE, + .llseek = default_llseek, }; int ath9k_htc_init_debug(struct ath_hw *ah) diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c index 3b632161c10..c96e19da294 100644 --- a/drivers/net/wireless/atmel_cs.c +++ b/drivers/net/wireless/atmel_cs.c @@ -42,7 +42,6 @@ #include <linux/moduleparam.h> #include <linux/device.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -64,58 +63,21 @@ MODULE_SUPPORTED_DEVICE("Atmel at76c50x PCMCIA cards"); /*====================================================================*/ -/* - The event() function is this driver's Card Services event handler. - It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card - insertion and ejection events. They are invoked from the atmel_cs - event handler. -*/ - static int atmel_config(struct pcmcia_device *link); static void atmel_release(struct pcmcia_device *link); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static void atmel_detach(struct pcmcia_device *p_dev); typedef struct local_info_t { struct net_device *eth_dev; } local_info_t; -/*====================================================================== - - atmel_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - - ======================================================================*/ - static int atmel_probe(struct pcmcia_device *p_dev) { local_info_t *local; dev_dbg(&p_dev->dev, "atmel_attach()\n"); - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. 
- */ - p_dev->conf.Attributes = 0; - p_dev->conf.IntType = INT_MEMORY_AND_IO; - /* Allocate space for private device-specific data */ local = kzalloc(sizeof(local_info_t), GFP_KERNEL); if (!local) { @@ -127,15 +89,6 @@ static int atmel_probe(struct pcmcia_device *p_dev) return atmel_config(p_dev); } /* atmel_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - - ======================================================================*/ - static void atmel_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "atmel_detach\n"); @@ -145,14 +98,6 @@ static void atmel_detach(struct pcmcia_device *link) kfree(link->priv); } -/*====================================================================== - - atmel_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. - - ======================================================================*/ - /* Call-back function to interrogate PCMCIA-specific information about the current existance of the card */ static int card_present(void *arg) @@ -165,47 +110,11 @@ static int card_present(void *arg) return 0; } -static int atmel_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Does this card need audio output? */ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } + if (p_dev->config_index == 0) + return -EINVAL; - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; - else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; - - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - } - - /* This reserves IO space but doesn't actually enable it */ return pcmcia_request_io(p_dev); } @@ -220,18 +129,9 @@ static int atmel_config(struct pcmcia_device *link) dev_dbg(&link->dev, "atmel_config\n"); - /* - In this loop, we scan the CIS for configuration table entries, - each of which describes a valid card configuration, including - voltage, IO window, memory window, and interrupt settings. - - We make no assumptions about the card to be configured: we use - just the information available in the CIS. In an ideal world, - this would work for any PCMCIA card, but it requires a complete - and accurate CIS. 
In practice, a driver usually "knows" most of - these things without consulting the CIS, and most client drivers - will only use the CIS to fill in implementation-defined details. - */ + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | + CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; + if (pcmcia_loop_config(link, atmel_config_check, NULL)) goto failed; @@ -240,12 +140,7 @@ static int atmel_config(struct pcmcia_device *link) goto failed; } - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -267,14 +162,6 @@ static int atmel_config(struct pcmcia_device *link) return -ENODEV; } -/*====================================================================== - - After a card is removed, atmel_release() will unregister the - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - - ======================================================================*/ - static void atmel_release(struct pcmcia_device *link) { struct net_device *dev = ((local_info_t*)link->priv)->eth_dev; @@ -353,9 +240,7 @@ MODULE_DEVICE_TABLE(pcmcia, atmel_ids); static struct pcmcia_driver atmel_driver = { .owner = THIS_MODULE, - .drv = { - .name = "atmel_cs", - }, + .name = "atmel_cs", .probe = atmel_probe, .remove = atmel_detach, .id_table = atmel_ids, @@ -363,12 +248,12 @@ static struct pcmcia_driver atmel_driver = { .resume = atmel_resume, }; -static int atmel_cs_init(void) +static int __init atmel_cs_init(void) { return pcmcia_register_driver(&atmel_driver); } -static void atmel_cs_cleanup(void) +static void __exit atmel_cs_cleanup(void) { pcmcia_unregister_driver(&atmel_driver); } diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c index 80b19a44a40..59f59fa4033 100644 --- a/drivers/net/wireless/b43/debugfs.c +++ b/drivers/net/wireless/b43/debugfs.c @@ -627,6 +627,7 @@ out_unlock: .open = b43_debugfs_open, \ .read = b43_debugfs_read, \ .write = b43_debugfs_write, \ + .llseek = generic_file_llseek, \ }, \ .file_struct_offset = offsetof(struct b43_dfsentry, \ file_##name), \ diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c index dfbc41d431f..7dcba5fafdc 100644 --- a/drivers/net/wireless/b43/pcmcia.c +++ b/drivers/net/wireless/b43/pcmcia.c @@ -26,7 +26,6 @@ #include <linux/ssb/ssb.h> #include <linux/slab.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -63,7 +62,6 @@ static int b43_pcmcia_resume(struct pcmcia_device *dev) static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) { struct ssb_bus *ssb; - win_req_t win; int err = -ENOMEM; int res = 0; @@ -73,30 +71,28 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev) err = -ENODEV; - dev->conf.Attributes = CONF_ENABLE_IRQ; - dev->conf.IntType = INT_MEMORY_AND_IO; + dev->config_flags |= CONF_ENABLE_IRQ; - win.Attributes = WIN_ENABLE | WIN_DATA_WIDTH_16 | + dev->resource[2]->flags |= WIN_ENABLE | WIN_DATA_WIDTH_16 | WIN_USE_WAIT; - win.Base = 0; - win.Size = SSB_CORE_SIZE; - win.AccessSpeed = 250; - res = pcmcia_request_window(dev, &win, &dev->win); + dev->resource[2]->start = 0; + dev->resource[2]->end = SSB_CORE_SIZE; + res = pcmcia_request_window(dev, dev->resource[2], 250); if (res != 0) goto err_kfree_ssb; - res = 
pcmcia_map_mem_page(dev, dev->win, 0); + res = pcmcia_map_mem_page(dev, dev->resource[2], 0); if (res != 0) goto err_disable; if (!dev->irq) goto err_disable; - res = pcmcia_request_configuration(dev, &dev->conf); + res = pcmcia_enable_device(dev); if (res != 0) goto err_disable; - err = ssb_bus_pcmciabus_register(ssb, dev, win.Base); + err = ssb_bus_pcmciabus_register(ssb, dev, dev->resource[2]->start); if (err) goto err_disable; dev->priv = ssb; @@ -125,9 +121,7 @@ static void __devexit b43_pcmcia_remove(struct pcmcia_device *dev) static struct pcmcia_driver b43_pcmcia_driver = { .owner = THIS_MODULE, - .drv = { - .name = "b43-pcmcia", - }, + .name = "b43-pcmcia", .id_table = b43_pcmcia_tbl, .probe = b43_pcmcia_probe, .remove = __devexit_p(b43_pcmcia_remove), diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c index 1f85ac569fe..f232618f2cd 100644 --- a/drivers/net/wireless/b43legacy/debugfs.c +++ b/drivers/net/wireless/b43legacy/debugfs.c @@ -334,6 +334,7 @@ out_unlock: .open = b43legacy_debugfs_open, \ .read = b43legacy_debugfs_read, \ .write = b43legacy_debugfs_write, \ + .llseek = generic_file_llseek, \ }, \ .file_struct_offset = offsetof(struct b43legacy_dfsentry, \ file_##name), \ diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c index ba54d1b04d2..bd8a4134ede 100644 --- a/drivers/net/wireless/hostap/hostap_cs.c +++ b/drivers/net/wireless/hostap/hostap_cs.c @@ -12,7 +12,6 @@ #include <linux/wireless.h> #include <net/iw_handler.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -437,7 +436,6 @@ static int hostap_cs_probe(struct pcmcia_device *p_dev) int ret; PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info); - p_dev->conf.IntType = INT_MEMORY_AND_IO; ret = prism2_config(p_dev); if (ret) { @@ -468,74 +466,11 @@ static void prism2_detach(struct pcmcia_device *link) } -/* run after a CARD_INSERTION event is received to configure the PCMCIA - * socket and make the device available to the system */ - -static int prism2_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int prism2_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - PDEBUG(DEBUG_EXTRA, "Checking CFTABLE_ENTRY 0x%02X " - "(default 0x%02X)\n", cfg->index, dflt->index); - - /* Does this card need audio output? 
*/ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } - - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / - 10000 && !ignore_cis_vcc) { - PDEBUG(DEBUG_EXTRA, " Vcc mismatch - skipping" - " this entry\n"); - return -ENODEV; - } - } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / - 10000 && !ignore_cis_vcc) { - PDEBUG(DEBUG_EXTRA, " Vcc (default) mismatch " - "- skipping this entry\n"); - return -ENODEV; - } - } + if (p_dev->config_index == 0) + return -EINVAL; - if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; - else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d " - "dflt->io.nwin=%d\n", - cfg->io.nwin, dflt->io.nwin); - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - } - - /* This reserves IO space but doesn't actually enable it */ return pcmcia_request_io(p_dev); } @@ -557,6 +492,10 @@ static int prism2_config(struct pcmcia_device *link) } /* Look for an appropriate configuration table entry in the CIS */ + link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | + CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO | CONF_ENABLE_IRQ; + if (ignore_cis_vcc) + link->config_flags &= ~CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(link, prism2_config_check, NULL); if (ret) { if (!ignore_cis_vcc) @@ -588,12 +527,7 @@ static int prism2_config(struct pcmcia_device *link) if (ret) goto failed_unlock; - /* - * This actually configures the PCMCIA socket -- setting up - * the I/O windows and the interrupt mapping, and putting the - * card and host interface into "Memory and IO" mode. 
- */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed_unlock; @@ -602,20 +536,6 @@ static int prism2_config(struct pcmcia_device *link) spin_unlock_irqrestore(&local->irq_init_lock, flags); - /* Finally, report what we've done */ - printk(KERN_INFO "%s: index 0x%02x: ", - dev_info, link->conf.ConfigIndex); - if (link->conf.Vpp) - printk(", Vpp %d.%d", link->conf.Vpp / 10, - link->conf.Vpp % 10); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - local->shutdown = 0; sandisk_enable_wireless(dev); @@ -627,7 +547,7 @@ static int prism2_config(struct pcmcia_device *link) return ret; failed_unlock: - spin_unlock_irqrestore(&local->irq_init_lock, flags); + spin_unlock_irqrestore(&local->irq_init_lock, flags); failed: kfree(hw_priv); prism2_release((u_long)link); @@ -779,9 +699,7 @@ MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids); static struct pcmcia_driver hostap_driver = { - .drv = { - .name = "hostap_cs", - }, + .name = "hostap_cs", .probe = hostap_cs_probe, .remove = prism2_detach, .owner = THIS_MODULE, diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c index 8e84a08ff95..293e1dbc166 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -873,6 +873,7 @@ static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file, static const struct file_operations rs_sta_dbgfs_stats_table_ops = { .read = iwl3945_sta_dbgfs_stats_table_read, .open = iwl3945_open_file_generic, + .llseek = default_llseek, }; static void iwl3945_add_debugfs(void *priv, void *priv_sta, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 9dd9e64c2b0..8fd00a6e512 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -1411,7 +1411,7 @@ void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) clear_bit(STATUS_SCAN_HW, &priv->status); clear_bit(STATUS_SCANNING, &priv->status); /* inform mac80211 scan aborted */ - queue_work(priv->workqueue, &priv->scan_completed); + queue_work(priv->workqueue, &priv->abort_scan); } int iwlagn_manage_ibss_station(struct iwl_priv *priv, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 23e5c42e7d7..a4378ba31ef 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -2873,6 +2873,7 @@ static const struct file_operations rs_sta_dbgfs_scale_table_ops = { .write = rs_sta_dbgfs_scale_table_write, .read = rs_sta_dbgfs_scale_table_read, .open = open_file_generic, + .llseek = default_llseek, }; static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -2915,6 +2916,7 @@ static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file, static const struct file_operations rs_sta_dbgfs_stats_table_ops = { .read = rs_sta_dbgfs_stats_table_read, .open = open_file_generic, + .llseek = default_llseek, }; static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, @@ -2946,6 +2948,7 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = { .read = rs_sta_dbgfs_rate_scale_data_read, .open = 
open_file_generic, + .llseek = default_llseek, }; static void rs_add_debugfs(void *priv, void *priv_sta, diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c index 07dbc279644..e23c4060a0f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.c +++ b/drivers/net/wireless/iwlwifi/iwl-core.c @@ -2613,6 +2613,11 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external) if (test_bit(STATUS_EXIT_PENDING, &priv->status)) return -EINVAL; + if (test_bit(STATUS_SCANNING, &priv->status)) { + IWL_DEBUG_INFO(priv, "scan in progress.\n"); + return -EINVAL; + } + if (mode >= IWL_MAX_FORCE_RESET) { IWL_DEBUG_INFO(priv, "invalid reset request.\n"); return -EINVAL; diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c index e96a1bb1278..a32d5d33764 100644 --- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c +++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c @@ -87,6 +87,7 @@ static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file) static const struct file_operations iwl_dbgfs_##name##_ops = { \ .read = iwl_dbgfs_##name##_read, \ .open = iwl_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ }; #define DEBUGFS_WRITE_FILE_OPS(name) \ @@ -94,6 +95,7 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .open = iwl_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ }; @@ -104,6 +106,7 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \ .write = iwl_dbgfs_##name##_write, \ .read = iwl_dbgfs_##name##_read, \ .open = iwl_dbgfs_open_file_generic, \ + .llseek = generic_file_llseek, \ }; static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file, diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 59a308b02f9..d31661c1ce7 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -3018,7 +3018,7 @@ void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) clear_bit(STATUS_SCANNING, &priv->status); /* inform mac80211 scan aborted */ - queue_work(priv->workqueue, &priv->scan_completed); + queue_work(priv->workqueue, &priv->abort_scan); } static void iwl3945_bg_restart(struct work_struct *data) diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c index 53b0b7711f0..0a0cc9667cd 100644 --- a/drivers/net/wireless/iwmc3200wifi/debugfs.c +++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c @@ -402,24 +402,28 @@ static const struct file_operations iwm_debugfs_txq_fops = { .owner = THIS_MODULE, .open = iwm_generic_open, .read = iwm_debugfs_txq_read, + .llseek = default_llseek, }; static const struct file_operations iwm_debugfs_tx_credit_fops = { .owner = THIS_MODULE, .open = iwm_generic_open, .read = iwm_debugfs_tx_credit_read, + .llseek = default_llseek, }; static const struct file_operations iwm_debugfs_rx_ticket_fops = { .owner = THIS_MODULE, .open = iwm_generic_open, .read = iwm_debugfs_rx_ticket_read, + .llseek = default_llseek, }; static const struct file_operations iwm_debugfs_fw_err_fops = { .owner = THIS_MODULE, .open = iwm_generic_open, .read = iwm_debugfs_fw_err_read, + .llseek = default_llseek, }; void iwm_debugfs_init(struct iwm_priv *iwm) diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c index edcb52330cf..56383e7be83 100644 --- 
a/drivers/net/wireless/iwmc3200wifi/sdio.c +++ b/drivers/net/wireless/iwmc3200wifi/sdio.c @@ -364,6 +364,7 @@ static const struct file_operations iwm_debugfs_sdio_fops = { .owner = THIS_MODULE, .open = iwm_debugfs_sdio_open, .read = iwm_debugfs_sdio_read, + .llseek = default_llseek, }; static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir) diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c index 651a79c8de8..fbf3b0332bb 100644 --- a/drivers/net/wireless/libertas/debugfs.c +++ b/drivers/net/wireless/libertas/debugfs.c @@ -696,6 +696,7 @@ out_unlock: .open = open_file_generic, \ .read = (fread), \ .write = (fwrite), \ + .llseek = generic_file_llseek, \ } struct lbs_debugfs_files { @@ -961,6 +962,7 @@ static const struct file_operations lbs_debug_fops = { .open = open_file_generic, .write = lbs_debugfs_write, .read = lbs_debugfs_read, + .llseek = default_llseek, }; /** diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c index 9c298396be5..ff1280f4133 100644 --- a/drivers/net/wireless/libertas/if_cs.c +++ b/drivers/net/wireless/libertas/if_cs.c @@ -28,7 +28,6 @@ #include <linux/firmware.h> #include <linux/netdevice.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -761,15 +760,6 @@ static int if_cs_host_to_card(struct lbs_private *priv, } -/********************************************************************/ -/* Card Services */ -/********************************************************************/ - -/* - * After a card is removed, if_cs_release() will unregister the - * device, and release the PCMCIA configuration. If the device is - * still open, this will be postponed until it is closed. - */ static void if_cs_release(struct pcmcia_device *p_dev) { struct if_cs_card *card = p_dev->priv; @@ -785,31 +775,12 @@ static void if_cs_release(struct pcmcia_device *p_dev) } -/* - * This creates an "instance" of the driver, allocating local data - * structures for one device. The device is registered with Card - * Services. - * - * The dev_link structure is initialized, but we don't actually - * configure the card at this point -- we wait until we receive a card - * insertion event. - */ - -static int if_cs_ioprobe(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int if_cs_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - p_dev->resource[0]->start = cfg->io.win[0].base; - p_dev->resource[0]->end = cfg->io.win[0].len; - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - /* IO window settings */ - if (cfg->io.nwin != 1) { + if (p_dev->resource[1]->end) { lbs_pr_err("wrong CIS (check number of IO windows)\n"); return -ENODEV; } @@ -835,15 +806,13 @@ static int if_cs_probe(struct pcmcia_device *p_dev) card->p_dev = p_dev; p_dev->priv = card; - p_dev->conf.Attributes = 0; - p_dev->conf.IntType = INT_MEMORY_AND_IO; + p_dev->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; if (pcmcia_loop_config(p_dev, if_cs_ioprobe, NULL)) { lbs_pr_err("error in pcmcia_loop_config\n"); goto out1; } - /* * Allocate an interrupt line. 
Note that this does not assign * a handler to the interrupt, unless the 'Handler' member of @@ -861,14 +830,9 @@ static int if_cs_probe(struct pcmcia_device *p_dev) goto out1; } - /* - * This actually configures the PCMCIA socket -- setting up - * the I/O windows and the interrupt mapping, and putting the - * card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(p_dev, &p_dev->conf); + ret = pcmcia_enable_device(p_dev); if (ret) { - lbs_pr_err("error in pcmcia_request_configuration\n"); + lbs_pr_err("error in pcmcia_enable_device\n"); goto out2; } @@ -962,12 +926,6 @@ out: } -/* - * This deletes a driver "instance". The device is de-registered with - * Card Services. If it has been released, all local data structures - * are freed. Otherwise, the structures will be freed when the device - * is released. - */ static void if_cs_detach(struct pcmcia_device *p_dev) { struct if_cs_card *card = p_dev->priv; @@ -1000,9 +958,7 @@ MODULE_DEVICE_TABLE(pcmcia, if_cs_ids); static struct pcmcia_driver lbs_driver = { .owner = THIS_MODULE, - .drv = { - .name = DRV_NAME, - }, + .name = DRV_NAME, .probe = if_cs_probe, .remove = if_cs_detach, .id_table = if_cs_ids, diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c index ef46a2d8853..71b3d68b940 100644 --- a/drivers/net/wireless/orinoco/orinoco_cs.c +++ b/drivers/net/wireless/orinoco/orinoco_cs.c @@ -17,7 +17,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -93,14 +92,6 @@ orinoco_cs_hard_reset(struct orinoco_private *priv) /* PCMCIA stuff */ /********************************************************************/ -/* - * This creates an "instance" of the driver, allocating local data - * structures for one device. The device is registered with Card - * Services. - * - * The dev_link structure is initialized, but we don't actually - * configure the card at this point -- we wait until we receive a card - * insertion event. */ static int orinoco_cs_probe(struct pcmcia_device *link) { @@ -117,23 +108,9 @@ orinoco_cs_probe(struct pcmcia_device *link) card->p_dev = link; link->priv = priv; - /* General socket configuration defaults can go here. In this - * client, we assume very little, and rely on the CIS for - * almost everything. In most clients, many details (i.e., - * number, sizes, and attributes of IO windows) are fixed by - * the nature of the device, and can be hard-wired here. */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - return orinoco_cs_config(link); } /* orinoco_cs_attach */ -/* - * This deletes a driver "instance". The device is de-registered with - * Card Services. If it has been released, all local data structures - * are freed. Otherwise, the structures will be freed when the device - * is released. - */ static void orinoco_cs_detach(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; @@ -145,76 +122,12 @@ static void orinoco_cs_detach(struct pcmcia_device *link) free_orinocodev(priv); } /* orinoco_cs_detach */ -/* - * orinoco_cs_config() is scheduled to run after a CARD_INSERTION - * event is received, to configure the PCMCIA socket, and to make the - * device available to the system. 
- */ - -static int orinoco_cs_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int orinoco_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if (cfg->index == 0) - goto next_entry; - - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) { - DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n", - __func__, vcc, - cfg->vcc.param[CISTPL_POWER_VNOM] / 10000); - if (!ignore_cis_vcc) - goto next_entry; - } - } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) { - DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n", - __func__, vcc, - dflt->vcc.param[CISTPL_POWER_VNOM] / 10000); - if (!ignore_cis_vcc) - goto next_entry; - } - } + if (p_dev->config_index == 0) + return -EINVAL; - if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = - cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; - else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = - dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - - /* This reserves IO space but doesn't actually enable it */ - if (pcmcia_request_io(p_dev) != 0) - goto next_entry; - } - return 0; - -next_entry: - pcmcia_disable_device(p_dev); - return -ENODEV; + return pcmcia_request_io(p_dev); }; static int @@ -225,20 +138,10 @@ orinoco_cs_config(struct pcmcia_device *link) int ret; void __iomem *mem; - /* - * In this loop, we scan the CIS for configuration table - * entries, each of which describes a valid card - * configuration, including voltage, IO window, memory window, - * and interrupt settings. - * - * We make no assumptions about the card to be configured: we - * use just the information available in the CIS. In an ideal - * world, this would work for any PCMCIA card, but it requires - * a complete and accurate CIS. In practice, a driver usually - * "knows" most of these things without consulting the CIS, - * and most client drivers will only use the CIS to fill in - * implementation-defined details. - */ + link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC | + CONF_AUTO_SET_IO | CONF_ENABLE_IRQ; + if (ignore_cis_vcc) + link->config_flags &= ~CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL); if (ret) { if (!ignore_cis_vcc) @@ -262,12 +165,7 @@ orinoco_cs_config(struct pcmcia_device *link) hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); - /* - * This actually configures the PCMCIA socket -- setting up - * the I/O windows and the interrupt mapping, and putting the - * card and host interface into "Memory and IO" mode. 
- */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -291,11 +189,6 @@ orinoco_cs_config(struct pcmcia_device *link) return -ENODEV; } /* orinoco_cs_config */ -/* - * After a card is removed, orinoco_cs_release() will unregister the - * device, and release the PCMCIA configuration. If the device is - * still open, this will be postponed until it is closed. - */ static void orinoco_cs_release(struct pcmcia_device *link) { @@ -344,12 +237,6 @@ static int orinoco_cs_resume(struct pcmcia_device *link) /* Module initialization */ /********************************************************************/ -/* Can't be declared "const" or the whole __initdata section will - * become const */ -static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION - " (David Gibson <hermes@gibson.dropbear.id.au>, " - "Pavel Roskin <proski@gnu.org>, et al)"; - static struct pcmcia_device_id orinoco_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */ PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */ @@ -441,9 +328,7 @@ MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids); static struct pcmcia_driver orinoco_driver = { .owner = THIS_MODULE, - .drv = { - .name = DRIVER_NAME, - }, + .name = DRIVER_NAME, .probe = orinoco_cs_probe, .remove = orinoco_cs_detach, .id_table = orinoco_cs_ids, @@ -454,8 +339,6 @@ static struct pcmcia_driver orinoco_driver = { static int __init init_orinoco_cs(void) { - printk(KERN_DEBUG "%s\n", version); - return pcmcia_register_driver(&orinoco_driver); } diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c index 873877e17e1..fb859a5ad2e 100644 --- a/drivers/net/wireless/orinoco/spectrum_cs.c +++ b/drivers/net/wireless/orinoco/spectrum_cs.c @@ -25,7 +25,6 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -154,14 +153,6 @@ spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle) /* PCMCIA stuff */ /********************************************************************/ -/* - * This creates an "instance" of the driver, allocating local data - * structures for one device. The device is registered with Card - * Services. - * - * The dev_link structure is initialized, but we don't actually - * configure the card at this point -- we wait until we receive a card - * insertion event. */ static int spectrum_cs_probe(struct pcmcia_device *link) { @@ -179,23 +170,9 @@ spectrum_cs_probe(struct pcmcia_device *link) card->p_dev = link; link->priv = priv; - /* General socket configuration defaults can go here. In this - * client, we assume very little, and rely on the CIS for - * almost everything. In most clients, many details (i.e., - * number, sizes, and attributes of IO windows) are fixed by - * the nature of the device, and can be hard-wired here. */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - return spectrum_cs_config(link); } /* spectrum_cs_attach */ -/* - * This deletes a driver "instance". The device is de-registered with - * Card Services. If it has been released, all local data structures - * are freed. Otherwise, the structures will be freed when the device - * is released. 
- */ static void spectrum_cs_detach(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; @@ -207,76 +184,13 @@ static void spectrum_cs_detach(struct pcmcia_device *link) free_orinocodev(priv); } /* spectrum_cs_detach */ -/* - * spectrum_cs_config() is scheduled to run after a CARD_INSERTION - * event is received, to configure the PCMCIA socket, and to make the - * device available to the system. - */ - static int spectrum_cs_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { - if (cfg->index == 0) - goto next_entry; - - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) { - DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n", - __func__, vcc, - cfg->vcc.param[CISTPL_POWER_VNOM] / 10000); - if (!ignore_cis_vcc) - goto next_entry; - } - } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { - if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) { - DEBUG(2, "%s: Vcc mismatch (vcc = %d, CIS = %d)\n", - __func__, vcc, - dflt->vcc.param[CISTPL_POWER_VNOM] / 10000); - if (!ignore_cis_vcc) - goto next_entry; - } - } + if (p_dev->config_index == 0) + return -EINVAL; - if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = - cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; - else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = - dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - - /* This reserves IO space but doesn't actually enable it */ - if (pcmcia_request_io(p_dev) != 0) - goto next_entry; - } - return 0; - -next_entry: - pcmcia_disable_device(p_dev); - return -ENODEV; + return pcmcia_request_io(p_dev); }; static int @@ -287,20 +201,10 @@ spectrum_cs_config(struct pcmcia_device *link) int ret; void __iomem *mem; - /* - * In this loop, we scan the CIS for configuration table - * entries, each of which describes a valid card - * configuration, including voltage, IO window, memory window, - * and interrupt settings. - * - * We make no assumptions about the card to be configured: we - * use just the information available in the CIS. In an ideal - * world, this would work for any PCMCIA card, but it requires - * a complete and accurate CIS. In practice, a driver usually - * "knows" most of these things without consulting the CIS, - * and most client drivers will only use the CIS to fill in - * implementation-defined details. 
- */ + link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC | + CONF_AUTO_SET_IO | CONF_ENABLE_IRQ; + if (ignore_cis_vcc) + link->config_flags &= ~CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL); if (ret) { if (!ignore_cis_vcc) @@ -325,12 +229,7 @@ spectrum_cs_config(struct pcmcia_device *link) hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); hw->eeprom_pda = true; - /* - * This actually configures the PCMCIA socket -- setting up - * the I/O windows and the interrupt mapping, and putting the - * card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -358,11 +257,6 @@ spectrum_cs_config(struct pcmcia_device *link) return -ENODEV; } /* spectrum_cs_config */ -/* - * After a card is removed, spectrum_cs_release() will unregister the - * device, and release the PCMCIA configuration. If the device is - * still open, this will be postponed until it is closed. - */ static void spectrum_cs_release(struct pcmcia_device *link) { @@ -407,12 +301,6 @@ spectrum_cs_resume(struct pcmcia_device *link) /* Module initialization */ /********************************************************************/ -/* Can't be declared "const" or the whole __initdata section will - * become const */ -static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION - " (Pavel Roskin <proski@gnu.org>," - " David Gibson <hermes@gibson.dropbear.id.au>, et al)"; - static struct pcmcia_device_id spectrum_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ @@ -423,9 +311,7 @@ MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids); static struct pcmcia_driver orinoco_driver = { .owner = THIS_MODULE, - .drv = { - .name = DRIVER_NAME, - }, + .name = DRIVER_NAME, .probe = spectrum_cs_probe, .remove = spectrum_cs_detach, .suspend = spectrum_cs_suspend, @@ -436,8 +322,6 @@ static struct pcmcia_driver orinoco_driver = { static int __init init_spectrum_cs(void) { - printk(KERN_DEBUG "%s\n", version); - return pcmcia_register_driver(&orinoco_driver); } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 88560d0ae50..46da03753fd 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -46,7 +46,6 @@ #include <linux/ethtool.h> #include <linux/ieee80211.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -169,13 +168,6 @@ static int bc; */ static char *phy_addr = NULL; - -/* A struct pcmcia_device structure has fields for most things that are needed - to keep track of a socket, but there will usually be some device - specific information that also needs to be kept track of. The - 'priv' pointer in a struct pcmcia_device structure can be used to point to - a device-specific private data structure, like this. -*/ static unsigned int ray_mem_speed = 500; /* WARNING: THIS DRIVER IS NOT CAPABLE OF HANDLING MULTIPLE DEVICES! */ @@ -290,14 +282,6 @@ static const struct net_device_ops ray_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -/*============================================================================= - ray_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. 
- The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. -=============================================================================*/ static int ray_probe(struct pcmcia_device *p_dev) { ray_dev_t *local; @@ -318,9 +302,8 @@ static int ray_probe(struct pcmcia_device *p_dev) p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; /* General socket configuration */ - p_dev->conf.Attributes = CONF_ENABLE_IRQ; - p_dev->conf.IntType = INT_MEMORY_AND_IO; - p_dev->conf.ConfigIndex = 1; + p_dev->config_flags |= CONF_ENABLE_IRQ; + p_dev->config_index = 1; p_dev->priv = dev; @@ -353,12 +336,6 @@ fail_alloc_dev: return -ENOMEM; } /* ray_attach */ -/*============================================================================= - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. -=============================================================================*/ static void ray_detach(struct pcmcia_device *link) { struct net_device *dev; @@ -381,17 +358,11 @@ static void ray_detach(struct pcmcia_device *link) dev_dbg(&link->dev, "ray_cs ray_detach ending\n"); } /* ray_detach */ -/*============================================================================= - ray_config() is run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. -=============================================================================*/ #define MAX_TUPLE_SIZE 128 static int ray_config(struct pcmcia_device *link) { int ret = 0; int i; - win_req_t req; struct net_device *dev = (struct net_device *)link->priv; ray_dev_t *local = netdev_priv(dev); @@ -412,54 +383,50 @@ static int ray_config(struct pcmcia_device *link) goto failed; dev->irq = link->irq; - /* This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping. 
- */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; /*** Set up 32k window for shared memory (transmit and control) ************/ - req.Attributes = - WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT; - req.Base = 0; - req.Size = 0x8000; - req.AccessSpeed = ray_mem_speed; - ret = pcmcia_request_window(link, &req, &link->win); + link->resource[2]->flags |= WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT; + link->resource[2]->start = 0; + link->resource[2]->end = 0x8000; + ret = pcmcia_request_window(link, link->resource[2], ray_mem_speed); if (ret) goto failed; - ret = pcmcia_map_mem_page(link, link->win, 0); + ret = pcmcia_map_mem_page(link, link->resource[2], 0); if (ret) goto failed; - local->sram = ioremap(req.Base, req.Size); + local->sram = ioremap(link->resource[2]->start, + resource_size(link->resource[2])); /*** Set up 16k window for shared memory (receive buffer) ***************/ - req.Attributes = + link->resource[3]->flags |= WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE | WIN_USE_WAIT; - req.Base = 0; - req.Size = 0x4000; - req.AccessSpeed = ray_mem_speed; - ret = pcmcia_request_window(link, &req, &local->rmem_handle); + link->resource[3]->start = 0; + link->resource[3]->end = 0x4000; + ret = pcmcia_request_window(link, link->resource[3], ray_mem_speed); if (ret) goto failed; - ret = pcmcia_map_mem_page(link, local->rmem_handle, 0x8000); + ret = pcmcia_map_mem_page(link, link->resource[3], 0x8000); if (ret) goto failed; - local->rmem = ioremap(req.Base, req.Size); + local->rmem = ioremap(link->resource[3]->start, + resource_size(link->resource[3])); /*** Set up window for attribute memory ***********************************/ - req.Attributes = + link->resource[4]->flags |= WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_AM | WIN_ENABLE | WIN_USE_WAIT; - req.Base = 0; - req.Size = 0x1000; - req.AccessSpeed = ray_mem_speed; - ret = pcmcia_request_window(link, &req, &local->amem_handle); + link->resource[4]->start = 0; + link->resource[4]->end = 0x1000; + ret = pcmcia_request_window(link, link->resource[4], ray_mem_speed); if (ret) goto failed; - ret = pcmcia_map_mem_page(link, local->amem_handle, 0); + ret = pcmcia_map_mem_page(link, link->resource[4], 0); if (ret) goto failed; - local->amem = ioremap(req.Base, req.Size); + local->amem = ioremap(link->resource[4]->start, + resource_size(link->resource[4])); dev_dbg(&link->dev, "ray_config sram=%p\n", local->sram); dev_dbg(&link->dev, "ray_config rmem=%p\n", local->rmem); @@ -775,11 +742,7 @@ static void join_net(u_long data) local->card_status = CARD_DOING_ACQ; } -/*============================================================================ - After a card is removed, ray_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. 
-=============================================================================*/ + static void ray_release(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -2802,6 +2765,7 @@ static ssize_t ray_cs_essid_proc_write(struct file *file, static const struct file_operations ray_cs_essid_proc_fops = { .owner = THIS_MODULE, .write = ray_cs_essid_proc_write, + .llseek = noop_llseek, }; static ssize_t int_proc_write(struct file *file, const char __user *buffer, @@ -2835,6 +2799,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer, static const struct file_operations int_proc_fops = { .owner = THIS_MODULE, .write = int_proc_write, + .llseek = noop_llseek, }; #endif @@ -2847,9 +2812,7 @@ MODULE_DEVICE_TABLE(pcmcia, ray_ids); static struct pcmcia_driver ray_driver = { .owner = THIS_MODULE, - .drv = { - .name = "ray_cs", - }, + .name = "ray_cs", .probe = ray_probe, .remove = ray_detach, .id_table = ray_ids, diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h index 9f01ddb1974..e79848fbcca 100644 --- a/drivers/net/wireless/ray_cs.h +++ b/drivers/net/wireless/ray_cs.h @@ -25,8 +25,6 @@ struct beacon_rx { typedef struct ray_dev_t { int card_status; int authentication_state; - window_handle_t amem_handle; /* handle to window for attribute memory */ - window_handle_t rmem_handle; /* handle to window for rx buffer on card */ void __iomem *sram; /* pointer to beginning of shared RAM */ void __iomem *amem; /* pointer to attribute mem window */ void __iomem *rmem; /* pointer to receive buffer window */ diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c index b0498e7e7aa..cea81e4c5c8 100644 --- a/drivers/net/wireless/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/rt2x00/rt2x00debug.c @@ -315,6 +315,7 @@ static const struct file_operations rt2x00debug_fop_queue_dump = { .poll = rt2x00debug_poll_queue_dump, .open = rt2x00debug_open_queue_dump, .release = rt2x00debug_release_queue_dump, + .llseek = default_llseek, }; static ssize_t rt2x00debug_read_queue_stats(struct file *file, @@ -371,6 +372,7 @@ static const struct file_operations rt2x00debug_fop_queue_stats = { .read = rt2x00debug_read_queue_stats, .open = rt2x00debug_file_open, .release = rt2x00debug_file_release, + .llseek = default_llseek, }; #ifdef CONFIG_RT2X00_LIB_CRYPTO @@ -423,6 +425,7 @@ static const struct file_operations rt2x00debug_fop_crypto_stats = { .read = rt2x00debug_read_crypto_stats, .open = rt2x00debug_file_open, .release = rt2x00debug_file_release, + .llseek = default_llseek, }; #endif @@ -509,6 +512,7 @@ static const struct file_operations rt2x00debug_fop_##__name = {\ .write = rt2x00debug_write_##__name, \ .open = rt2x00debug_file_open, \ .release = rt2x00debug_file_release, \ + .llseek = generic_file_llseek, \ }; RT2X00DEBUGFS_OPS(csr, "0x%.8x\n", u32); @@ -542,6 +546,7 @@ static const struct file_operations rt2x00debug_fop_dev_flags = { .read = rt2x00debug_read_dev_flags, .open = rt2x00debug_file_open, .release = rt2x00debug_file_release, + .llseek = default_llseek, }; static struct dentry *rt2x00debug_create_file_driver(const char *name, diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c index 5e4465ac08f..fa620a5e530 100644 --- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c +++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c @@ -50,6 +50,7 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \ static const struct file_operations name## _ops = { \ 
.read = name## _read, \ .open = wl1251_open_file_generic, \ + .llseek = generic_file_llseek, \ }; #define DEBUGFS_ADD(name, parent) \ @@ -86,6 +87,7 @@ static ssize_t sub## _ ##name## _read(struct file *file, \ static const struct file_operations sub## _ ##name## _ops = { \ .read = sub## _ ##name## _read, \ .open = wl1251_open_file_generic, \ + .llseek = generic_file_llseek, \ }; #define DEBUGFS_FWSTATS_ADD(sub, name) \ @@ -236,6 +238,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, static const struct file_operations tx_queue_len_ops = { .read = tx_queue_len_read, .open = wl1251_open_file_generic, + .llseek = generic_file_llseek, }; static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf, @@ -257,6 +260,7 @@ static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf, static const struct file_operations tx_queue_status_ops = { .read = tx_queue_status_read, .open = wl1251_open_file_generic, + .llseek = generic_file_llseek, }; static void wl1251_debugfs_delete_files(struct wl1251 *wl) diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c index c239ef4d0b8..66c2b90ddfd 100644 --- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c +++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c @@ -51,6 +51,7 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \ static const struct file_operations name## _ops = { \ .read = name## _read, \ .open = wl1271_open_file_generic, \ + .llseek = generic_file_llseek, \ }; #define DEBUGFS_ADD(name, parent) \ @@ -87,6 +88,7 @@ static ssize_t sub## _ ##name## _read(struct file *file, \ static const struct file_operations sub## _ ##name## _ops = { \ .read = sub## _ ##name## _read, \ .open = wl1271_open_file_generic, \ + .llseek = generic_file_llseek, \ }; #define DEBUGFS_FWSTATS_ADD(sub, name) \ @@ -237,6 +239,7 @@ static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf, static const struct file_operations tx_queue_len_ops = { .read = tx_queue_len_read, .open = wl1271_open_file_generic, + .llseek = default_llseek, }; static ssize_t gpio_power_read(struct file *file, char __user *user_buf, @@ -291,7 +294,8 @@ out: static const struct file_operations gpio_power_ops = { .read = gpio_power_read, .write = gpio_power_write, - .open = wl1271_open_file_generic + .open = wl1271_open_file_generic, + .llseek = default_llseek, }; static void wl1271_debugfs_delete_files(struct wl1271 *wl) diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index a1cc2d498a1..ca3f8961fa2 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -48,7 +48,6 @@ #include <net/iw_handler.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -78,13 +77,6 @@ #define WL3501_RESUME 0 #define WL3501_SUSPEND 1 -/* - * The event() function is this driver's Card Services event handler. It will - * be called by Card Services when an appropriate card status event is - * received. The config() and release() entry points are used to configure or - * release a socket, in response to card insertion and ejection events. They - * are invoked from the wl24 event handler. 
- */ static int wl3501_config(struct pcmcia_device *link); static void wl3501_release(struct pcmcia_device *link); @@ -1869,15 +1861,6 @@ static const struct net_device_ops wl3501_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -/** - * wl3501_attach - creates an "instance" of the driver - * - * Creates an "instance" of the driver, allocating local data structures for - * one device. The device is registered with Card Services. - * - * The dev_link structure is initialized, but we don't actually configure the - * card at this point -- we wait until we receive a card insertion event. - */ static int wl3501_probe(struct pcmcia_device *p_dev) { struct net_device *dev; @@ -1888,9 +1871,8 @@ static int wl3501_probe(struct pcmcia_device *p_dev) p_dev->resource[0]->flags = IO_DATA_PATH_WIDTH_8; /* General socket configuration */ - p_dev->conf.Attributes = CONF_ENABLE_IRQ; - p_dev->conf.IntType = INT_MEMORY_AND_IO; - p_dev->conf.ConfigIndex = 1; + p_dev->config_flags = CONF_ENABLE_IRQ; + p_dev->config_index = 1; dev = alloc_etherdev(sizeof(struct wl3501_card)); if (!dev) @@ -1914,14 +1896,6 @@ out_link: return -ENOMEM; } -/** - * wl3501_config - configure the PCMCIA socket and make eth device available - * @link - FILL_IN - * - * wl3501_config() is scheduled to run after a CARD_INSERTION event is - * received, to configure the PCMCIA socket, and to make the ethernet device - * available to the system. - */ static int wl3501_config(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -1952,10 +1926,7 @@ static int wl3501_config(struct pcmcia_device *link) if (ret) goto failed; - /* This actually configures the PCMCIA socket -- setting up the I/O - * windows and the interrupt mapping. */ - - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -2010,14 +1981,6 @@ failed: return -ENODEV; } -/** - * wl3501_release - unregister the net, release PCMCIA configuration - * @arg - link - * - * After a card is removed, wl3501_release() will unregister the net device, - * and release the PCMCIA configuration. If the device is still open, this - * will be postponed until it is closed. 
- */ static void wl3501_release(struct pcmcia_device *link) { pcmcia_disable_device(link); @@ -2056,9 +2019,7 @@ MODULE_DEVICE_TABLE(pcmcia, wl3501_ids); static struct pcmcia_driver wl3501_driver = { .owner = THIS_MODULE, - .drv = { - .name = "wl3501_cs", - }, + .name = "wl3501_cs", .probe = wl3501_probe, .remove = wl3501_detach, .id_table = wl3501_ids, diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c index b336cd9ee7a..f9bda64fcd1 100644 --- a/drivers/oprofile/oprof.c +++ b/drivers/oprofile/oprof.c @@ -225,26 +225,17 @@ post_sync: mutex_unlock(&start_mutex); } -int oprofile_set_backtrace(unsigned long val) +int oprofile_set_ulong(unsigned long *addr, unsigned long val) { - int err = 0; + int err = -EBUSY; mutex_lock(&start_mutex); - - if (oprofile_started) { - err = -EBUSY; - goto out; - } - - if (!oprofile_ops.backtrace) { - err = -EINVAL; - goto out; + if (!oprofile_started) { + *addr = val; + err = 0; } - - oprofile_backtrace_depth = val; - -out: mutex_unlock(&start_mutex); + return err; } @@ -257,16 +248,9 @@ static int __init oprofile_init(void) printk(KERN_INFO "oprofile: using timer interrupt.\n"); err = oprofile_timer_init(&oprofile_ops); if (err) - goto out_arch; + return err; } - err = oprofilefs_register(); - if (err) - goto out_arch; - return 0; - -out_arch: - oprofile_arch_exit(); - return err; + return oprofilefs_register(); } diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h index 47e12cb4ee8..177b73de5e5 100644 --- a/drivers/oprofile/oprof.h +++ b/drivers/oprofile/oprof.h @@ -37,7 +37,7 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root); int oprofile_timer_init(struct oprofile_operations *ops); void oprofile_timer_exit(void); -int oprofile_set_backtrace(unsigned long depth); +int oprofile_set_ulong(unsigned long *addr, unsigned long val); int oprofile_set_timeout(unsigned long time); #endif /* OPROF_H */ diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c index bbd7516e086..89f63456646 100644 --- a/drivers/oprofile/oprofile_files.c +++ b/drivers/oprofile/oprofile_files.c @@ -59,6 +59,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf, static const struct file_operations timeout_fops = { .read = timeout_read, .write = timeout_write, + .llseek = default_llseek, }; #endif @@ -79,21 +80,25 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou if (*offset) return -EINVAL; + if (!oprofile_ops.backtrace) + return -EINVAL; + retval = oprofilefs_ulong_from_user(&val, buf, count); if (retval) return retval; - retval = oprofile_set_backtrace(val); - + retval = oprofile_set_ulong(&oprofile_backtrace_depth, val); if (retval) return retval; + return count; } static const struct file_operations depth_fops = { .read = depth_read, - .write = depth_write + .write = depth_write, + .llseek = default_llseek, }; @@ -105,6 +110,7 @@ static ssize_t pointer_size_read(struct file *file, char __user *buf, size_t cou static const struct file_operations pointer_size_fops = { .read = pointer_size_read, + .llseek = default_llseek, }; @@ -116,6 +122,7 @@ static ssize_t cpu_type_read(struct file *file, char __user *buf, size_t count, static const struct file_operations cpu_type_fops = { .read = cpu_type_read, + .llseek = default_llseek, }; @@ -151,6 +158,7 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co static const struct file_operations enable_fops = { .read = enable_read, .write = enable_write, + .llseek = default_llseek, }; @@ 
-163,6 +171,7 @@ static ssize_t dump_write(struct file *file, char const __user *buf, size_t coun static const struct file_operations dump_fops = { .write = dump_write, + .llseek = noop_llseek, }; void oprofile_create_files(struct super_block *sb, struct dentry *root) diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c new file mode 100644 index 00000000000..9046f7b2ed7 --- /dev/null +++ b/drivers/oprofile/oprofile_perf.c @@ -0,0 +1,328 @@ +/* + * Copyright 2010 ARM Ltd. + * + * Perf-events backend for OProfile. + */ +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/oprofile.h> +#include <linux/slab.h> + +/* + * Per performance monitor configuration as set via oprofilefs. + */ +struct op_counter_config { + unsigned long count; + unsigned long enabled; + unsigned long event; + unsigned long unit_mask; + unsigned long kernel; + unsigned long user; + struct perf_event_attr attr; +}; + +static int oprofile_perf_enabled; +static DEFINE_MUTEX(oprofile_perf_mutex); + +static struct op_counter_config *counter_config; +static struct perf_event **perf_events[nr_cpumask_bits]; +static int num_counters; + +/* + * Overflow callback for oprofile. + */ +static void op_overflow_handler(struct perf_event *event, int unused, + struct perf_sample_data *data, struct pt_regs *regs) +{ + int id; + u32 cpu = smp_processor_id(); + + for (id = 0; id < num_counters; ++id) + if (perf_events[cpu][id] == event) + break; + + if (id != num_counters) + oprofile_add_sample(regs, id); + else + pr_warning("oprofile: ignoring spurious overflow " + "on cpu %u\n", cpu); +} + +/* + * Called by oprofile_perf_setup to create perf attributes to mirror the oprofile + * settings in counter_config. Attributes are created as `pinned' events and + * so are permanently scheduled on the PMU. + */ +static void op_perf_setup(void) +{ + int i; + u32 size = sizeof(struct perf_event_attr); + struct perf_event_attr *attr; + + for (i = 0; i < num_counters; ++i) { + attr = &counter_config[i].attr; + memset(attr, 0, size); + attr->type = PERF_TYPE_RAW; + attr->size = size; + attr->config = counter_config[i].event; + attr->sample_period = counter_config[i].count; + attr->pinned = 1; + } +} + +static int op_create_counter(int cpu, int event) +{ + struct perf_event *pevent; + + if (!counter_config[event].enabled || perf_events[cpu][event]) + return 0; + + pevent = perf_event_create_kernel_counter(&counter_config[event].attr, + cpu, NULL, + op_overflow_handler); + + if (IS_ERR(pevent)) + return PTR_ERR(pevent); + + if (pevent->state != PERF_EVENT_STATE_ACTIVE) { + perf_event_release_kernel(pevent); + pr_warning("oprofile: failed to enable event %d " + "on CPU %d\n", event, cpu); + return -EBUSY; + } + + perf_events[cpu][event] = pevent; + + return 0; +} + +static void op_destroy_counter(int cpu, int event) +{ + struct perf_event *pevent = perf_events[cpu][event]; + + if (pevent) { + perf_event_release_kernel(pevent); + perf_events[cpu][event] = NULL; + } +} + +/* + * Called by oprofile_perf_start to create active perf events based on the + * perviously configured attributes. + */ +static int op_perf_start(void) +{ + int cpu, event, ret = 0; + + for_each_online_cpu(cpu) { + for (event = 0; event < num_counters; ++event) { + ret = op_create_counter(cpu, event); + if (ret) + return ret; + } + } + + return ret; +} + +/* + * Called by oprofile_perf_stop at the end of a profiling run. 
+ */ +static void op_perf_stop(void) +{ + int cpu, event; + + for_each_online_cpu(cpu) + for (event = 0; event < num_counters; ++event) + op_destroy_counter(cpu, event); +} + +static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root) +{ + unsigned int i; + + for (i = 0; i < num_counters; i++) { + struct dentry *dir; + char buf[4]; + + snprintf(buf, sizeof buf, "%d", i); + dir = oprofilefs_mkdir(sb, root, buf); + oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); + oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); + oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count); + oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); + oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); + oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); + } + + return 0; +} + +static int oprofile_perf_setup(void) +{ + spin_lock(&oprofilefs_lock); + op_perf_setup(); + spin_unlock(&oprofilefs_lock); + return 0; +} + +static int oprofile_perf_start(void) +{ + int ret = -EBUSY; + + mutex_lock(&oprofile_perf_mutex); + if (!oprofile_perf_enabled) { + ret = 0; + op_perf_start(); + oprofile_perf_enabled = 1; + } + mutex_unlock(&oprofile_perf_mutex); + return ret; +} + +static void oprofile_perf_stop(void) +{ + mutex_lock(&oprofile_perf_mutex); + if (oprofile_perf_enabled) + op_perf_stop(); + oprofile_perf_enabled = 0; + mutex_unlock(&oprofile_perf_mutex); +} + +#ifdef CONFIG_PM + +static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state) +{ + mutex_lock(&oprofile_perf_mutex); + if (oprofile_perf_enabled) + op_perf_stop(); + mutex_unlock(&oprofile_perf_mutex); + return 0; +} + +static int oprofile_perf_resume(struct platform_device *dev) +{ + mutex_lock(&oprofile_perf_mutex); + if (oprofile_perf_enabled && op_perf_start()) + oprofile_perf_enabled = 0; + mutex_unlock(&oprofile_perf_mutex); + return 0; +} + +static struct platform_driver oprofile_driver = { + .driver = { + .name = "oprofile-perf", + }, + .resume = oprofile_perf_resume, + .suspend = oprofile_perf_suspend, +}; + +static struct platform_device *oprofile_pdev; + +static int __init init_driverfs(void) +{ + int ret; + + ret = platform_driver_register(&oprofile_driver); + if (ret) + return ret; + + oprofile_pdev = platform_device_register_simple( + oprofile_driver.driver.name, 0, NULL, 0); + if (IS_ERR(oprofile_pdev)) { + ret = PTR_ERR(oprofile_pdev); + platform_driver_unregister(&oprofile_driver); + } + + return ret; +} + +static void exit_driverfs(void) +{ + platform_device_unregister(oprofile_pdev); + platform_driver_unregister(&oprofile_driver); +} + +#else + +static inline int init_driverfs(void) { return 0; } +static inline void exit_driverfs(void) { } + +#endif /* CONFIG_PM */ + +void oprofile_perf_exit(void) +{ + int cpu, id; + struct perf_event *event; + + for_each_possible_cpu(cpu) { + for (id = 0; id < num_counters; ++id) { + event = perf_events[cpu][id]; + if (event) + perf_event_release_kernel(event); + } + + kfree(perf_events[cpu]); + } + + kfree(counter_config); + exit_driverfs(); +} + +int __init oprofile_perf_init(struct oprofile_operations *ops) +{ + int cpu, ret = 0; + + ret = init_driverfs(); + if (ret) + return ret; + + memset(&perf_events, 0, sizeof(perf_events)); + + num_counters = perf_num_counters(); + if (num_counters <= 0) { + pr_info("oprofile: no performance counters\n"); + ret = -ENODEV; + goto out; + } + + counter_config = kcalloc(num_counters, + sizeof(struct 
op_counter_config), GFP_KERNEL); + + if (!counter_config) { + pr_info("oprofile: failed to allocate %d " + "counters\n", num_counters); + ret = -ENOMEM; + num_counters = 0; + goto out; + } + + for_each_possible_cpu(cpu) { + perf_events[cpu] = kcalloc(num_counters, + sizeof(struct perf_event *), GFP_KERNEL); + if (!perf_events[cpu]) { + pr_info("oprofile: failed to allocate %d perf events " + "for cpu %d\n", num_counters, cpu); + ret = -ENOMEM; + goto out; + } + } + + ops->create_files = oprofile_perf_create_files; + ops->setup = oprofile_perf_setup; + ops->start = oprofile_perf_start; + ops->stop = oprofile_perf_stop; + ops->shutdown = oprofile_perf_stop; + ops->cpu_type = op_name_from_perf_id(); + + if (!ops->cpu_type) + ret = -ENODEV; + else + pr_info("oprofile: using %s\n", ops->cpu_type); + +out: + if (ret) + oprofile_perf_exit(); + + return ret; +} diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index 2766a6d3c2e..95f711b251a 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -91,16 +91,20 @@ static ssize_t ulong_read_file(struct file *file, char __user *buf, size_t count static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_t count, loff_t *offset) { - unsigned long *value = file->private_data; + unsigned long value; int retval; if (*offset) return -EINVAL; - retval = oprofilefs_ulong_from_user(value, buf, count); + retval = oprofilefs_ulong_from_user(&value, buf, count); + if (retval) + return retval; + retval = oprofile_set_ulong(file->private_data, value); if (retval) return retval; + return count; } @@ -117,59 +121,52 @@ static const struct file_operations ulong_fops = { .read = ulong_read_file, .write = ulong_write_file, .open = default_open, + .llseek = default_llseek, }; static const struct file_operations ulong_ro_fops = { .read = ulong_read_file, .open = default_open, + .llseek = default_llseek, }; -static struct dentry *__oprofilefs_create_file(struct super_block *sb, +static int __oprofilefs_create_file(struct super_block *sb, struct dentry *root, char const *name, const struct file_operations *fops, - int perm) + int perm, void *priv) { struct dentry *dentry; struct inode *inode; dentry = d_alloc_name(root, name); if (!dentry) - return NULL; + return -ENOMEM; inode = oprofilefs_get_inode(sb, S_IFREG | perm); if (!inode) { dput(dentry); - return NULL; + return -ENOMEM; } inode->i_fop = fops; d_add(dentry, inode); - return dentry; + dentry->d_inode->i_private = priv; + return 0; } int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root, char const *name, unsigned long *val) { - struct dentry *d = __oprofilefs_create_file(sb, root, name, - &ulong_fops, 0644); - if (!d) - return -EFAULT; - - d->d_inode->i_private = val; - return 0; + return __oprofilefs_create_file(sb, root, name, + &ulong_fops, 0644, val); } int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root, char const *name, unsigned long *val) { - struct dentry *d = __oprofilefs_create_file(sb, root, name, - &ulong_ro_fops, 0444); - if (!d) - return -EFAULT; - - d->d_inode->i_private = val; - return 0; + return __oprofilefs_create_file(sb, root, name, + &ulong_ro_fops, 0444, val); } @@ -183,37 +180,29 @@ static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t coun static const struct file_operations atomic_ro_fops = { .read = atomic_read_file, .open = default_open, + .llseek = default_llseek, }; int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, char 
const *name, atomic_t *val) { - struct dentry *d = __oprofilefs_create_file(sb, root, name, - &atomic_ro_fops, 0444); - if (!d) - return -EFAULT; - - d->d_inode->i_private = val; - return 0; + return __oprofilefs_create_file(sb, root, name, + &atomic_ro_fops, 0444, val); } int oprofilefs_create_file(struct super_block *sb, struct dentry *root, char const *name, const struct file_operations *fops) { - if (!__oprofilefs_create_file(sb, root, name, fops, 0644)) - return -EFAULT; - return 0; + return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL); } int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root, char const *name, const struct file_operations *fops, int perm) { - if (!__oprofilefs_create_file(sb, root, name, fops, perm)) - return -EFAULT; - return 0; + return __oprofilefs_create_file(sb, root, name, fops, perm, NULL); } diff --git a/drivers/parport/parport_cs.c b/drivers/parport/parport_cs.c index 23e50f4a27c..787ebdeae31 100644 --- a/drivers/parport/parport_cs.c +++ b/drivers/parport/parport_cs.c @@ -48,7 +48,6 @@ #include <linux/parport.h> #include <linux/parport_pc.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> @@ -81,14 +80,6 @@ static void parport_detach(struct pcmcia_device *p_dev); static int parport_config(struct pcmcia_device *link); static void parport_cs_release(struct pcmcia_device *); -/*====================================================================== - - parport_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - -======================================================================*/ - static int parport_probe(struct pcmcia_device *link) { parport_info_t *info; @@ -101,23 +92,11 @@ static int parport_probe(struct pcmcia_device *link) link->priv = info; info->p_dev = link; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return parport_config(link); } /* parport_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void parport_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "parport_detach\n"); @@ -127,36 +106,14 @@ static void parport_detach(struct pcmcia_device *link) kfree(link->priv); } /* parport_detach */ -/*====================================================================== - - parport_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - parport device available to the system. - -======================================================================*/ - -static int parport_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int parport_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - if (epp_mode) - p_dev->conf.ConfigIndex |= FORCE_EPP_MODE; - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin == 2) { - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - if (pcmcia_request_io(p_dev) != 0) - return -ENODEV; - return 0; - } - return -ENODEV; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + + return pcmcia_request_io(p_dev); } static int parport_config(struct pcmcia_device *link) @@ -167,13 +124,16 @@ static int parport_config(struct pcmcia_device *link) dev_dbg(&link->dev, "parport_config\n"); + if (epp_mode) + link->config_index |= FORCE_EPP_MODE; + ret = pcmcia_loop_config(link, parport_config_check, NULL); if (ret) goto failed; if (!link->irq) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -202,14 +162,6 @@ failed: return -ENODEV; } /* parport_config */ -/*====================================================================== - - After a card is removed, parport_cs_release() will unregister the - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void parport_cs_release(struct pcmcia_device *link) { parport_info_t *info = link->priv; @@ -236,9 +188,7 @@ MODULE_DEVICE_TABLE(pcmcia, parport_ids); static struct pcmcia_driver parport_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "parport_cs", - }, + .name = "parport_cs", .probe = parport_probe, .remove = parport_detach, .id_table = parport_ids, diff --git a/drivers/parport/share.c b/drivers/parport/share.c index dffa5d4fb29..a2d9d1e5926 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -306,7 +306,7 @@ struct parport *parport_register_port(unsigned long base, int irq, int dma, spin_lock_init(&tmp->pardevice_lock); tmp->ieee1284.mode = IEEE1284_MODE_COMPAT; tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE; - init_MUTEX_LOCKED (&tmp->ieee1284.irq); /* actually a semaphore at 0 */ + sema_init(&tmp->ieee1284.irq, 0); tmp->spintime = parport_default_spintime; atomic_set (&tmp->ref_count, 1); INIT_LIST_HEAD(&tmp->full_list); diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0a19708074c..0157708d474 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -36,6 +36,7 @@ #include <linux/tboot.h> #include <linux/dmi.h> #include <linux/slab.h> +#include <asm/iommu_table.h> #define PREFIX "DMAR: " @@ -687,7 +688,7 @@ failed: return 0; } -void __init detect_intel_iommu(void) +int __init detect_intel_iommu(void) { int ret; @@ -723,6 +724,8 @@ void __init detect_intel_iommu(void) } early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size); dmar_tbl = NULL; + + return ret ? 
1 : -ENODEV; } @@ -1221,9 +1224,9 @@ const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) } } -void dmar_msi_unmask(unsigned int irq) +void dmar_msi_unmask(struct irq_data *data) { - struct intel_iommu *iommu = get_irq_data(irq); + struct intel_iommu *iommu = irq_data_get_irq_data(data); unsigned long flag; /* unmask it */ @@ -1234,10 +1237,10 @@ void dmar_msi_unmask(unsigned int irq) spin_unlock_irqrestore(&iommu->register_lock, flag); } -void dmar_msi_mask(unsigned int irq) +void dmar_msi_mask(struct irq_data *data) { unsigned long flag; - struct intel_iommu *iommu = get_irq_data(irq); + struct intel_iommu *iommu = irq_data_get_irq_data(data); /* mask it */ spin_lock_irqsave(&iommu->register_lock, flag); @@ -1455,3 +1458,4 @@ int __init dmar_ir_support(void) return 0; return dmar->flags & 0x1; } +IOMMU_INIT_POST(detect_intel_iommu); diff --git a/drivers/pci/hotplug/cpqphp_sysfs.c b/drivers/pci/hotplug/cpqphp_sysfs.c index 56215322930..4cb30447a48 100644 --- a/drivers/pci/hotplug/cpqphp_sysfs.c +++ b/drivers/pci/hotplug/cpqphp_sysfs.c @@ -34,10 +34,11 @@ #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/debugfs.h> #include "cpqphp.h" +static DEFINE_MUTEX(cpqphp_mutex); static int show_ctrl (struct controller *ctrl, char *buf) { char *out = buf; @@ -147,7 +148,7 @@ static int open(struct inode *inode, struct file *file) struct ctrl_dbg *dbg; int retval = -ENOMEM; - lock_kernel(); + mutex_lock(&cpqphp_mutex); dbg = kmalloc(sizeof(*dbg), GFP_KERNEL); if (!dbg) goto exit; @@ -160,7 +161,7 @@ static int open(struct inode *inode, struct file *file) file->private_data = dbg; retval = 0; exit: - unlock_kernel(); + mutex_unlock(&cpqphp_mutex); return retval; } @@ -169,7 +170,7 @@ static loff_t lseek(struct file *file, loff_t off, int whence) struct ctrl_dbg *dbg; loff_t new = -1; - lock_kernel(); + mutex_lock(&cpqphp_mutex); dbg = file->private_data; switch (whence) { @@ -181,10 +182,10 @@ static loff_t lseek(struct file *file, loff_t off, int whence) break; } if (new < 0 || new > dbg->size) { - unlock_kernel(); + mutex_unlock(&cpqphp_mutex); return -EINVAL; } - unlock_kernel(); + mutex_unlock(&cpqphp_mutex); return (file->f_pos = new); } diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 98abf8b9129..834842aa5bb 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c @@ -57,28 +57,22 @@ void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg) *msg = cfg->msg; } -void mask_ht_irq(unsigned int irq) +void mask_ht_irq(struct irq_data *data) { - struct ht_irq_cfg *cfg; - struct ht_irq_msg msg; - - cfg = get_irq_data(irq); + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); + struct ht_irq_msg msg = cfg->msg; - msg = cfg->msg; msg.address_lo |= 1; - write_ht_irq_msg(irq, &msg); + write_ht_irq_msg(data->irq, &msg); } -void unmask_ht_irq(unsigned int irq) +void unmask_ht_irq(struct irq_data *data) { - struct ht_irq_cfg *cfg; - struct ht_irq_msg msg; - - cfg = get_irq_data(irq); + struct ht_irq_cfg *cfg = irq_data_get_irq_data(data); + struct ht_irq_msg msg = cfg->msg; - msg = cfg->msg; msg.address_lo &= ~1; - write_ht_irq_msg(irq, &msg); + write_ht_irq_msg(data->irq, &msg); } /** diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index c3ceebb5be8..4789f8e8bf7 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -71,6 +71,49 @@ #define DMA_32BIT_PFN IOVA_PFN(DMA_BIT_MASK(32)) #define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64)) +/* 
page table handling */ +#define LEVEL_STRIDE (9) +#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) + +static inline int agaw_to_level(int agaw) +{ + return agaw + 2; +} + +static inline int agaw_to_width(int agaw) +{ + return 30 + agaw * LEVEL_STRIDE; +} + +static inline int width_to_agaw(int width) +{ + return (width - 30) / LEVEL_STRIDE; +} + +static inline unsigned int level_to_offset_bits(int level) +{ + return (level - 1) * LEVEL_STRIDE; +} + +static inline int pfn_level_offset(unsigned long pfn, int level) +{ + return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; +} + +static inline unsigned long level_mask(int level) +{ + return -1UL << level_to_offset_bits(level); +} + +static inline unsigned long level_size(int level) +{ + return 1UL << level_to_offset_bits(level); +} + +static inline unsigned long align_to_level(unsigned long pfn, int level) +{ + return (pfn + level_size(level) - 1) & level_mask(level); +} /* VT-d pages must always be _smaller_ than MM pages. Otherwise things are never going to work. */ @@ -434,8 +477,6 @@ void free_iova_mem(struct iova *iova) } -static inline int width_to_agaw(int width); - static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw) { unsigned long sagaw; @@ -646,51 +687,6 @@ out: spin_unlock_irqrestore(&iommu->lock, flags); } -/* page table handling */ -#define LEVEL_STRIDE (9) -#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) - -static inline int agaw_to_level(int agaw) -{ - return agaw + 2; -} - -static inline int agaw_to_width(int agaw) -{ - return 30 + agaw * LEVEL_STRIDE; - -} - -static inline int width_to_agaw(int width) -{ - return (width - 30) / LEVEL_STRIDE; -} - -static inline unsigned int level_to_offset_bits(int level) -{ - return (level - 1) * LEVEL_STRIDE; -} - -static inline int pfn_level_offset(unsigned long pfn, int level) -{ - return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK; -} - -static inline unsigned long level_mask(int level) -{ - return -1UL << level_to_offset_bits(level); -} - -static inline unsigned long level_size(int level) -{ - return 1UL << level_to_offset_bits(level); -} - -static inline unsigned long align_to_level(unsigned long pfn, int level) -{ - return (pfn + level_size(level) - 1) & level_mask(level); -} - static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, unsigned long pfn) { @@ -3761,6 +3757,33 @@ static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); +#define GGC 0x52 +#define GGC_MEMORY_SIZE_MASK (0xf << 8) +#define GGC_MEMORY_SIZE_NONE (0x0 << 8) +#define GGC_MEMORY_SIZE_1M (0x1 << 8) +#define GGC_MEMORY_SIZE_2M (0x3 << 8) +#define GGC_MEMORY_VT_ENABLED (0x8 << 8) +#define GGC_MEMORY_SIZE_2M_VT (0x9 << 8) +#define GGC_MEMORY_SIZE_3M_VT (0xa << 8) +#define GGC_MEMORY_SIZE_4M_VT (0xb << 8) + +static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) +{ + unsigned short ggc; + + if (pci_read_config_word(dev, GGC, &ggc)) + return; + + if (!(ggc & GGC_MEMORY_VT_ENABLED)) { + printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); + dmar_map_gfx = 0; + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); + /* On Tylersburg chipsets, some BIOSes have 
been known to enable the ISOCH DMAR unit for the Azalia sound device, but not give it any TLB entries, which causes it to deadlock. Check for that. We do diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c index fd1d2867cdc..ec87cd66f3e 100644 --- a/drivers/pci/intr_remapping.c +++ b/drivers/pci/intr_remapping.c @@ -46,109 +46,24 @@ static __init int setup_intremap(char *str) } early_param("intremap", setup_intremap); -struct irq_2_iommu { - struct intel_iommu *iommu; - u16 irte_index; - u16 sub_handle; - u8 irte_mask; -}; - -#ifdef CONFIG_GENERIC_HARDIRQS -static struct irq_2_iommu *get_one_free_irq_2_iommu(int node) -{ - struct irq_2_iommu *iommu; - - iommu = kzalloc_node(sizeof(*iommu), GFP_ATOMIC, node); - printk(KERN_DEBUG "alloc irq_2_iommu on node %d\n", node); - - return iommu; -} - -static struct irq_2_iommu *irq_2_iommu(unsigned int irq) -{ - struct irq_desc *desc; - - desc = irq_to_desc(irq); - - if (WARN_ON_ONCE(!desc)) - return NULL; - - return desc->irq_2_iommu; -} - -static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) -{ - struct irq_desc *desc; - struct irq_2_iommu *irq_iommu; - - desc = irq_to_desc(irq); - if (!desc) { - printk(KERN_INFO "can not get irq_desc for %d\n", irq); - return NULL; - } - - irq_iommu = desc->irq_2_iommu; - - if (!irq_iommu) - desc->irq_2_iommu = get_one_free_irq_2_iommu(irq_node(irq)); - - return desc->irq_2_iommu; -} - -#else /* !CONFIG_SPARSE_IRQ */ - -static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; - -static struct irq_2_iommu *irq_2_iommu(unsigned int irq) -{ - if (irq < nr_irqs) - return &irq_2_iommuX[irq]; - - return NULL; -} -static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) -{ - return irq_2_iommu(irq); -} -#endif - static DEFINE_SPINLOCK(irq_2_ir_lock); -static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) -{ - struct irq_2_iommu *irq_iommu; - - irq_iommu = irq_2_iommu(irq); - - if (!irq_iommu) - return NULL; - - if (!irq_iommu->iommu) - return NULL; - - return irq_iommu; -} - -int irq_remapped(int irq) +static struct irq_2_iommu *irq_2_iommu(unsigned int irq) { - return valid_irq_2_iommu(irq) != NULL; + struct irq_cfg *cfg = get_irq_chip_data(irq); + return cfg ? &cfg->irq_2_iommu : NULL; } int get_irte(int irq, struct irte *entry) { - int index; - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; + int index; - if (!entry) + if (!entry || !irq_iommu) return -1; spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return -1; - } index = irq_iommu->irte_index + irq_iommu->sub_handle; *entry = *(irq_iommu->iommu->ir_table->base + index); @@ -160,20 +75,14 @@ int get_irte(int irq, struct irte *entry) int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) { struct ir_table *table = iommu->ir_table; - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); u16 index, start_index; unsigned int mask = 0; unsigned long flags; int i; - if (!count) - return -1; - -#ifndef CONFIG_SPARSE_IRQ - /* protect irq_2_iommu_alloc later */ - if (irq >= nr_irqs) + if (!count || !irq_iommu) return -1; -#endif /* * start the IRTE search from index 0. 
@@ -214,13 +123,6 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) for (i = index; i < index + count; i++) table->base[i].present = 1; - irq_iommu = irq_2_iommu_alloc(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - printk(KERN_ERR "can't allocate irq_2_iommu\n"); - return -1; - } - irq_iommu->iommu = iommu; irq_iommu->irte_index = index; irq_iommu->sub_handle = 0; @@ -244,17 +146,14 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask) int map_irq_to_irte_handle(int irq, u16 *sub_handle) { - int index; - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; + int index; - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); + if (!irq_iommu) return -1; - } + spin_lock_irqsave(&irq_2_ir_lock, flags); *sub_handle = irq_iommu->sub_handle; index = irq_iommu->irte_index; spin_unlock_irqrestore(&irq_2_ir_lock, flags); @@ -263,18 +162,13 @@ int map_irq_to_irte_handle(int irq, u16 *sub_handle) int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) { - struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; - spin_lock_irqsave(&irq_2_ir_lock, flags); - - irq_iommu = irq_2_iommu_alloc(irq); - - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - printk(KERN_ERR "can't allocate irq_2_iommu\n"); + if (!irq_iommu) return -1; - } + + spin_lock_irqsave(&irq_2_ir_lock, flags); irq_iommu->iommu = iommu; irq_iommu->irte_index = index; @@ -286,43 +180,18 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) return 0; } -int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) -{ - struct irq_2_iommu *irq_iommu; - unsigned long flags; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return -1; - } - - irq_iommu->iommu = NULL; - irq_iommu->irte_index = 0; - irq_iommu->sub_handle = 0; - irq_2_iommu(irq)->irte_mask = 0; - - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return 0; -} - int modify_irte(int irq, struct irte *irte_modified) { - int rc; - int index; - struct irte *irte; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); struct intel_iommu *iommu; - struct irq_2_iommu *irq_iommu; unsigned long flags; + struct irte *irte; + int rc, index; - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); + if (!irq_iommu) return -1; - } + + spin_lock_irqsave(&irq_2_ir_lock, flags); iommu = irq_iommu->iommu; @@ -339,31 +208,6 @@ int modify_irte(int irq, struct irte *irte_modified) return rc; } -int flush_irte(int irq) -{ - int rc; - int index; - struct intel_iommu *iommu; - struct irq_2_iommu *irq_iommu; - unsigned long flags; - - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - return -1; - } - - iommu = irq_iommu->iommu; - - index = irq_iommu->irte_index + irq_iommu->sub_handle; - - rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask); - spin_unlock_irqrestore(&irq_2_ir_lock, flags); - - return rc; -} - struct intel_iommu *map_hpet_to_ir(u8 hpet_id) { int i; @@ -420,16 +264,14 @@ static int clear_entries(struct irq_2_iommu *irq_iommu) int free_irte(int irq) { - int rc = 0; - 
struct irq_2_iommu *irq_iommu; + struct irq_2_iommu *irq_iommu = irq_2_iommu(irq); unsigned long flags; + int rc; - spin_lock_irqsave(&irq_2_ir_lock, flags); - irq_iommu = valid_irq_2_iommu(irq); - if (!irq_iommu) { - spin_unlock_irqrestore(&irq_2_ir_lock, flags); + if (!irq_iommu) return -1; - } + + spin_lock_irqsave(&irq_2_ir_lock, flags); rc = clear_entries(irq_iommu); diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index ce6a3666b3d..553d8ee55c1 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -608,7 +608,7 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno, * the VF BAR size multiplied by the number of VFs. The alignment * is just the VF BAR size. */ -int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) +resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) { struct resource tmp; enum pci_bar_type type; diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 69b7be33b3a..5fcf5aec680 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -170,33 +170,31 @@ static void msix_mask_irq(struct msi_desc *desc, u32 flag) desc->masked = __msix_mask_irq(desc, flag); } -static void msi_set_mask_bit(unsigned irq, u32 flag) +static void msi_set_mask_bit(struct irq_data *data, u32 flag) { - struct msi_desc *desc = get_irq_msi(irq); + struct msi_desc *desc = irq_data_get_msi(data); if (desc->msi_attrib.is_msix) { msix_mask_irq(desc, flag); readl(desc->mask_base); /* Flush write to device */ } else { - unsigned offset = irq - desc->dev->irq; + unsigned offset = data->irq - desc->dev->irq; msi_mask_irq(desc, 1 << offset, flag << offset); } } -void mask_msi_irq(unsigned int irq) +void mask_msi_irq(struct irq_data *data) { - msi_set_mask_bit(irq, 1); + msi_set_mask_bit(data, 1); } -void unmask_msi_irq(unsigned int irq) +void unmask_msi_irq(struct irq_data *data) { - msi_set_mask_bit(irq, 0); + msi_set_mask_bit(data, 0); } -void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) +void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_desc_msi(desc); - BUG_ON(entry->dev->current_state != PCI_D0); if (entry->msi_attrib.is_msix) { @@ -227,15 +225,13 @@ void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void read_msi_msg(unsigned int irq, struct msi_msg *msg) { - struct irq_desc *desc = irq_to_desc(irq); + struct msi_desc *entry = get_irq_msi(irq); - read_msi_msg_desc(desc, msg); + __read_msi_msg(entry, msg); } -void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) +void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_desc_msi(desc); - /* Assert that the cache is valid, assuming that * valid messages are not all-zeroes. 
*/ BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | @@ -246,15 +242,13 @@ void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) { - struct irq_desc *desc = irq_to_desc(irq); + struct msi_desc *entry = get_irq_msi(irq); - get_cached_msi_msg_desc(desc, msg); + __get_cached_msi_msg(entry, msg); } -void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) +void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { - struct msi_desc *entry = get_irq_desc_msi(desc); - if (entry->dev->current_state != PCI_D0) { /* Don't touch the hardware now */ } else if (entry->msi_attrib.is_msix) { @@ -292,9 +286,9 @@ void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) void write_msi_msg(unsigned int irq, struct msi_msg *msg) { - struct irq_desc *desc = irq_to_desc(irq); + struct msi_desc *entry = get_irq_msi(irq); - write_msi_msg_desc(desc, msg); + __write_msi_msg(entry, msg); } static void free_msi_irqs(struct pci_dev *dev) diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 7754a678ab1..6beb11b617a 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -264,7 +264,8 @@ extern int pci_iov_init(struct pci_dev *dev); extern void pci_iov_release(struct pci_dev *dev); extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); -extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); +extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, + int resno); extern void pci_restore_iov_state(struct pci_dev *dev); extern int pci_iov_bus_range(struct pci_bus *bus); @@ -320,7 +321,7 @@ static inline int pci_ats_enabled(struct pci_dev *dev) } #endif /* CONFIG_PCI_IOV */ -static inline int pci_resource_alignment(struct pci_dev *dev, +static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, struct resource *res) { #ifdef CONFIG_PCI_IOV diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c index 909924692b8..b3cf6223f63 100644 --- a/drivers/pci/pcie/aer/aer_inject.c +++ b/drivers/pci/pcie/aer/aer_inject.c @@ -472,6 +472,7 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf, static const struct file_operations aer_inject_fops = { .write = aer_inject_write, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice aer_inject_device = { diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 89ed181cd90..857ae01734a 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -163,6 +163,26 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_d DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); /* + * Intel NM10 "TigerPoint" LPC PM1a_STS.BM_STS must be clear + * for some HT machines to use C4 w/o hanging. 
+ */ +static void __devinit quirk_tigerpoint_bm_sts(struct pci_dev *dev) +{ + u32 pmbase; + u16 pm1a; + + pci_read_config_dword(dev, 0x40, &pmbase); + pmbase = pmbase & 0xff80; + pm1a = inw(pmbase); + + if (pm1a & 0x10) { + dev_info(&dev->dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n"); + outw(0x10, pmbase); + } +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); + +/* * Chipsets where PCI->PCI transfers vanish or hang */ static void __devinit quirk_nopcipci(struct pci_dev *dev) diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c index 88c4c409878..95dd7c62741 100644 --- a/drivers/pcmcia/au1000_generic.c +++ b/drivers/pcmcia/au1000_generic.c @@ -441,14 +441,12 @@ int au1x00_pcmcia_socket_probe(struct device *dev, struct pcmcia_low_level *ops, out_err: - flush_scheduled_work(); ops->hw_shutdown(skt); while (i-- > 0) { skt = PCMCIA_SOCKET(i); del_timer_sync(&skt->poll_timer); pcmcia_unregister_socket(&skt->socket); - flush_scheduled_work(); if (i == 0) { iounmap(skt->virt_io + (u32)mips_io_port_base); skt->virt_io = NULL; @@ -480,7 +478,6 @@ int au1x00_drv_pcmcia_remove(struct platform_device *dev) del_timer_sync(&skt->poll_timer); pcmcia_unregister_socket(&skt->socket); - flush_scheduled_work(); skt->ops->hw_shutdown(skt); au1x00_pcmcia_config_skt(skt, &dead_socket); iounmap(skt->virt_io + (u32)mips_io_port_base); diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h index 67530cefcf3..5c36bda2963 100644 --- a/drivers/pcmcia/au1000_generic.h +++ b/drivers/pcmcia/au1000_generic.h @@ -23,7 +23,6 @@ /* include the world */ -#include <pcmcia/cs.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c index 807f2d75dad..b2396647a16 100644 --- a/drivers/pcmcia/au1000_pb1x00.c +++ b/drivers/pcmcia/au1000_pb1x00.c @@ -31,7 +31,6 @@ #include <linux/proc_fs.h> #include <linux/types.h> -#include <pcmcia/cs.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 91414a0ddc4..884a984216f 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -28,7 +28,6 @@ #include <asm/unaligned.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/cisreg.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 2ec8ac97445..d9ea192c400 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -33,7 +33,6 @@ #include <asm/irq.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -845,7 +844,7 @@ static int pcmcia_socket_dev_resume_noirq(struct device *dev) return __pcmcia_pm_op(dev, socket_early_resume); } -static int pcmcia_socket_dev_resume(struct device *dev) +static int __used pcmcia_socket_dev_resume(struct device *dev) { return __pcmcia_pm_op(dev, socket_late_resume); } diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h index da055dc14d9..7f1953f78b1 100644 --- a/drivers/pcmcia/cs_internal.h +++ b/drivers/pcmcia/cs_internal.h @@ -33,18 +33,9 @@ typedef struct config_t { struct kref ref; unsigned int state; - unsigned int Attributes; - unsigned int IntType; - unsigned int ConfigBase; - unsigned char Status, Pin, Copy, Option, ExtStatus; - unsigned int CardValues; struct resource io[MAX_IO_WIN]; /* io ports */ struct resource mem[MAX_WIN]; /* mem areas */ - - struct { 
- u_int Attributes; - } irq; } config_t; diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 55570d9e1e4..100c4412457 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -26,7 +26,6 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/ss.h> @@ -52,7 +51,7 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) if (!p_drv->probe || !p_drv->remove) printk(KERN_DEBUG "pcmcia: %s lacks a requisite callback " - "function\n", p_drv->drv.name); + "function\n", p_drv->name); while (did && did->match_flags) { for (i = 0; i < 4; i++) { @@ -65,7 +64,7 @@ static void pcmcia_check_driver(struct pcmcia_driver *p_drv) printk(KERN_DEBUG "pcmcia: %s: invalid hash for " "product string \"%s\": is 0x%x, should " - "be 0x%x\n", p_drv->drv.name, did->prod_id[i], + "be 0x%x\n", p_drv->name, did->prod_id[i], did->prod_id_hash[i], hash); printk(KERN_DEBUG "pcmcia: see " "Documentation/pcmcia/devicetable.txt for " @@ -180,10 +179,11 @@ int pcmcia_register_driver(struct pcmcia_driver *driver) /* initialize common fields */ driver->drv.bus = &pcmcia_bus_type; driver->drv.owner = driver->owner; + driver->drv.name = driver->name; mutex_init(&driver->dynids.lock); INIT_LIST_HEAD(&driver->dynids.list); - pr_debug("registering driver %s\n", driver->drv.name); + pr_debug("registering driver %s\n", driver->name); error = driver_register(&driver->drv); if (error < 0) @@ -203,7 +203,7 @@ EXPORT_SYMBOL(pcmcia_register_driver); */ void pcmcia_unregister_driver(struct pcmcia_driver *driver) { - pr_debug("unregistering driver %s\n", driver->drv.name); + pr_debug("unregistering driver %s\n", driver->name); driver_unregister(&driver->drv); pcmcia_free_dynids(driver); } @@ -264,7 +264,7 @@ static int pcmcia_device_probe(struct device *dev) p_drv = to_pcmcia_drv(dev->driver); s = p_dev->socket; - dev_dbg(dev, "trying to bind to %s\n", p_drv->drv.name); + dev_dbg(dev, "trying to bind to %s\n", p_drv->name); if ((!p_drv->probe) || (!p_dev->function_config) || (!try_module_get(p_drv->owner))) { @@ -276,21 +276,28 @@ static int pcmcia_device_probe(struct device *dev) ret = pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_CONFIG, &cis_config); if (!ret) { - p_dev->conf.ConfigBase = cis_config.base; - p_dev->conf.Present = cis_config.rmask[0]; + p_dev->config_base = cis_config.base; + p_dev->config_regs = cis_config.rmask[0]; + dev_dbg(dev, "base %x, regs %x", p_dev->config_base, + p_dev->config_regs); } else { dev_printk(KERN_INFO, dev, "pcmcia: could not parse base and rmask0 of CIS\n"); - p_dev->conf.ConfigBase = 0; - p_dev->conf.Present = 0; + p_dev->config_base = 0; + p_dev->config_regs = 0; } ret = p_drv->probe(p_dev); if (ret) { dev_dbg(dev, "binding to %s failed with %d\n", - p_drv->drv.name, ret); + p_drv->name, ret); goto put_module; } + dev_dbg(dev, "%s bound: Vpp %d.%d, idx %x, IRQ %d", p_drv->name, + p_dev->vpp/10, p_dev->vpp%10, p_dev->config_index, p_dev->irq); + dev_dbg(dev, "resources: ioport %pR %pR iomem %pR %pR %pR", + p_dev->resource[0], p_dev->resource[1], p_dev->resource[2], + p_dev->resource[3], p_dev->resource[4]); mutex_lock(&s->ops_mutex); if ((s->pcmcia_pfc) && @@ -374,13 +381,13 @@ static int pcmcia_device_remove(struct device *dev) if (p_dev->_irq || p_dev->_io || p_dev->_locked) dev_printk(KERN_INFO, dev, "pcmcia: driver %s did not release config properly\n", - p_drv->drv.name); + p_drv->name); for (i = 0; i < MAX_WIN; i++) if (p_dev->_win & CLIENT_WIN_REQ(i)) dev_printk(KERN_INFO, 
dev, "pcmcia: driver %s did not release window properly\n", - p_drv->drv.name); + p_drv->name); /* references from pcmcia_probe_device */ pcmcia_put_dev(p_dev); @@ -1136,7 +1143,7 @@ static int pcmcia_dev_suspend(struct device *dev, pm_message_t state) dev_printk(KERN_ERR, dev, "pcmcia: device %s (driver %s) did " "not want to go to sleep (%d)\n", - p_dev->devname, p_drv->drv.name, ret); + p_dev->devname, p_drv->name, ret); mutex_lock(&p_dev->socket->ops_mutex); p_dev->suspended = 0; mutex_unlock(&p_dev->socket->ops_mutex); @@ -1178,7 +1185,7 @@ static int pcmcia_dev_resume(struct device *dev) if (p_dev->device_no == p_dev->func) { dev_dbg(dev, "requesting configuration\n"); - ret = pcmcia_request_configuration(p_dev, &p_dev->conf); + ret = pcmcia_enable_device(p_dev); if (ret) goto out; } diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c index 05d0879ce93..fc7906eaf22 100644 --- a/drivers/pcmcia/i82092.c +++ b/drivers/pcmcia/i82092.c @@ -16,7 +16,6 @@ #include <linux/device.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <asm/system.h> #include <asm/io.h> diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index 61746bd598b..72a033a2acd 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c @@ -51,7 +51,6 @@ #include <asm/system.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <linux/isapnp.h> diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c index 24de4992586..2adb0106a03 100644 --- a/drivers/pcmcia/m32r_cfc.c +++ b/drivers/pcmcia/m32r_cfc.c @@ -27,7 +27,6 @@ #include <asm/system.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #undef MAX_IO_WIN /* FIXME */ #define MAX_IO_WIN 1 diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index 8e4723844ad..1511ff71c87 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c @@ -28,7 +28,6 @@ #include <asm/addrspace.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> /* XXX: should be moved into asm/irq.h */ #define PCC0_IRQ 24 diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index f0ecad99ce8..99d4f23cb43 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -59,7 +59,6 @@ #include <asm/irq.h> #include <asm/fs_pd.h> -#include <pcmcia/cs.h> #include <pcmcia/ss.h> #define pcmcia_info(args...) printk(KERN_INFO "m8xx_pcmcia: "args) diff --git a/drivers/pcmcia/o2micro.h b/drivers/pcmcia/o2micro.h index e74bebac269..5096e92c7a4 100644 --- a/drivers/pcmcia/o2micro.h +++ b/drivers/pcmcia/o2micro.h @@ -153,14 +153,14 @@ static int o2micro_override(struct yenta_socket *socket) if (use_speedup) { dev_info(&socket->dev->dev, - "O2: enabling read prefetch/write burst\n"); + "O2: enabling read prefetch/write burst. If you experience problems or performance issues, use the yenta_socket parameter 'o2_speedup=off'\n"); config_writeb(socket, O2_RESERVED1, a | O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST); config_writeb(socket, O2_RESERVED2, b | O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST); } else { dev_info(&socket->dev->dev, - "O2: disabling read prefetch/write burst\n"); + "O2: disabling read prefetch/write burst. 
If you experience problems or performance issues, use the yenta_socket parameter 'o2_speedup=on'\n"); config_writeb(socket, O2_RESERVED1, a & ~(O2_RES_READ_PREFETCH | O2_RES_WRITE_BURST)); config_writeb(socket, O2_RESERVED2, diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c index 0ac54da1588..e2c92415b89 100644 --- a/drivers/pcmcia/pcmcia_cis.c +++ b/drivers/pcmcia/pcmcia_cis.c @@ -6,7 +6,7 @@ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * Copyright (C) 1999 David A. Hinds - * Copyright (C) 2004-2009 Dominik Brodowski + * Copyright (C) 2004-2010 Dominik Brodowski * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -22,7 +22,6 @@ #include <pcmcia/cisreg.h> #include <pcmcia/cistpl.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/ds.h> #include "cs_internal.h" @@ -126,14 +125,24 @@ next_entry: return ret; } + +/** + * pcmcia_io_cfg_data_width() - convert cfgtable to data path width parameter + */ +static int pcmcia_io_cfg_data_width(unsigned int flags) +{ + if (!(flags & CISTPL_IO_8BIT)) + return IO_DATA_PATH_WIDTH_16; + if (!(flags & CISTPL_IO_16BIT)) + return IO_DATA_PATH_WIDTH_8; + return IO_DATA_PATH_WIDTH_AUTO; +} + + struct pcmcia_cfg_mem { struct pcmcia_device *p_dev; + int (*conf_check) (struct pcmcia_device *p_dev, void *priv_data); void *priv_data; - int (*conf_check) (struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data); cisparse_t parse; cistpl_cftable_entry_t dflt; }; @@ -147,25 +156,102 @@ struct pcmcia_cfg_mem { */ static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv) { - cistpl_cftable_entry_t *cfg = &parse->cftable_entry; struct pcmcia_cfg_mem *cfg_mem = priv; + struct pcmcia_device *p_dev = cfg_mem->p_dev; + cistpl_cftable_entry_t *cfg = &parse->cftable_entry; + cistpl_cftable_entry_t *dflt = &cfg_mem->dflt; + unsigned int flags = p_dev->config_flags; + unsigned int vcc = p_dev->socket->socket.Vcc; + + dev_dbg(&p_dev->dev, "testing configuration %x, autoconf %x\n", + cfg->index, flags); /* default values */ - cfg_mem->p_dev->conf.ConfigIndex = cfg->index; + cfg_mem->p_dev->config_index = cfg->index; if (cfg->flags & CISTPL_CFTABLE_DEFAULT) cfg_mem->dflt = *cfg; - return cfg_mem->conf_check(cfg_mem->p_dev, cfg, &cfg_mem->dflt, - cfg_mem->p_dev->socket->socket.Vcc, - cfg_mem->priv_data); + /* check for matching Vcc? */ + if (flags & CONF_AUTO_CHECK_VCC) { + if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) { + if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000) + return -ENODEV; + } else if (dflt->vcc.present & (1 << CISTPL_POWER_VNOM)) { + if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM] / 10000) + return -ENODEV; + } + } + + /* set Vpp? */ + if (flags & CONF_AUTO_SET_VPP) { + if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) + p_dev->vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; + else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) + p_dev->vpp = + dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; + } + + /* enable audio? */ + if ((flags & CONF_AUTO_AUDIO) && (cfg->flags & CISTPL_CFTABLE_AUDIO)) + p_dev->config_flags |= CONF_ENABLE_SPKR; + + + /* IO window settings? */ + if (flags & CONF_AUTO_SET_IO) { + cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; + int i = 0; + + p_dev->resource[0]->start = p_dev->resource[0]->end = 0; + p_dev->resource[1]->start = p_dev->resource[1]->end = 0; + if (io->nwin == 0) + return -ENODEV; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= + pcmcia_io_cfg_data_width(io->flags); + if (io->nwin > 1) { + /* For multifunction cards, by convention, we + * configure the network function with window 0, + * and serial with window 1 */ + i = (io->win[1].len > io->win[0].len); + p_dev->resource[1]->flags = p_dev->resource[0]->flags; + p_dev->resource[1]->start = io->win[1-i].base; + p_dev->resource[1]->end = io->win[1-i].len; + } + p_dev->resource[0]->start = io->win[i].base; + p_dev->resource[0]->end = io->win[i].len; + p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; + } + + /* MEM window settings? */ + if (flags & CONF_AUTO_SET_IOMEM) { + /* so far, we only set one memory window */ + cistpl_mem_t *mem = (cfg->mem.nwin) ? &cfg->mem : &dflt->mem; + + p_dev->resource[2]->start = p_dev->resource[2]->end = 0; + if (mem->nwin == 0) + return -ENODEV; + + p_dev->resource[2]->start = mem->win[0].host_addr; + p_dev->resource[2]->end = mem->win[0].len; + if (p_dev->resource[2]->end < 0x1000) + p_dev->resource[2]->end = 0x1000; + p_dev->card_addr = mem->win[0].card_addr; + } + + dev_dbg(&p_dev->dev, + "checking configuration %x: %pr %pr %pr (%d lines)\n", + p_dev->config_index, p_dev->resource[0], p_dev->resource[1], + p_dev->resource[2], p_dev->io_lines); + + return cfg_mem->conf_check(p_dev, cfg_mem->priv_data); } /** * pcmcia_loop_config() - loop over configuration options * @p_dev: the struct pcmcia_device which we need to loop for. * @conf_check: function to call for each configuration option. - * It gets passed the struct pcmcia_device, the CIS data - * describing the configuration option, and private data + * It gets passed the struct pcmcia_device and private data * being passed to pcmcia_loop_config() * @priv_data: private data to be passed to the conf_check function. * @@ -175,9 +261,6 @@ static int pcmcia_do_loop_config(tuple_t *tuple, cisparse_t *parse, void *priv) */ int pcmcia_loop_config(struct pcmcia_device *p_dev, int (*conf_check) (struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data), void *priv_data) { diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index a5c176598d9..0bdda5b3ed5 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c @@ -6,7 +6,7 @@ * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * * Copyright (C) 1999 David A. 
Hinds - * Copyright (C) 2004-2005 Dominik Brodowski + * Copyright (C) 2004-2010 Dominik Brodowski * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -26,7 +26,6 @@ #include <asm/irq.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -56,6 +55,12 @@ struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align, } +/** + * release_io_space() - release IO ports allocated with alloc_io_space() + * @s: pcmcia socket + * @res: resource to release + * + */ static void release_io_space(struct pcmcia_socket *s, struct resource *res) { resource_size_t num = resource_size(res); @@ -81,9 +86,14 @@ static void release_io_space(struct pcmcia_socket *s, struct resource *res) } } } -} /* release_io_space */ +} + -/** alloc_io_space +/** + * alloc_io_space() - allocate IO ports for use by a PCMCIA device + * @s: pcmcia socket + * @res: resource to allocate (begin: begin, end: size) + * @lines: number of IO lines decoded by the PCMCIA card * * Special stuff for managing IO windows, because they are scarce */ @@ -135,7 +145,7 @@ static int alloc_io_space(struct pcmcia_socket *s, struct resource *res, } dev_dbg(&s->dev, "alloc_io_space request result %d: %pR\n", ret, res); return ret; -} /* alloc_io_space */ +} /** @@ -168,14 +178,14 @@ static int pcmcia_access_config(struct pcmcia_device *p_dev, return -EACCES; } - addr = (c->ConfigBase + where) >> 1; + addr = (p_dev->config_base + where) >> 1; ret = accessf(s, 1, addr, 1, val); mutex_unlock(&s->ops_mutex); return ret; -} /* pcmcia_access_config */ +} /** @@ -204,11 +214,20 @@ int pcmcia_write_config_byte(struct pcmcia_device *p_dev, off_t where, u8 val) EXPORT_SYMBOL(pcmcia_write_config_byte); -int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh, +/** + * pcmcia_map_mem_page() - modify iomem window to point to a different offset + * @p_dev: pcmcia device + * @res: iomem resource already enabled by pcmcia_request_window() + * @offset: card_offset to map + * + * pcmcia_map_mem_page() modifies what can be read and written by accessing + * an iomem range previously enabled by pcmcia_request_window(), by setting + * the card_offset value to @offset. + */ +int pcmcia_map_mem_page(struct pcmcia_device *p_dev, struct resource *res, unsigned int offset) { struct pcmcia_socket *s = p_dev->socket; - struct resource *res = wh; unsigned int w; int ret; @@ -223,98 +242,111 @@ int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t wh, dev_warn(&p_dev->dev, "failed to set_mem_map\n"); mutex_unlock(&s->ops_mutex); return ret; -} /* pcmcia_map_mem_page */ +} EXPORT_SYMBOL(pcmcia_map_mem_page); -/** pcmcia_modify_configuration +/** + * pcmcia_fixup_iowidth() - reduce io width to 8bit + * @p_dev: pcmcia device * - * Modify a locked socket configuration + * pcmcia_fixup_iowidth() allows a PCMCIA device driver to reduce the + * IO width to 8bit after having called pcmcia_enable_device() + * previously. 
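 *
 * Editor's illustration (not part of this patch; "link" and the
 * use_8bit_io condition are hypothetical driver-side names): a driver
 * needing the narrower data path would typically do
 *
 *	ret = pcmcia_enable_device(link);
 *	if (ret == 0 && use_8bit_io)
 *		ret = pcmcia_fixup_iowidth(link);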
*/ -int pcmcia_modify_configuration(struct pcmcia_device *p_dev, - modconf_t *mod) +int pcmcia_fixup_iowidth(struct pcmcia_device *p_dev) { - struct pcmcia_socket *s; - config_t *c; - int ret; - - s = p_dev->socket; + struct pcmcia_socket *s = p_dev->socket; + pccard_io_map io_off = { 0, 0, 0, 0, 1 }; + pccard_io_map io_on; + int i, ret = 0; mutex_lock(&s->ops_mutex); - c = p_dev->function_config; - if (!(s->state & SOCKET_PRESENT)) { - dev_dbg(&p_dev->dev, "No card present\n"); - ret = -ENODEV; - goto unlock; - } - if (!(c->state & CONFIG_LOCKED)) { - dev_dbg(&p_dev->dev, "Configuration isnt't locked\n"); + dev_dbg(&p_dev->dev, "fixup iowidth to 8bit\n"); + + if (!(s->state & SOCKET_PRESENT) || + !(p_dev->function_config->state & CONFIG_LOCKED)) { + dev_dbg(&p_dev->dev, "No card? Config not locked?\n"); ret = -EACCES; goto unlock; } - if (mod->Attributes & (CONF_IRQ_CHANGE_VALID | CONF_VCC_CHANGE_VALID)) { - dev_dbg(&p_dev->dev, - "changing Vcc or IRQ is not allowed at this time\n"); - ret = -EINVAL; - goto unlock; - } + io_on.speed = io_speed; + for (i = 0; i < MAX_IO_WIN; i++) { + if (!s->io[i].res) + continue; + io_off.map = i; + io_on.map = i; - /* We only allow changing Vpp1 and Vpp2 to the same value */ - if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) && - (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { - if (mod->Vpp1 != mod->Vpp2) { - dev_dbg(&p_dev->dev, - "Vpp1 and Vpp2 must be the same\n"); - ret = -EINVAL; - goto unlock; - } - s->socket.Vpp = mod->Vpp1; - if (s->ops->set_socket(s, &s->socket)) { - dev_printk(KERN_WARNING, &p_dev->dev, - "Unable to set VPP\n"); - ret = -EIO; - goto unlock; - } - } else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) || - (mod->Attributes & CONF_VPP2_CHANGE_VALID)) { - dev_dbg(&p_dev->dev, - "changing Vcc is not allowed at this time\n"); - ret = -EINVAL; - goto unlock; + io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8; + io_on.start = s->io[i].res->start; + io_on.stop = s->io[i].res->end; + + s->ops->set_io_map(s, &io_off); + mdelay(40); + s->ops->set_io_map(s, &io_on); } +unlock: + mutex_unlock(&s->ops_mutex); + + return ret; +} +EXPORT_SYMBOL(pcmcia_fixup_iowidth); + + +/** + * pcmcia_fixup_vpp() - set Vpp to a new voltage level + * @p_dev: pcmcia device + * @new_vpp: new Vpp voltage + * + * pcmcia_fixup_vpp() allows a PCMCIA device driver to set Vpp to + * a new voltage level between calls to pcmcia_enable_device() + * and pcmcia_disable_device(). + */ +int pcmcia_fixup_vpp(struct pcmcia_device *p_dev, unsigned char new_vpp) +{ + struct pcmcia_socket *s = p_dev->socket; + int ret = 0; - if (mod->Attributes & CONF_IO_CHANGE_WIDTH) { - pccard_io_map io_off = { 0, 0, 0, 0, 1 }; - pccard_io_map io_on; - int i; + mutex_lock(&s->ops_mutex); - io_on.speed = io_speed; - for (i = 0; i < MAX_IO_WIN; i++) { - if (!s->io[i].res) - continue; - io_off.map = i; - io_on.map = i; + dev_dbg(&p_dev->dev, "fixup Vpp to %d\n", new_vpp); - io_on.flags = MAP_ACTIVE | IO_DATA_PATH_WIDTH_8; - io_on.start = s->io[i].res->start; - io_on.stop = s->io[i].res->end; + if (!(s->state & SOCKET_PRESENT) || + !(p_dev->function_config->state & CONFIG_LOCKED)) { + dev_dbg(&p_dev->dev, "No card? 
Config not locked?\n"); + ret = -EACCES; + goto unlock; + } - s->ops->set_io_map(s, &io_off); - mdelay(40); - s->ops->set_io_map(s, &io_on); - } + s->socket.Vpp = new_vpp; + if (s->ops->set_socket(s, &s->socket)) { + dev_warn(&p_dev->dev, "Unable to set VPP\n"); + ret = -EIO; + goto unlock; } - ret = 0; + p_dev->vpp = new_vpp; + unlock: mutex_unlock(&s->ops_mutex); return ret; -} /* modify_configuration */ -EXPORT_SYMBOL(pcmcia_modify_configuration); +} +EXPORT_SYMBOL(pcmcia_fixup_vpp); +/** + * pcmcia_release_configuration() - physically disable a PCMCIA device + * @p_dev: pcmcia device + * + * pcmcia_release_configuration() is the 1:1 counterpart to + * pcmcia_enable_device(): If a PCMCIA device is no longer used by any + * driver, the Vpp voltage is set to 0, IRQs will no longer be generated, + * and I/O ranges will be disabled. As pcmcia_release_io() and + * pcmcia_release_window() still need to be called, device drivers are + * expected to call pcmcia_disable_device() instead. + */ int pcmcia_release_configuration(struct pcmcia_device *p_dev) { pccard_io_map io = { 0, 0, 0, 0, 1 }; @@ -327,7 +359,7 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev) if (p_dev->_locked) { p_dev->_locked = 0; if (--(s->lock_count) == 0) { - s->socket.flags = SS_OUTPUT_ENA; /* Is this correct? */ + s->socket.flags = SS_OUTPUT_ENA; /* Is this correct? */ s->socket.Vpp = 0; s->socket.io_irq = 0; s->ops->set_socket(s, &s->socket); @@ -349,16 +381,18 @@ int pcmcia_release_configuration(struct pcmcia_device *p_dev) mutex_unlock(&s->ops_mutex); return 0; -} /* pcmcia_release_configuration */ +} -/** pcmcia_release_io +/** + * pcmcia_release_io() - release I/O allocated by a PCMCIA device + * @p_dev: pcmcia device * - * Release_io() releases the I/O ranges allocated by a client. This - * may be invoked some time after a card ejection has already dumped - * the actual socket configuration, so if the client is "stale", we - * don't bother checking the port ranges against the current socket - * values. + * pcmcia_release_io() releases the I/O ranges allocated by a PCMCIA + * device. This may be invoked some time after a card ejection has + * already dumped the actual socket configuration, so if the client is + * "stale", we don't bother checking the port ranges against the + * current socket values. */ static int pcmcia_release_io(struct pcmcia_device *p_dev) { @@ -387,6 +421,14 @@ out: } /* pcmcia_release_io */ +/** + * pcmcia_release_window() - release reserved iomem for PCMCIA devices + * @p_dev: pcmcia device + * @res: iomem resource to release + * + * pcmcia_release_window() releases &struct resource *res which was + * previously reserved by calling pcmcia_request_window(). + */ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res) { struct pcmcia_socket *s = p_dev->socket; @@ -420,6 +462,8 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res) kfree(win->res); win->res = NULL; } + res->start = res->end = 0; + res->flags = IORESOURCE_MEM; p_dev->_win &= ~CLIENT_WIN_REQ(w); mutex_unlock(&s->ops_mutex); @@ -428,23 +472,30 @@ int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res) EXPORT_SYMBOL(pcmcia_release_window); -int pcmcia_request_configuration(struct pcmcia_device *p_dev, - config_req_t *req) +/** + * pcmcia_enable_device() - set up and activate a PCMCIA device + * @p_dev: the associated PCMCIA device + * + * pcmcia_enable_device() physically enables a PCMCIA device. 
It parses + * the flags passed to in @flags and stored in @p_dev->flags and sets up + * the Vpp voltage, enables the speaker line, I/O ports and store proper + * values to configuration registers. + */ +int pcmcia_enable_device(struct pcmcia_device *p_dev) { int i; - u_int base; + unsigned int base; struct pcmcia_socket *s = p_dev->socket; config_t *c; pccard_io_map iomap; + unsigned char status = 0; + unsigned char ext_status = 0; + unsigned char option = 0; + unsigned int flags = p_dev->config_flags; if (!(s->state & SOCKET_PRESENT)) return -ENODEV; - if (req->IntType & INT_CARDBUS) { - dev_dbg(&p_dev->dev, "IntType may not be INT_CARDBUS\n"); - return -EINVAL; - } - mutex_lock(&s->ops_mutex); c = p_dev->function_config; if (c->state & CONFIG_LOCKED) { @@ -454,7 +505,7 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, } /* Do power control. We don't allow changes in Vcc. */ - s->socket.Vpp = req->Vpp; + s->socket.Vpp = p_dev->vpp; if (s->ops->set_socket(s, &s->socket)) { mutex_unlock(&s->ops_mutex); dev_printk(KERN_WARNING, &p_dev->dev, @@ -463,64 +514,74 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, } /* Pick memory or I/O card, DMA mode, interrupt */ - c->IntType = req->IntType; - c->Attributes = req->Attributes; - if (req->IntType & INT_MEMORY_AND_IO) + if (p_dev->_io || flags & CONF_ENABLE_IRQ) + flags |= CONF_ENABLE_IOCARD; + if (flags & CONF_ENABLE_IOCARD) s->socket.flags |= SS_IOCARD; - if (req->IntType & INT_ZOOMED_VIDEO) - s->socket.flags |= SS_ZVCARD | SS_IOCARD; - if (req->Attributes & CONF_ENABLE_DMA) - s->socket.flags |= SS_DMA_MODE; - if (req->Attributes & CONF_ENABLE_SPKR) + if (flags & CONF_ENABLE_SPKR) { s->socket.flags |= SS_SPKR_ENA; - if (req->Attributes & CONF_ENABLE_IRQ) + status = CCSR_AUDIO_ENA; + if (!(p_dev->config_regs & PRESENT_STATUS)) + dev_warn(&p_dev->dev, "speaker requested, but " + "PRESENT_STATUS not set!\n"); + } + if (flags & CONF_ENABLE_IRQ) s->socket.io_irq = s->pcmcia_irq; else s->socket.io_irq = 0; + if (flags & CONF_ENABLE_ESR) { + p_dev->config_regs |= PRESENT_EXT_STATUS; + ext_status = ESR_REQ_ATTN_ENA; + } s->ops->set_socket(s, &s->socket); s->lock_count++; + dev_dbg(&p_dev->dev, + "enable_device: V %d, flags %x, base %x, regs %x, idx %x\n", + p_dev->vpp, flags, p_dev->config_base, p_dev->config_regs, + p_dev->config_index); + /* Set up CIS configuration registers */ - base = c->ConfigBase = req->ConfigBase; - c->CardValues = req->Present; - if (req->Present & PRESENT_COPY) { - c->Copy = req->Copy; - pcmcia_write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &c->Copy); - } - if (req->Present & PRESENT_OPTION) { + base = p_dev->config_base; + if (p_dev->config_regs & PRESENT_COPY) { + u16 tmp = 0; + dev_dbg(&p_dev->dev, "clearing CISREG_SCR\n"); + pcmcia_write_cis_mem(s, 1, (base + CISREG_SCR)>>1, 1, &tmp); + } + if (p_dev->config_regs & PRESENT_PIN_REPLACE) { + u16 tmp = 0; + dev_dbg(&p_dev->dev, "clearing CISREG_PRR\n"); + pcmcia_write_cis_mem(s, 1, (base + CISREG_PRR)>>1, 1, &tmp); + } + if (p_dev->config_regs & PRESENT_OPTION) { if (s->functions == 1) { - c->Option = req->ConfigIndex & COR_CONFIG_MASK; + option = p_dev->config_index & COR_CONFIG_MASK; } else { - c->Option = req->ConfigIndex & COR_MFC_CONFIG_MASK; - c->Option |= COR_FUNC_ENA|COR_IREQ_ENA; - if (req->Present & PRESENT_IOBASE_0) - c->Option |= COR_ADDR_DECODE; + option = p_dev->config_index & COR_MFC_CONFIG_MASK; + option |= COR_FUNC_ENA|COR_IREQ_ENA; + if (p_dev->config_regs & PRESENT_IOBASE_0) + option |= COR_ADDR_DECODE; } - if ((req->Attributes 
& CONF_ENABLE_IRQ) && - !(req->Attributes & CONF_ENABLE_PULSE_IRQ)) - c->Option |= COR_LEVEL_REQ; - pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &c->Option); + if ((flags & CONF_ENABLE_IRQ) && + !(flags & CONF_ENABLE_PULSE_IRQ)) + option |= COR_LEVEL_REQ; + pcmcia_write_cis_mem(s, 1, (base + CISREG_COR)>>1, 1, &option); mdelay(40); } - if (req->Present & PRESENT_STATUS) { - c->Status = req->Status; - pcmcia_write_cis_mem(s, 1, (base + CISREG_CCSR)>>1, 1, &c->Status); - } - if (req->Present & PRESENT_PIN_REPLACE) { - c->Pin = req->Pin; - pcmcia_write_cis_mem(s, 1, (base + CISREG_PRR)>>1, 1, &c->Pin); - } - if (req->Present & PRESENT_EXT_STATUS) { - c->ExtStatus = req->ExtStatus; - pcmcia_write_cis_mem(s, 1, (base + CISREG_ESR)>>1, 1, &c->ExtStatus); - } - if (req->Present & PRESENT_IOBASE_0) { + if (p_dev->config_regs & PRESENT_STATUS) + pcmcia_write_cis_mem(s, 1, (base + CISREG_CCSR)>>1, 1, &status); + + if (p_dev->config_regs & PRESENT_EXT_STATUS) + pcmcia_write_cis_mem(s, 1, (base + CISREG_ESR)>>1, 1, + &ext_status); + + if (p_dev->config_regs & PRESENT_IOBASE_0) { u8 b = c->io[0].start & 0xff; pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_0)>>1, 1, &b); b = (c->io[0].start >> 8) & 0xff; pcmcia_write_cis_mem(s, 1, (base + CISREG_IOBASE_1)>>1, 1, &b); } - if (req->Present & PRESENT_IOSIZE) { + if (p_dev->config_regs & PRESENT_IOSIZE) { u8 b = resource_size(&c->io[0]) + resource_size(&c->io[1]) - 1; pcmcia_write_cis_mem(s, 1, (base + CISREG_IOSIZE)>>1, 1, &b); } @@ -551,14 +612,15 @@ int pcmcia_request_configuration(struct pcmcia_device *p_dev, p_dev->_locked = 1; mutex_unlock(&s->ops_mutex); return 0; -} /* pcmcia_request_configuration */ -EXPORT_SYMBOL(pcmcia_request_configuration); +} /* pcmcia_enable_device */ +EXPORT_SYMBOL(pcmcia_enable_device); /** * pcmcia_request_io() - attempt to reserve port ranges for PCMCIA devices + * @p_dev: the associated PCMCIA device * - * pcmcia_request_io() attepts to reserve the IO port ranges specified in + * pcmcia_request_io() attempts to reserve the IO port ranges specified in * &struct pcmcia_device @p_dev->resource[0] and @p_dev->resource[1]. The * "start" value is the requested start of the IO port resource; "end" * reflects the number of ports requested. The number of IO lines requested @@ -595,7 +657,13 @@ int pcmcia_request_io(struct pcmcia_device *p_dev) if (c->io[1].end) { ret = alloc_io_space(s, &c->io[1], p_dev->io_lines); if (ret) { + struct resource tmp = c->io[0]; + /* release the previously allocated resource */ release_io_space(s, &c->io[0]); + /* but preserve the settings, for they worked... */ + c->io[0].end = resource_size(&tmp); + c->io[0].start = tmp.start; + c->io[0].flags = tmp.flags; goto out; } } else @@ -616,11 +684,13 @@ EXPORT_SYMBOL(pcmcia_request_io); /** * pcmcia_request_irq() - attempt to request a IRQ for a PCMCIA device + * @p_dev: the associated PCMCIA device + * @handler: IRQ handler to register * - * pcmcia_request_irq() is a wrapper around request_irq which will allow + * pcmcia_request_irq() is a wrapper around request_irq() which allows * the PCMCIA core to clean up the registration in pcmcia_disable_device(). * Drivers are free to use request_irq() directly, but then they need to - * call free_irq themselfves, too. Also, only IRQF_SHARED capable IRQ + * call free_irq() themselfves, too. Also, only %IRQF_SHARED capable IRQ * handlers are allowed. 
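 *
 * Minimal usage sketch (editor's illustration, not part of this patch;
 * my_isr is a hypothetical IRQF_SHARED-capable handler in the driver):
 *
 *	ret = pcmcia_request_irq(link, my_isr);
 *	if (ret)
 *		goto failed;
 *
 * The assigned interrupt is then available in p_dev->irq, and the core
 * frees it again from pcmcia_disable_device().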
*/ int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev, @@ -643,12 +713,14 @@ EXPORT_SYMBOL(pcmcia_request_irq); /** * pcmcia_request_exclusive_irq() - attempt to request an exclusive IRQ first + * @p_dev: the associated PCMCIA device + * @handler: IRQ handler to register * - * pcmcia_request_exclusive_irq() is a wrapper around request_irq which + * pcmcia_request_exclusive_irq() is a wrapper around request_irq() which * attempts first to request an exclusive IRQ. If it fails, it also accepts * a shared IRQ, but prints out a warning. PCMCIA drivers should allow for * IRQ sharing and either use request_irq directly (then they need to call - * free_irq themselves, too), or the pcmcia_request_irq() function. + * free_irq() themselves, too), or the pcmcia_request_irq() function. */ int __must_check __pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev, @@ -789,38 +861,47 @@ int pcmcia_setup_irq(struct pcmcia_device *p_dev) } -/** pcmcia_request_window +/** + * pcmcia_request_window() - attempt to reserve iomem for PCMCIA devices + * @p_dev: the associated PCMCIA device + * @res: &struct resource pointing to p_dev->resource[2..5] + * @speed: access speed * - * Request_window() establishes a mapping between card memory space - * and system memory space. + * pcmcia_request_window() attepts to reserve an iomem ranges specified in + * &struct resource @res pointing to one of the entries in + * &struct pcmcia_device @p_dev->resource[2..5]. The "start" value is the + * requested start of the IO mem resource; "end" reflects the size + * requested. */ -int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_handle_t *wh) +int pcmcia_request_window(struct pcmcia_device *p_dev, struct resource *res, + unsigned int speed) { struct pcmcia_socket *s = p_dev->socket; pccard_mem_map *win; u_long align; - struct resource *res; int w; + dev_dbg(&p_dev->dev, "request_window %pR %d\n", res, speed); + if (!(s->state & SOCKET_PRESENT)) { dev_dbg(&p_dev->dev, "No card present\n"); return -ENODEV; } /* Window size defaults to smallest available */ - if (req->Size == 0) - req->Size = s->map_size; - align = (s->features & SS_CAP_MEM_ALIGN) ? req->Size : s->map_size; - if (req->Size & (s->map_size-1)) { + if (res->end == 0) + res->end = s->map_size; + align = (s->features & SS_CAP_MEM_ALIGN) ? 
res->end : s->map_size; + if (res->end & (s->map_size-1)) { dev_dbg(&p_dev->dev, "invalid map size\n"); return -EINVAL; } - if ((req->Base && (s->features & SS_CAP_STATIC_MAP)) || - (req->Base & (align-1))) { + if ((res->start && (s->features & SS_CAP_STATIC_MAP)) || + (res->start & (align-1))) { dev_dbg(&p_dev->dev, "invalid base address\n"); return -EINVAL; } - if (req->Base) + if (res->start) align = 0; /* Allocate system memory window */ @@ -837,7 +918,7 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha win = &s->win[w]; if (!(s->features & SS_CAP_STATIC_MAP)) { - win->res = pcmcia_find_mem_region(req->Base, req->Size, align, + win->res = pcmcia_find_mem_region(res->start, res->end, align, 0, s); if (!win->res) { dev_dbg(&p_dev->dev, "allocating mem region failed\n"); @@ -849,8 +930,8 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha /* Configure the socket controller */ win->map = w+1; - win->flags = req->Attributes; - win->speed = req->AccessSpeed; + win->flags = res->flags & WIN_FLAGS_MAP; + win->speed = speed; win->card_start = 0; if (s->ops->set_mem_map(s, win) != 0) { @@ -862,17 +943,14 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha /* Return window handle */ if (s->features & SS_CAP_STATIC_MAP) - req->Base = win->static_start; + res->start = win->static_start; else - req->Base = win->res->start; + res->start = win->res->start; /* convert to new-style resources */ - res = p_dev->resource[w + MAX_IO_WIN]; - res->start = req->Base; - res->end = req->Base + req->Size - 1; - res->flags &= ~IORESOURCE_BITS; - res->flags |= (req->Attributes & WIN_FLAGS_MAP) | (win->map << 2); - res->flags |= IORESOURCE_MEM; + res->end += res->start - 1; + res->flags &= ~WIN_FLAGS_REQ; + res->flags |= (win->map << 2) | IORESOURCE_MEM; res->parent = win->res; if (win->res) request_resource(&iomem_resource, res); @@ -880,15 +958,30 @@ int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, window_ha dev_dbg(&p_dev->dev, "request_window results in %pR\n", res); mutex_unlock(&s->ops_mutex); - *wh = res; return 0; } /* pcmcia_request_window */ EXPORT_SYMBOL(pcmcia_request_window); + +/** + * pcmcia_disable_device() - disable and clean up a PCMCIA device + * @p_dev: the associated PCMCIA device + * + * pcmcia_disable_device() is the driver-callable counterpart to + * pcmcia_enable_device(): If a PCMCIA device is no longer used, + * drivers are expected to clean up and disable the device by calling + * this function. Any I/O ranges (iomem and ioports) will be released, + * the Vpp voltage will be set to 0, and IRQs will no longer be + * generated -- at least if there is no other card function (of + * multifunction devices) being used. 
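 *
 * Editor's illustration of the converted, resource-based flow (not part
 * of this patch; the window size, flags, speed and card offset are
 * placeholder values a real driver derives from its hardware):
 *
 *	link->resource[2]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
 *	link->resource[2]->end = 0x1000;
 *	ret = pcmcia_request_window(link, link->resource[2], 0);
 *	if (ret == 0)
 *		ret = pcmcia_map_mem_page(link, link->resource[2], 0);
 *	...
 *	pcmcia_disable_device(link);
 *
 * where the final call releases the iomem window, any I/O ports and the
 * IRQ, and resets Vpp, as described above.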
+ */ void pcmcia_disable_device(struct pcmcia_device *p_dev) { int i; + + dev_dbg(&p_dev->dev, "disabling device\n"); + for (i = 0; i < MAX_WIN; i++) { struct resource *res = p_dev->resource[MAX_IO_WIN + i]; if (res->flags & WIN_FLAGS_REQ) diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index b8a869af0f4..8cbfa067171 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c @@ -18,7 +18,6 @@ #include <linux/io.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <asm/system.h> @@ -646,7 +645,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, if (!pci_resource_start(dev, 0)) { dev_warn(&dev->dev, "refusing to load the driver as the " "io_base is NULL.\n"); - goto err_out_free_mem; + goto err_out_disable; } dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx " diff --git a/drivers/pcmcia/rsrc_iodyn.c b/drivers/pcmcia/rsrc_iodyn.c index 8510c35d295..523eb691c30 100644 --- a/drivers/pcmcia/rsrc_iodyn.c +++ b/drivers/pcmcia/rsrc_iodyn.c @@ -17,7 +17,6 @@ #include <linux/kernel.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c index 4e80421fd90..aa628ed0e9f 100644 --- a/drivers/pcmcia/rsrc_mgr.c +++ b/drivers/pcmcia/rsrc_mgr.c @@ -17,7 +17,6 @@ #include <linux/kernel.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 96f348b35fd..b187555d438 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -29,7 +29,6 @@ #include <asm/irq.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include "cs_internal.h" diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c index e0985148029..945857f8c28 100644 --- a/drivers/pcmcia/sa1100_generic.c +++ b/drivers/pcmcia/sa1100_generic.c @@ -35,7 +35,6 @@ #include <linux/slab.h> #include <linux/platform_device.h> -#include <pcmcia/cs.h> #include <pcmcia/ss.h> #include <asm/hardware/scoop.h> diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 6f1a86b43c6..689e3c02edb 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -627,8 +627,6 @@ void soc_pcmcia_remove_one(struct soc_pcmcia_socket *skt) pcmcia_unregister_socket(&skt->socket); - flush_scheduled_work(); - skt->ops->hw_shutdown(skt); soc_common_pcmcia_config_skt(skt, &dead_socket); @@ -720,8 +718,6 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt) pcmcia_unregister_socket(&skt->socket); out_err_7: - flush_scheduled_work(); - skt->ops->hw_shutdown(skt); out_err_6: list_del(&skt->node); diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h index 3fba3a67912..bbcd5385a22 100644 --- a/drivers/pcmcia/soc_common.h +++ b/drivers/pcmcia/soc_common.h @@ -11,7 +11,6 @@ /* include the world */ #include <linux/cpufreq.h> -#include <pcmcia/cs.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c index cb0d3ace18b..71aeed93037 100644 --- a/drivers/pcmcia/socket_sysfs.c +++ b/drivers/pcmcia/socket_sysfs.c @@ -27,7 +27,6 @@ #include <asm/irq.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index be0d841c7eb..310160bffe3 100644 --- a/drivers/pcmcia/tcic.c +++ 
b/drivers/pcmcia/tcic.c @@ -49,7 +49,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/ss.h> #include "tcic.h" diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c index 9b3c15827e5..c6d36b3a6ce 100644 --- a/drivers/pcmcia/vrc4173_cardu.c +++ b/drivers/pcmcia/vrc4173_cardu.c @@ -461,7 +461,7 @@ static int __devinit vrc4173_cardu_probe(struct pci_dev *dev, { vrc4173_socket_t *socket; unsigned long start, len, flags; - int slot, err; + int slot, err, ret; slot = vrc4173_cardu_slots++; socket = &cardu_sockets[slot]; @@ -474,43 +474,63 @@ static int __devinit vrc4173_cardu_probe(struct pci_dev *dev, return err; start = pci_resource_start(dev, 0); - if (start == 0) - return -ENODEV; + if (start == 0) { + ret = -ENODEV; + goto disable; + } len = pci_resource_len(dev, 0); - if (len == 0) - return -ENODEV; + if (len == 0) { + ret = -ENODEV; + goto disable; + } - if (((flags = pci_resource_flags(dev, 0)) & IORESOURCE_MEM) == 0) - return -EBUSY; + flags = pci_resource_flags(dev, 0); + if ((flags & IORESOURCE_MEM) == 0) { + ret = -EBUSY; + goto disable; + } - if ((err = pci_request_regions(dev, socket->name)) < 0) - return err; + err = pci_request_regions(dev, socket->name); + if (err < 0) { + ret = err; + goto disable; + } socket->base = ioremap(start, len); - if (socket->base == NULL) - return -ENODEV; + if (socket->base == NULL) { + ret = -ENODEV; + goto release; + } socket->dev = dev; socket->pcmcia_socket = pcmcia_register_socket(slot, &cardu_operations, 1); if (socket->pcmcia_socket == NULL) { - iounmap(socket->base); - socket->base = NULL; - return -ENOMEM; + ret = -ENOMEM; + goto unmap; } if (request_irq(dev->irq, cardu_interrupt, IRQF_SHARED, socket->name, socket) < 0) { - pcmcia_unregister_socket(socket->pcmcia_socket); - socket->pcmcia_socket = NULL; - iounmap(socket->base); - socket->base = NULL; - return -EBUSY; + ret = -EBUSY; + goto unregister; } printk(KERN_INFO "%s at %#08lx, IRQ %d\n", socket->name, start, dev->irq); return 0; + +unregister: + pcmcia_unregister_socket(socket->pcmcia_socket); + socket->pcmcia_socket = NULL; +unmap: + iounmap(socket->base); + socket->base = NULL; +release: + pci_release_regions(dev); +disable: + pci_disable_device(dev); + return ret; } static int __devinit vrc4173_cardu_setup(char *options) diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c index fa88c360c37..3b67a1b6a19 100644 --- a/drivers/pcmcia/xxs1500_ss.c +++ b/drivers/pcmcia/xxs1500_ss.c @@ -17,7 +17,6 @@ #include <linux/slab.h> #include <linux/spinlock.h> -#include <pcmcia/cs.h> #include <pcmcia/ss.h> #include <pcmcia/cistpl.h> diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 414d9a6f9a3..408dbaa080a 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -20,7 +20,6 @@ #include <linux/slab.h> #include <pcmcia/ss.h> -#include <pcmcia/cs.h> #include "yenta_socket.h" #include "i82365.h" diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 9024480a822..c44a5e8b8b8 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -51,7 +51,6 @@ * TODO: * - handle CPU hotplug * - provide turbo enable/disable api - * - make sure we can write turbo enable/disable reg based on MISC_EN * * Related documents: * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 @@ -230,7 +229,7 @@ #define THM_TC2 0xac #define THM_DTV 0xb0 #define THM_ITV 0xd8 -#define ITV_ME_SEQNO_MASK 0x000f0000 /* ME should update every ~200ms 
*/ +#define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */ #define ITV_ME_SEQNO_SHIFT (16) #define ITV_MCH_TEMP_MASK 0x0000ff00 #define ITV_MCH_TEMP_SHIFT (8) @@ -325,6 +324,7 @@ struct ips_driver { bool gpu_preferred; bool poll_turbo_status; bool second_cpu; + bool turbo_toggle_allowed; struct ips_mcp_limits *limits; /* Optional MCH interfaces for if i915 is in use */ @@ -415,7 +415,7 @@ static void ips_cpu_lower(struct ips_driver *ips) new_limit = cur_limit - 8; /* 1W decrease */ /* Clamp to SKU TDP limit */ - if (((new_limit * 10) / 8) < (ips->orig_turbo_limit & TURBO_TDP_MASK)) + if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK)) new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK; thm_writew(THM_MPCPC, (new_limit * 10) / 8); @@ -461,7 +461,8 @@ static void ips_enable_cpu_turbo(struct ips_driver *ips) if (ips->__cpu_turbo_on) return; - on_each_cpu(do_enable_cpu_turbo, ips, 1); + if (ips->turbo_toggle_allowed) + on_each_cpu(do_enable_cpu_turbo, ips, 1); ips->__cpu_turbo_on = true; } @@ -498,7 +499,8 @@ static void ips_disable_cpu_turbo(struct ips_driver *ips) if (!ips->__cpu_turbo_on) return; - on_each_cpu(do_disable_cpu_turbo, ips, 1); + if (ips->turbo_toggle_allowed) + on_each_cpu(do_disable_cpu_turbo, ips, 1); ips->__cpu_turbo_on = false; } @@ -598,17 +600,29 @@ static bool mcp_exceeded(struct ips_driver *ips) { unsigned long flags; bool ret = false; + u32 temp_limit; + u32 avg_power; + const char *msg = "MCP limit exceeded: "; spin_lock_irqsave(&ips->turbo_status_lock, flags); - if (ips->mcp_avg_temp > (ips->mcp_temp_limit * 100)) - ret = true; - if (ips->cpu_avg_power + ips->mch_avg_power > ips->mcp_power_limit) + + temp_limit = ips->mcp_temp_limit * 100; + if (ips->mcp_avg_temp > temp_limit) { + dev_info(&ips->dev->dev, + "%sAvg temp %u, limit %u\n", msg, ips->mcp_avg_temp, + temp_limit); ret = true; - spin_unlock_irqrestore(&ips->turbo_status_lock, flags); + } - if (ret) + avg_power = ips->cpu_avg_power + ips->mch_avg_power; + if (avg_power > ips->mcp_power_limit) { dev_info(&ips->dev->dev, - "MCP power or thermal limit exceeded\n"); + "%sAvg power %u, limit %u\n", msg, avg_power, + ips->mcp_power_limit); + ret = true; + } + + spin_unlock_irqrestore(&ips->turbo_status_lock, flags); return ret; } @@ -663,6 +677,27 @@ static bool mch_exceeded(struct ips_driver *ips) } /** + * verify_limits - verify BIOS provided limits + * @ips: IPS structure + * + * BIOS can optionally provide non-default limits for power and temp. Check + * them here and use the defaults if the BIOS values are not provided or + * are otherwise unusable. 
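 *
 * Worked example (editor's illustration): a BIOS-reported mcp_power_limit
 * of 40000 mW exceeds the 35 W ceiling enforced below and is replaced by
 * the SKU default, while an mcp_temp_limit above 150 degC (or below the
 * per-domain defaults) falls back to min(core_temp_limit, mch_temp_limit).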
+ */ +static void verify_limits(struct ips_driver *ips) +{ + if (ips->mcp_power_limit < ips->limits->mcp_power_limit || + ips->mcp_power_limit > 35000) + ips->mcp_power_limit = ips->limits->mcp_power_limit; + + if (ips->mcp_temp_limit < ips->limits->core_temp_limit || + ips->mcp_temp_limit < ips->limits->mch_temp_limit || + ips->mcp_temp_limit > 150) + ips->mcp_temp_limit = min(ips->limits->core_temp_limit, + ips->limits->mch_temp_limit); +} + +/** * update_turbo_limits - get various limits & settings from regs * @ips: IPS driver struct * @@ -680,12 +715,21 @@ static void update_turbo_limits(struct ips_driver *ips) u32 hts = thm_readl(THM_HTS); ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); - ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); + /* + * Disable turbo for now, until we can figure out why the power figures + * are wrong + */ + ips->cpu_turbo_enabled = false; + + if (ips->gpu_busy) + ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); + ips->core_power_limit = thm_readw(THM_MPCPC); ips->mch_power_limit = thm_readw(THM_MMGPC); ips->mcp_temp_limit = thm_readw(THM_PTL); ips->mcp_power_limit = thm_readw(THM_MPPC); + verify_limits(ips); /* Ignore BIOS CPU vs GPU pref */ } @@ -858,7 +902,7 @@ static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period) ret = (ret * 1000) / 65535; *last = val; - return ret; + return 0; } static const u16 temp_decay_factor = 2; @@ -940,7 +984,6 @@ static int ips_monitor(void *data) kfree(mch_samples); kfree(cpu_samples); kfree(mchp_samples); - kthread_stop(ips->adjust); return -ENOMEM; } @@ -948,7 +991,7 @@ static int ips_monitor(void *data) ITV_ME_SEQNO_SHIFT; seqno_timestamp = get_jiffies_64(); - old_cpu_power = thm_readl(THM_CEC) / 65535; + old_cpu_power = thm_readl(THM_CEC); schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); /* Collect an initial average */ @@ -1150,11 +1193,18 @@ static irqreturn_t ips_irq_handler(int irq, void *arg) STS_GPL_SHIFT; /* ignore EC CPU vs GPU pref */ ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); - ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); + /* + * Disable turbo for now, until we can figure + * out why the power figures are wrong + */ + ips->cpu_turbo_enabled = false; + if (ips->gpu_busy) + ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> STS_PTL_SHIFT; ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> STS_PPL_SHIFT; + verify_limits(ips); spin_unlock(&ips->turbo_status_lock); thm_writeb(THM_SEC, SEC_ACK); @@ -1333,8 +1383,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) * turbo manually or we'll get an illegal MSR access, even though * turbo will still be available. 
*/ - if (!(misc_en & IA32_MISC_TURBO_EN)) - ; /* add turbo MSR write allowed flag if necessary */ + if (misc_en & IA32_MISC_TURBO_EN) + ips->turbo_toggle_allowed = true; + else + ips->turbo_toggle_allowed = false; if (strstr(boot_cpu_data.x86_model_id, "CPU M")) limits = &ips_sv_limits; @@ -1351,9 +1403,10 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) tdp = turbo_power & TURBO_TDP_MASK; /* Sanity check TDP against CPU */ - if (limits->mcp_power_limit != (tdp / 8) * 1000) { - dev_warn(&ips->dev->dev, "Warning: CPU TDP doesn't match expected value (found %d, expected %d)\n", - tdp / 8, limits->mcp_power_limit / 1000); + if (limits->core_power_limit != (tdp / 8) * 1000) { + dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n", + tdp / 8, limits->core_power_limit / 1000); + limits->core_power_limit = (tdp / 8) * 1000; } out: @@ -1390,7 +1443,7 @@ static bool ips_get_i915_syms(struct ips_driver *ips) return true; out_put_busy: - symbol_put(i915_gpu_turbo_disable); + symbol_put(i915_gpu_busy); out_put_lower: symbol_put(i915_gpu_lower); out_put_raise: @@ -1532,22 +1585,27 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) /* Save turbo limits & ratios */ rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); - ips_enable_cpu_turbo(ips); - ips->cpu_turbo_enabled = true; + ips_disable_cpu_turbo(ips); + ips->cpu_turbo_enabled = false; - /* Set up the work queue and monitor/adjust threads */ - ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); - if (IS_ERR(ips->monitor)) { + /* Create thermal adjust thread */ + ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); + if (IS_ERR(ips->adjust)) { dev_err(&dev->dev, - "failed to create thermal monitor thread, aborting\n"); + "failed to create thermal adjust thread, aborting\n"); ret = -ENOMEM; goto error_free_irq; + } - ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust"); - if (IS_ERR(ips->adjust)) { + /* + * Set up the work queue and monitor thread. The monitor thread + * will wake up ips_adjust thread. 
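 *
 * Editor's note (not part of this patch): ips-adjust was only created
 * with kthread_create() above, so it stays parked until woken;
 * kthread_run() used below is roughly
 *
 *	struct task_struct *t = kthread_create(fn, data, "name");
 *	if (!IS_ERR(t))
 *		wake_up_process(t);
 *
 * so the monitor thread starts immediately and can later wake ips->adjust.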
+ */ + ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor"); + if (IS_ERR(ips->monitor)) { dev_err(&dev->dev, - "failed to create thermal adjust thread, aborting\n"); + "failed to create thermal monitor thread, aborting\n"); ret = -ENOMEM; goto error_thread_cleanup; } @@ -1566,7 +1624,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) return ret; error_thread_cleanup: - kthread_stop(ips->monitor); + kthread_stop(ips->adjust); error_free_irq: free_irq(ips->dev->irq, ips); error_unmap: diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index e3154ff7a39..f200677851b 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -2360,6 +2360,7 @@ static const struct file_operations sonypi_misc_fops = { .release = sonypi_misc_release, .fasync = sonypi_misc_fasync, .unlocked_ioctl = sonypi_misc_ioctl, + .llseek = noop_llseek, }; static struct miscdevice sonypi_misc_device = { diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e35ed128bde..2d61186ad5a 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3093,7 +3093,8 @@ static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = { TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */ }; -typedef u16 tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; +typedef u16 tpacpi_keymap_entry_t; +typedef tpacpi_keymap_entry_t tpacpi_keymap_t[TPACPI_HOTKEY_MAP_LEN]; static int __init hotkey_init(struct ibm_init_struct *iibm) { @@ -3230,7 +3231,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm) }; #define TPACPI_HOTKEY_MAP_SIZE sizeof(tpacpi_keymap_t) -#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_t[0]) +#define TPACPI_HOTKEY_MAP_TYPESIZE sizeof(tpacpi_keymap_entry_t) int res, i; int status; diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c index 3f94edab25f..e73ebefdf3e 100644 --- a/drivers/pnp/isapnp/proc.c +++ b/drivers/pnp/isapnp/proc.c @@ -31,8 +31,9 @@ static struct proc_dir_entry *isapnp_proc_bus_dir = NULL; static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence) { loff_t new = -1; + struct inode *inode = file->f_path.dentry->d_inode; - lock_kernel(); + mutex_lock(&inode->i_mutex); switch (whence) { case 0: new = off; @@ -44,12 +45,12 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence) new = 256 + off; break; } - if (new < 0 || new > 256) { - unlock_kernel(); - return -EINVAL; - } - unlock_kernel(); - return (file->f_pos = new); + if (new < 0 || new > 256) + new = -EINVAL; + else + file->f_pos = new; + mutex_unlock(&inode->i_mutex); + return new; } static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf, diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c index df1fb53c09d..a4be41614ee 100644 --- a/drivers/regulator/ad5398.c +++ b/drivers/regulator/ad5398.c @@ -256,7 +256,6 @@ static int __devexit ad5398_remove(struct i2c_client *client) regulator_unregister(chip->rdev); kfree(chip); - i2c_set_clientdata(client, NULL); return 0; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 422a709d271..cc8b337b911 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -700,7 +700,7 @@ static void print_constraints(struct regulator_dev *rdev) constraints->min_uA != constraints->max_uA) { ret = _regulator_get_current_limit(rdev); if (ret > 0) - count += sprintf(buf + count, "at %d uA ", ret / 1000); + 
count += sprintf(buf + count, "at %d mA ", ret / 1000); } if (constraints->valid_modes_mask & REGULATOR_MODE_FAST) @@ -2302,8 +2302,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, dev_set_name(&rdev->dev, "regulator.%d", atomic_inc_return(®ulator_no) - 1); ret = device_register(&rdev->dev); - if (ret != 0) + if (ret != 0) { + put_device(&rdev->dev); goto clean; + } dev_set_drvdata(&rdev->dev, rdev); diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c index d61ecb885a8..b8cc6389a54 100644 --- a/drivers/regulator/isl6271a-regulator.c +++ b/drivers/regulator/isl6271a-regulator.c @@ -191,8 +191,6 @@ static int __devexit isl6271a_remove(struct i2c_client *i2c) struct isl_pmic *pmic = i2c_get_clientdata(i2c); int i; - i2c_set_clientdata(i2c, NULL); - for (i = 0; i < 3; i++) regulator_unregister(pmic->rdev[i]); diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c index 4520ace3f7e..6b60a9c0366 100644 --- a/drivers/regulator/max8649.c +++ b/drivers/regulator/max8649.c @@ -330,7 +330,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client, /* set external clock frequency */ info->extclk_freq = pdata->extclk_freq; max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, - info->extclk_freq); + info->extclk_freq << 6); } if (pdata->ramp_timing) { diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c index d26780ea254..261a07e0fb2 100644 --- a/drivers/rtc/rtc-ab3100.c +++ b/drivers/rtc/rtc-ab3100.c @@ -235,6 +235,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev) err = PTR_ERR(rtc); return err; } + platform_set_drvdata(pdev, rtc); return 0; } @@ -244,6 +245,7 @@ static int __exit ab3100_rtc_remove(struct platform_device *pdev) struct rtc_device *rtc = platform_get_drvdata(pdev); rtc_device_unregister(rtc); + platform_set_drvdata(pdev, NULL); return 0; } diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index 9daed8db83d..9de8516e353 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c @@ -268,7 +268,6 @@ out_irq: free_irq(client->irq, client); out_free: - i2c_set_clientdata(client, NULL); kfree(ds3232); return ret; } @@ -287,7 +286,6 @@ static int __devexit ds3232_remove(struct i2c_client *client) } rtc_device_unregister(ds3232->rtc); - i2c_set_clientdata(client, NULL); kfree(ds3232); return 0; } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index d60557cae8e..5a8daa35806 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -20,7 +20,7 @@ #include <linux/module.h> #include <linux/rtc.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/string.h> #ifdef CONFIG_RTC_DRV_M41T80_WDT #include <linux/fs.h> @@ -68,6 +68,7 @@ #define DRV_VERSION "0.05" +static DEFINE_MUTEX(m41t80_rtc_mutex); static const struct i2c_device_id m41t80_id[] = { { "m41t62", M41T80_FEATURE_SQ | M41T80_FEATURE_SQ_ALT }, { "m41t65", M41T80_FEATURE_HT | M41T80_FEATURE_WD }, @@ -677,9 +678,9 @@ static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd, { int ret; - lock_kernel(); + mutex_lock(&m41t80_rtc_mutex); ret = wdt_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&m41t80_rtc_mutex); return ret; } @@ -693,16 +694,16 @@ static long wdt_unlocked_ioctl(struct file *file, unsigned int cmd, static int wdt_open(struct inode *inode, struct file *file) { if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) { - lock_kernel(); + mutex_lock(&m41t80_rtc_mutex); if 
(test_and_set_bit(0, &wdt_is_open)) { - unlock_kernel(); + mutex_unlock(&m41t80_rtc_mutex); return -EBUSY; } /* * Activate */ wdt_is_open = 1; - unlock_kernel(); + mutex_unlock(&m41t80_rtc_mutex); return nonseekable_open(inode, file); } return -ENODEV; @@ -748,6 +749,7 @@ static const struct file_operations wdt_fops = { .write = wdt_write, .open = wdt_open, .release = wdt_release, + .llseek = no_llseek, }; static struct miscdevice wdt_dev = { diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index a0d3ec89d41..f57a87f4ae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -310,11 +310,6 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) s3c_rtc_setaie(alrm->enabled); - if (alrm->enabled) - enable_irq_wake(s3c_rtc_alarmno); - else - disable_irq_wake(s3c_rtc_alarmno); - return 0; } @@ -587,6 +582,10 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) ticnt_en_save &= S3C64XX_RTCCON_TICEN; } s3c_rtc_enable(pdev, 0); + + if (device_may_wakeup(&pdev->dev)) + enable_irq_wake(s3c_rtc_alarmno); + return 0; } @@ -600,6 +599,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) tmp = readb(s3c_rtc_base + S3C2410_RTCCON); writeb(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); } + + if (device_may_wakeup(&pdev->dev)) + disable_irq_wake(s3c_rtc_alarmno); + return 0; } #else diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 8373ca0de8e..aa95f100176 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -21,7 +21,6 @@ #include <linux/hdreg.h> #include <linux/async.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <asm/ccwdev.h> #include <asm/ebcdic.h> @@ -2197,7 +2196,6 @@ static void dasd_setup_queue(struct dasd_block *block) */ blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); - blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN); } /* @@ -2236,7 +2234,6 @@ static int dasd_open(struct block_device *bdev, fmode_t mode) if (!block) return -ENODEV; - lock_kernel(); base = block->base; atomic_inc(&block->open_count); if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { @@ -2271,14 +2268,12 @@ static int dasd_open(struct block_device *bdev, fmode_t mode) goto out; } - unlock_kernel(); return 0; out: module_put(base->discipline->owner); unlock: atomic_dec(&block->open_count); - unlock_kernel(); return rc; } @@ -2286,10 +2281,8 @@ static int dasd_release(struct gendisk *disk, fmode_t mode) { struct dasd_block *block = disk->private_data; - lock_kernel(); atomic_dec(&block->open_count); module_put(block->base->discipline->owner); - unlock_kernel(); return 0; } diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 7158f9528ec..c71d89dba30 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -670,6 +670,7 @@ static const struct file_operations dasd_eer_fops = { .read = &dasd_eer_read, .poll = &dasd_eer_poll, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice *dasd_eer_dev = NULL; diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 1557214944f..26075e95b1b 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -16,7 +16,6 @@ #include <linux/major.h> #include <linux/fs.h> #include <linux/blkpg.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <asm/compat.h> #include <asm/ccwdev.h> @@ -370,9 +369,8 @@ static int 
dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd, return ret; } -static int -dasd_do_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) +int dasd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) { struct dasd_block *block = bdev->bd_disk->private_data; void __user *argp; @@ -430,14 +428,3 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode, return -EINVAL; } } - -int dasd_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) -{ - int rc; - - lock_kernel(); - rc = dasd_do_ioctl(bdev, mode, cmd, arg); - unlock_kernel(); - return rc; -} diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 2bd72aa34c5..9b43ae94beb 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -14,7 +14,6 @@ #include <linux/init.h> #include <linux/slab.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> #include <linux/completion.h> #include <linux/interrupt.h> #include <linux/platform_device.h> @@ -776,7 +775,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode) struct dcssblk_dev_info *dev_info; int rc; - lock_kernel(); dev_info = bdev->bd_disk->private_data; if (NULL == dev_info) { rc = -ENODEV; @@ -786,7 +784,6 @@ dcssblk_open(struct block_device *bdev, fmode_t mode) bdev->bd_block_size = 4096; rc = 0; out: - unlock_kernel(); return rc; } @@ -797,7 +794,6 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) struct segment_info *entry; int rc; - lock_kernel(); if (!dev_info) { rc = -ENODEV; goto out; @@ -815,7 +811,6 @@ dcssblk_release(struct gendisk *disk, fmode_t mode) up_write(&dcssblk_devices_sem); rc = 0; out: - unlock_kernel(); return rc; } diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 857dfcb7b35..eb28fb01a38 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -520,6 +520,7 @@ static const struct file_operations fs3270_fops = { .compat_ioctl = fs3270_ioctl, /* ioctl */ .open = fs3270_open, /* open */ .release = fs3270_close, /* release */ + .llseek = no_llseek, }; /* diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index e021ec663ef..5b8b8592d31 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c @@ -447,6 +447,7 @@ static const struct file_operations mon_fops = { .release = &mon_close, .read = &mon_read, .poll = &mon_poll, + .llseek = noop_llseek, }; static struct miscdevice mon_dev = { diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 572a1e7fd09..e0702d3ea33 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c @@ -274,6 +274,7 @@ static const struct file_operations monwrite_fops = { .open = &monwrite_open, .release = &monwrite_close, .write = &monwrite_write, + .llseek = noop_llseek, }; static struct miscdevice mon_dev = { diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index f6d72e1f2a3..5707a80b96b 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c @@ -468,7 +468,7 @@ sclp_sync_wait(void) cr0_sync &= 0xffff00a0; cr0_sync |= 0x00000200; __ctl_load(cr0_sync, 0, 0); - __raw_local_irq_stosm(0x01); + __arch_local_irq_stosm(0x01); /* Loop until driver state indicates finished request */ while (sclp_running_state != sclp_running_state_idle) { /* Check for expired request timer */ diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 85cf607fc78..f0fa9ca5cb2 100644 --- a/drivers/s390/char/tape_block.c 
+++ b/drivers/s390/char/tape_block.c @@ -16,7 +16,7 @@ #include <linux/fs.h> #include <linux/module.h> #include <linux/blkdev.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/interrupt.h> #include <linux/buffer_head.h> #include <linux/kernel.h> @@ -45,6 +45,7 @@ /* * file operation structure for tape block frontend */ +static DEFINE_MUTEX(tape_block_mutex); static int tapeblock_open(struct block_device *, fmode_t); static int tapeblock_release(struct gendisk *, fmode_t); static int tapeblock_medium_changed(struct gendisk *); @@ -361,7 +362,7 @@ tapeblock_open(struct block_device *bdev, fmode_t mode) struct tape_device * device; int rc; - lock_kernel(); + mutex_lock(&tape_block_mutex); device = tape_get_device(disk->private_data); if (device->required_tapemarks) { @@ -385,14 +386,14 @@ tapeblock_open(struct block_device *bdev, fmode_t mode) * is called. */ tape_state_set(device, TS_BLKUSE); - unlock_kernel(); + mutex_unlock(&tape_block_mutex); return 0; release: tape_release(device); put_device: tape_put_device(device); - unlock_kernel(); + mutex_unlock(&tape_block_mutex); return rc; } @@ -407,11 +408,11 @@ tapeblock_release(struct gendisk *disk, fmode_t mode) { struct tape_device *device = disk->private_data; - lock_kernel(); + mutex_lock(&tape_block_mutex); tape_state_set(device, TS_IN_USE); tape_release(device); tape_put_device(device); - unlock_kernel(); + mutex_unlock(&tape_block_mutex); return 0; } diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 539045acaad..883e2db02bd 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -53,6 +53,7 @@ static const struct file_operations tape_fops = #endif .open = tapechar_open, .release = tapechar_release, + .llseek = no_llseek, }; static int tapechar_major = TAPECHAR_MAJOR; diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 04e532eec03..0e7cb1a8415 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -177,6 +177,7 @@ static const struct file_operations vmcp_fops = { .write = vmcp_write, .unlocked_ioctl = vmcp_ioctl, .compat_ioctl = vmcp_ioctl, + .llseek = no_llseek, }; static struct miscdevice vmcp_dev = { diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index e40a1b89286..0d6dc4b92cc 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -97,6 +97,7 @@ static const struct file_operations vmlogrdr_fops = { .open = vmlogrdr_open, .release = vmlogrdr_release, .read = vmlogrdr_read, + .llseek = no_llseek, }; diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index e13508c98b1..12ef9121d4f 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c @@ -297,6 +297,7 @@ static const struct file_operations vmwdt_fops = { .unlocked_ioctl = &vmwdt_ioctl, .write = &vmwdt_write, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice vmwdt_dev = { diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index f5ea3384a4b..3b94044027c 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c @@ -459,6 +459,7 @@ static const struct file_operations zcore_memmap_fops = { .read = zcore_memmap_read, .open = zcore_memmap_open, .release = zcore_memmap_release, + .llseek = no_llseek, }; static ssize_t zcore_reipl_write(struct file *filp, const char __user *buf, @@ -486,6 +487,7 @@ static const struct file_operations zcore_reipl_fops = { .write = zcore_reipl_write, .open = zcore_reipl_open, .release = 
zcore_reipl_release, + .llseek = no_llseek, }; #ifdef CONFIG_32BIT diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index a83877c664a..f2b77e7bfc6 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -806,6 +806,7 @@ static const struct file_operations chsc_fops = { .open = nonseekable_open, .unlocked_ioctl = chsc_ioctl, .compat_ioctl = chsc_ioctl, + .llseek = no_llseek, }; static struct miscdevice chsc_misc_device = { diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index ac94ac75145..ca8e1c240c3 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -1067,6 +1067,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf, static const struct file_operations cio_settle_proc_fops = { .open = nonseekable_open, .write = cio_settle_write, + .llseek = no_llseek, }; static int __init cio_settle_init(void) diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 41e0aaefafd..f5221749d18 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -897,7 +897,8 @@ static const struct file_operations zcrypt_fops = { .compat_ioctl = zcrypt_compat_ioctl, #endif .open = zcrypt_open, - .release = zcrypt_release + .release = zcrypt_release, + .llseek = no_llseek, }; /* diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 6edf20b62de..2c7d2d9be4d 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1154,7 +1154,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) dev_fsm, dev_fsm_len, GFP_KERNEL); if (priv->fsm == NULL) { CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); - kfree(dev); + free_netdev(dev); return NULL; } fsm_newstate(priv->fsm, DEV_STATE_STOPPED); @@ -1165,7 +1165,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) grp = ctcmpc_init_mpc_group(priv); if (grp == NULL) { MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); - kfree(dev); + free_netdev(dev); return NULL; } tasklet_init(&grp->mpc_tasklet2, diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index fcbd2b756da..1838cda68ba 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c @@ -251,8 +251,9 @@ static const struct file_operations zfcp_cfdc_fops = { .open = nonseekable_open, .unlocked_ioctl = zfcp_cfdc_dev_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = zfcp_cfdc_dev_ioctl + .compat_ioctl = zfcp_cfdc_dev_ioctl, #endif + .llseek = no_llseek, }; struct miscdevice zfcp_cfdc_misc = { diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index cb000c9833b..208256e39de 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -681,6 +681,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter) adapter->adapter_features & FSF_FEATURE_DIX_PROT_TCPIP) { mask |= SHOST_DIX_TYPE1_PROTECTION; scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); + shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; } diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 1690e53fb84..55f71ea9c41 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -162,6 +162,7 @@ static const struct file_operations d7s_fops = { .compat_ioctl = d7s_ioctl, .open = d7s_open, .release = d7s_release, + .llseek = noop_llseek, }; static struct miscdevice d7s_miscdev = { diff 
--git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index 078e5f4520e..8ce414e3948 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -720,6 +720,7 @@ static const struct file_operations envctrl_fops = { #endif .open = envctrl_open, .release = envctrl_release, + .llseek = noop_llseek, }; static struct miscdevice envctrl_dev = { diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index 4942050dc5b..13f48e28a1e 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -27,7 +27,7 @@ */ #include <linux/module.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/miscdevice.h> @@ -68,6 +68,8 @@ #define JSF_PART_BITS 2 /* 2 bits of minors to cover JSF_NPART */ #define JSF_PART_MASK 0x3 /* 2 bits mask */ +static DEFINE_MUTEX(jsf_mutex); + /* * Access functions. * We could ioremap(), but it's easier this way. @@ -225,7 +227,7 @@ static loff_t jsf_lseek(struct file * file, loff_t offset, int orig) { loff_t ret; - lock_kernel(); + mutex_lock(&jsf_mutex); switch (orig) { case 0: file->f_pos = offset; @@ -238,7 +240,7 @@ static loff_t jsf_lseek(struct file * file, loff_t offset, int orig) default: ret = -EINVAL; } - unlock_kernel(); + mutex_unlock(&jsf_mutex); return ret; } @@ -384,18 +386,18 @@ static int jsf_ioctl_program(void __user *arg) static long jsf_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { - lock_kernel(); + mutex_lock(&jsf_mutex); int error = -ENOTTY; void __user *argp = (void __user *)arg; if (!capable(CAP_SYS_ADMIN)) { - unlock_kernel(); + mutex_unlock(&jsf_mutex); return -EPERM; } switch (cmd) { case JSFLASH_IDENT: if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) { - unlock_kernel(); + mutex_unlock(&jsf_mutex); return -EFAULT; } break; @@ -407,7 +409,7 @@ static long jsf_ioctl(struct file *f, unsigned int cmd, unsigned long arg) break; } - unlock_kernel(); + mutex_unlock(&jsf_mutex); return error; } @@ -418,17 +420,17 @@ static int jsf_mmap(struct file * file, struct vm_area_struct * vma) static int jsf_open(struct inode * inode, struct file * filp) { - lock_kernel(); + mutex_lock(&jsf_mutex); if (jsf0.base == 0) { - unlock_kernel(); + mutex_unlock(&jsf_mutex); return -ENXIO; } if (test_and_set_bit(0, (void *)&jsf0.busy) != 0) { - unlock_kernel(); + mutex_unlock(&jsf_mutex); return -EBUSY; } - unlock_kernel(); + mutex_unlock(&jsf_mutex); return 0; /* XXX What security? 
*/ } diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index e20b7bdd4c7..fcf08b3f52c 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -92,7 +92,6 @@ #include <linux/pci.h> #include <linux/time.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> @@ -105,6 +104,7 @@ /* Globals */ #define TW_DRIVER_VERSION "2.26.02.014" +static DEFINE_MUTEX(twa_chrdev_mutex); static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT]; static unsigned int twa_device_extension_count; static int twa_major = -1; @@ -222,7 +222,8 @@ static const struct file_operations twa_fops = { .owner = THIS_MODULE, .unlocked_ioctl = twa_chrdev_ioctl, .open = twa_chrdev_open, - .release = NULL + .release = NULL, + .llseek = noop_llseek, }; /* This function will complete an aen request from the isr */ @@ -658,7 +659,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long int retval = TW_IOCTL_ERROR_OS_EFAULT; void __user *argp = (void __user *)arg; - lock_kernel(); + mutex_lock(&twa_chrdev_mutex); /* Only let one of these through at a time */ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { @@ -879,7 +880,7 @@ out3: out2: mutex_unlock(&tw_dev->ioctl_lock); out: - unlock_kernel(); + mutex_unlock(&twa_chrdev_mutex); return retval; } /* End twa_chrdev_ioctl() */ @@ -890,7 +891,6 @@ static int twa_chrdev_open(struct inode *inode, struct file *file) unsigned int minor_number; int retval = TW_IOCTL_ERROR_OS_ENODEV; - cycle_kernel_lock(); minor_number = iminor(inode); if (minor_number >= twa_device_extension_count) goto out; diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index f481e734aad..6a95d111d20 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -64,7 +64,6 @@ #include <linux/pci.h> #include <linux/time.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/irq.h> @@ -77,6 +76,7 @@ /* Globals */ #define TW_DRIVER_VERSION "3.26.02.000" +static DEFINE_MUTEX(twl_chrdev_mutex); static TW_Device_Extension *twl_device_extension_list[TW_MAX_SLOT]; static unsigned int twl_device_extension_count; static int twl_major = -1; @@ -764,7 +764,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long int retval = -EFAULT; void __user *argp = (void __user *)arg; - lock_kernel(); + mutex_lock(&twl_chrdev_mutex); /* Only let one of these through at a time */ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { @@ -861,7 +861,7 @@ out3: out2: mutex_unlock(&tw_dev->ioctl_lock); out: - unlock_kernel(); + mutex_unlock(&twl_chrdev_mutex); return retval; } /* End twl_chrdev_ioctl() */ @@ -876,7 +876,6 @@ static int twl_chrdev_open(struct inode *inode, struct file *file) goto out; } - cycle_kernel_lock(); minor_number = iminor(inode); if (minor_number >= twl_device_extension_count) goto out; @@ -890,7 +889,8 @@ static const struct file_operations twl_fops = { .owner = THIS_MODULE, .unlocked_ioctl = twl_chrdev_ioctl, .open = twl_chrdev_open, - .release = NULL + .release = NULL, + .llseek = noop_llseek, }; /* This function passes sense data from firmware to scsi layer */ diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index 30d735ad35b..b1125341f4c 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -199,7 +199,6 @@ #include <linux/module.h> #include <linux/reboot.h> -#include <linux/smp_lock.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include 
<linux/moduleparam.h> @@ -221,6 +220,7 @@ /* Globals */ #define TW_DRIVER_VERSION "1.26.02.003" +static DEFINE_MUTEX(tw_mutex); static TW_Device_Extension *tw_device_extension_list[TW_MAX_SLOT]; static int tw_device_extension_count = 0; static int twe_major = -1; @@ -900,10 +900,10 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a dprintk(KERN_WARNING "3w-xxxx: tw_chrdev_ioctl()\n"); - lock_kernel(); + mutex_lock(&tw_mutex); /* Only let one of these through at a time */ if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) { - unlock_kernel(); + mutex_unlock(&tw_mutex); return -EINTR; } @@ -1034,7 +1034,7 @@ out2: dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, cpu_addr, dma_handle); out: mutex_unlock(&tw_dev->ioctl_lock); - unlock_kernel(); + mutex_unlock(&tw_mutex); return retval; } /* End tw_chrdev_ioctl() */ @@ -1044,7 +1044,6 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) { unsigned int minor_number; - cycle_kernel_lock(); dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); minor_number = iminor(inode); @@ -1059,7 +1058,8 @@ static const struct file_operations tw_fops = { .owner = THIS_MODULE, .unlocked_ioctl = tw_chrdev_ioctl, .open = tw_chrdev_open, - .release = NULL + .release = NULL, + .llseek = noop_llseek, }; /* This function will free up device extension resources */ diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index cad6f9abaeb..dae46d779c7 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -38,7 +38,7 @@ #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/syscalls.h> #include <linux/delay.h> @@ -76,6 +76,7 @@ MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, " MODULE_LICENSE("GPL"); MODULE_VERSION(AAC_DRIVER_FULL_VERSION); +static DEFINE_MUTEX(aac_mutex); static LIST_HEAD(aac_devices); static int aac_cfg_major = -1; char aac_driver_version[] = AAC_DRIVER_FULL_VERSION; @@ -678,7 +679,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file) unsigned minor_number = iminor(inode); int err = -ENODEV; - lock_kernel(); /* BKL pushdown: nothing else protects this list */ + mutex_lock(&aac_mutex); /* BKL pushdown: nothing else protects this list */ list_for_each_entry(aac, &aac_devices, entry) { if (aac->id == minor_number) { file->private_data = aac; @@ -686,7 +687,7 @@ static int aac_cfg_open(struct inode *inode, struct file *file) break; } } - unlock_kernel(); + mutex_unlock(&aac_mutex); return err; } @@ -711,9 +712,9 @@ static long aac_cfg_ioctl(struct file *file, int ret; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - lock_kernel(); + mutex_lock(&aac_mutex); ret = aac_do_ioctl(file->private_data, cmd, (void __user *)arg); - unlock_kernel(); + mutex_unlock(&aac_mutex); return ret; } @@ -722,7 +723,7 @@ static long aac_cfg_ioctl(struct file *file, static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&aac_mutex); switch (cmd) { case FSACTL_MINIPORT_REV_CHECK: case FSACTL_SENDFIB: @@ -756,7 +757,7 @@ static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long ret = -ENOIOCTLCMD; break; } - unlock_kernel(); + mutex_unlock(&aac_mutex); return ret; } @@ -1039,6 +1040,7 @@ static const struct file_operations aac_cfg_fops = { .compat_ioctl = aac_compat_cfg_ioctl, #endif .open = aac_cfg_open, + .llseek = 
noop_llseek, }; static struct scsi_host_template aac_driver_template = { diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c index 93984c9dfe1..aee73fafccc 100644 --- a/drivers/scsi/aic7xxx_old.c +++ b/drivers/scsi/aic7xxx_old.c @@ -2850,12 +2850,6 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb) aic_dev->r_total++; ptr = aic_dev->r_bins; } - if(cmd->device->simple_tags && cmd->request->cmd_flags & REQ_HARDBARRIER) - { - aic_dev->barrier_total++; - if(scb->tag_action == MSG_ORDERED_Q_TAG) - aic_dev->ordered_total++; - } x = scb->sg_length; x >>= 10; for(i=0; i<6; i++) @@ -10125,7 +10119,6 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd, struct aic_dev_data *aic_dev = cmd->device->hostdata; struct scsi_device *sdptr = cmd->device; unsigned char tindex = TARGET_INDEX(cmd); - struct request *req = cmd->request; int use_sg; mask = (0x01 << tindex); @@ -10144,19 +10137,8 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd, /* We always force TEST_UNIT_READY to untagged */ if (cmd->cmnd[0] != TEST_UNIT_READY && sdptr->simple_tags) { - if (req->cmd_flags & REQ_HARDBARRIER) - { - if(sdptr->ordered_tags) - { - hscb->control |= MSG_ORDERED_Q_TAG; - scb->tag_action = MSG_ORDERED_Q_TAG; - } - } - else - { - hscb->control |= MSG_SIMPLE_Q_TAG; - scb->tag_action = MSG_SIMPLE_Q_TAG; - } + hscb->control |= MSG_SIMPLE_Q_TAG; + scb->tag_action = MSG_SIMPLE_Q_TAG; } } if ( !(aic_dev->dtr_pending) && diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c index d6532187f61..a15474eef5f 100644 --- a/drivers/scsi/ch.c +++ b/drivers/scsi/ch.c @@ -22,7 +22,6 @@ #include <linux/chio.h> /* here are all the ioctls */ #include <linux/mutex.h> #include <linux/idr.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <scsi/scsi.h> @@ -44,6 +43,7 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_CHANGER_MAJOR); MODULE_ALIAS_SCSI_DEVICE(TYPE_MEDIUM_CHANGER); +static DEFINE_MUTEX(ch_mutex); static int init = 1; module_param(init, int, 0444); MODULE_PARM_DESC(init, \ @@ -581,19 +581,19 @@ ch_open(struct inode *inode, struct file *file) scsi_changer *ch; int minor = iminor(inode); - lock_kernel(); + mutex_lock(&ch_mutex); spin_lock(&ch_index_lock); ch = idr_find(&ch_index_idr, minor); if (NULL == ch || scsi_device_get(ch->device)) { spin_unlock(&ch_index_lock); - unlock_kernel(); + mutex_unlock(&ch_mutex); return -ENXIO; } spin_unlock(&ch_index_lock); file->private_data = ch; - unlock_kernel(); + mutex_unlock(&ch_mutex); return 0; } @@ -981,6 +981,7 @@ static const struct file_operations changer_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = ch_ioctl_compat, #endif + .llseek = noop_llseek, }; static int __init init_ch_module(void) diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index ffc1edf5e80..23dec006338 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); #include <linux/kernel.h> /* for printk */ #include <linux/sched.h> #include <linux/reboot.h> -#include <linux/smp_lock.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> @@ -76,6 +75,7 @@ MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); * Needed for our management apps *============================================================================ */ +static DEFINE_MUTEX(adpt_mutex); static dpt_sig_S DPTI_sig = { {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, #ifdef __i386__ @@ -126,6 +126,7 @@ static const struct file_operations adpt_fops = { #ifdef CONFIG_COMPAT 
.compat_ioctl = compat_adpt_ioctl, #endif + .llseek = noop_llseek, }; /* Structures and definitions for synchronous message posting. @@ -1732,12 +1733,12 @@ static int adpt_open(struct inode *inode, struct file *file) int minor; adpt_hba* pHba; - lock_kernel(); + mutex_lock(&adpt_mutex); //TODO check for root access // minor = iminor(inode); if (minor >= hba_count) { - unlock_kernel(); + mutex_unlock(&adpt_mutex); return -ENXIO; } mutex_lock(&adpt_configuration_lock); @@ -1748,7 +1749,7 @@ static int adpt_open(struct inode *inode, struct file *file) } if (pHba == NULL) { mutex_unlock(&adpt_configuration_lock); - unlock_kernel(); + mutex_unlock(&adpt_mutex); return -ENXIO; } @@ -1759,7 +1760,7 @@ static int adpt_open(struct inode *inode, struct file *file) pHba->in_use = 1; mutex_unlock(&adpt_configuration_lock); - unlock_kernel(); + mutex_unlock(&adpt_mutex); return 0; } @@ -2160,9 +2161,9 @@ static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg) inode = file->f_dentry->d_inode; - lock_kernel(); + mutex_lock(&adpt_mutex); ret = adpt_ioctl(inode, file, cmd, arg); - unlock_kernel(); + mutex_unlock(&adpt_mutex); return ret; } @@ -2176,7 +2177,7 @@ static long compat_adpt_ioctl(struct file *file, inode = file->f_dentry->d_inode; - lock_kernel(); + mutex_lock(&adpt_mutex); switch(cmd) { case DPT_SIGNATURE: @@ -2194,7 +2195,7 @@ static long compat_adpt_ioctl(struct file *file, ret = -ENOIOCTLCMD; } - unlock_kernel(); + mutex_unlock(&adpt_mutex); return ret; } diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index b860d650a56..5a3f9310101 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -120,7 +120,7 @@ #include <linux/timer.h> #include <linux/dma-mapping.h> #include <linux/list.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/slab.h> #ifdef GDTH_RTC @@ -140,6 +140,7 @@ #include <scsi/scsi_host.h> #include "gdth.h" +static DEFINE_MUTEX(gdth_mutex); static void gdth_delay(int milliseconds); static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs); static irqreturn_t gdth_interrupt(int irq, void *dev_id); @@ -372,6 +373,7 @@ static const struct file_operations gdth_fops = { .unlocked_ioctl = gdth_unlocked_ioctl, .open = gdth_open, .release = gdth_close, + .llseek = noop_llseek, }; #include "gdth_proc.h" @@ -4042,12 +4044,12 @@ static int gdth_open(struct inode *inode, struct file *filep) { gdth_ha_str *ha; - lock_kernel(); + mutex_lock(&gdth_mutex); list_for_each_entry(ha, &gdth_instances, list) { if (!ha->sdev) ha->sdev = scsi_get_host_dev(ha->shost); } - unlock_kernel(); + mutex_unlock(&gdth_mutex); TRACE(("gdth_open()\n")); return 0; @@ -4615,9 +4617,9 @@ static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd, { int ret; - lock_kernel(); + mutex_lock(&gdth_mutex); ret = gdth_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&gdth_mutex); return ret; } diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 8a8f803439e..10478153641 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -376,6 +376,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost->this_id = sht->this_id; shost->can_queue = sht->can_queue; shost->sg_tablesize = sht->sg_tablesize; + shost->sg_prot_tablesize = sht->sg_prot_tablesize; shost->cmd_per_lun = sht->cmd_per_lun; shost->unchecked_isa_dma = sht->unchecked_isa_dma; shost->use_clustering = sht->use_clustering; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 
f0cfba9a1fc..535085cd27e 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -130,17 +130,6 @@ static void sas_scsi_task_done(struct sas_task *task) sc->scsi_done(sc); } -static enum task_attribute sas_scsi_get_task_attr(struct scsi_cmnd *cmd) -{ - enum task_attribute ta = TASK_ATTR_SIMPLE; - if (cmd->request && blk_rq_tagged(cmd->request)) { - if (cmd->device->ordered_tags && - (cmd->request->cmd_flags & REQ_HARDBARRIER)) - ta = TASK_ATTR_ORDERED; - } - return ta; -} - static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, struct domain_device *dev, gfp_t gfp_flags) @@ -160,7 +149,7 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd, task->ssp_task.retry_count = 1; int_to_scsilun(cmd->device->lun, &lun); memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8); - task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd); + task->ssp_task.task_attr = TASK_ATTR_SIMPLE; memcpy(task->ssp_task.cdb, cmd->cmnd, 16); task->scatter = scsi_sglist(cmd); diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 0b6e3228610..7ceb5cf12c6 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -46,7 +46,7 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/dma-mapping.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <scsi/scsicam.h> @@ -62,6 +62,7 @@ MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver"); MODULE_LICENSE ("GPL"); MODULE_VERSION(MEGARAID_MODULE_VERSION); +static DEFINE_MUTEX(megadev_mutex); static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN; module_param(max_cmd_per_lun, uint, 0); MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)"); @@ -101,6 +102,7 @@ static const struct file_operations megadev_fops = { .owner = THIS_MODULE, .unlocked_ioctl = megadev_unlocked_ioctl, .open = megadev_open, + .llseek = noop_llseek, }; /* @@ -3282,7 +3284,6 @@ mega_init_scb(adapter_t *adapter) static int megadev_open (struct inode *inode, struct file *filep) { - cycle_kernel_lock(); /* * Only allow superuser to access private ioctl interface */ @@ -3701,9 +3702,9 @@ megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&megadev_mutex); ret = megadev_ioctl(filep, cmd, arg); - unlock_kernel(); + mutex_unlock(&megadev_mutex); return ret; } diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 41f82f76d88..a7008c0c24f 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -16,11 +16,12 @@ */ #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include "megaraid_mm.h" // Entry points for char node driver +static DEFINE_MUTEX(mraid_mm_mutex); static int mraid_mm_open(struct inode *, struct file *); static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long); @@ -75,6 +76,7 @@ static const struct file_operations lsi_fops = { .compat_ioctl = mraid_mm_compat_ioctl, #endif .owner = THIS_MODULE, + .llseek = noop_llseek, }; static struct miscdevice megaraid_mm_dev = { @@ -98,7 +100,6 @@ mraid_mm_open(struct inode *inode, struct file *filep) */ if (!capable(CAP_SYS_ADMIN)) return (-EACCES); - cycle_kernel_lock(); return 0; } @@ -224,9 +225,9 @@ mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, int err; /* inconsistant: mraid_mm_compat_ioctl doesn't take the BKL */ - lock_kernel(); + 
mutex_lock(&mraid_mm_mutex); err = mraid_mm_ioctl(filep, cmd, arg); - unlock_kernel(); + mutex_unlock(&mraid_mm_mutex); return err; } diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index 99e4478c3f3..51e2579a743 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -33,7 +33,6 @@ #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <linux/smp_lock.h> #include <linux/uio.h> #include <linux/slab.h> #include <asm/uaccess.h> @@ -3557,7 +3556,6 @@ static void megasas_shutdown(struct pci_dev *pdev) */ static int megasas_mgmt_open(struct inode *inode, struct file *filep) { - cycle_kernel_lock(); /* * Allow only those users with admin rights */ @@ -3957,6 +3955,7 @@ static const struct file_operations megasas_mgmt_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = megasas_mgmt_compat_ioctl, #endif + .llseek = noop_llseek, }; /* diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c index b774973f076..40cb8aeb21b 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c +++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -51,7 +51,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/delay.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/compat.h> #include <linux/poll.h> @@ -61,6 +61,7 @@ #include "mpt2sas_base.h" #include "mpt2sas_ctl.h" +static DEFINE_MUTEX(_ctl_mutex); static struct fasync_struct *async_queue; static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); @@ -2238,9 +2239,9 @@ _ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&_ctl_mutex); ret = _ctl_ioctl_main(file, cmd, (void __user *)arg); - unlock_kernel(); + mutex_unlock(&_ctl_mutex); return ret; } @@ -2309,12 +2310,12 @@ _ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&_ctl_mutex); if (cmd == MPT2COMMAND32) ret = _ctl_compat_mpt_command(file, cmd, arg); else ret = _ctl_ioctl_main(file, cmd, (void __user *)arg); - unlock_kernel(); + mutex_unlock(&_ctl_mutex); return ret; } #endif @@ -2952,6 +2953,7 @@ static const struct file_operations ctl_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = _ctl_ioctl_compat, #endif + .llseek = noop_llseek, }; static struct miscdevice ctl_dev = { diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index ffdd9fdb999..b31a8e3841d 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -182,6 +182,7 @@ static const struct file_operations osd_fops = { .open = osd_uld_open, .release = osd_uld_release, .unlocked_ioctl = osd_uld_ioctl, + .llseek = noop_llseek, }; struct osd_dev *osduld_path_lookup(const char *name) diff --git a/drivers/scsi/osst.c b/drivers/scsi/osst.c index 278b352ae78..54de1d1af1a 100644 --- a/drivers/scsi/osst.c +++ b/drivers/scsi/osst.c @@ -51,7 +51,7 @@ static const char * osst_version = "0.99.4"; #include <linux/moduleparam.h> #include <linux/delay.h> #include <linux/jiffies.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/dma.h> #include <asm/system.h> @@ -80,6 +80,7 @@ static const char * osst_version = "0.99.4"; #include "osst_options.h" #include "osst_detect.h" +static DEFINE_MUTEX(osst_int_mutex); static int max_dev = 0; static int write_threshold_kbs = 0; static int max_sg_segs = 0; @@ -4807,9 +4808,9 @@ static int os_scsi_tape_open(struct inode * inode, struct file * filp) { int ret; - lock_kernel(); + 
mutex_lock(&osst_int_mutex); ret = __os_scsi_tape_open(inode, filp); - unlock_kernel(); + mutex_unlock(&osst_int_mutex); return ret; } @@ -4943,9 +4944,9 @@ static long osst_ioctl(struct file * file, char * name = tape_name(STp); void __user * p = (void __user *)arg; - lock_kernel(); + mutex_lock(&osst_int_mutex); if (mutex_lock_interruptible(&STp->lock)) { - unlock_kernel(); + mutex_unlock(&osst_int_mutex); return -ERESTARTSYS; } @@ -5260,14 +5261,14 @@ static long osst_ioctl(struct file * file, mutex_unlock(&STp->lock); retval = scsi_ioctl(STp->device, cmd_in, p); - unlock_kernel(); + mutex_unlock(&osst_int_mutex); return retval; out: if (SRpnt) osst_release_request(SRpnt); mutex_unlock(&STp->lock); - unlock_kernel(); + mutex_unlock(&osst_int_mutex); return retval; } diff --git a/drivers/scsi/pcmcia/aha152x_stub.c b/drivers/scsi/pcmcia/aha152x_stub.c index 61f49bdcc0c..e77dd02eccd 100644 --- a/drivers/scsi/pcmcia/aha152x_stub.c +++ b/drivers/scsi/pcmcia/aha152x_stub.c @@ -49,7 +49,6 @@ #include <scsi/scsi_host.h> #include "aha152x.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -86,8 +85,6 @@ static void aha152x_release_cs(struct pcmcia_device *link); static void aha152x_detach(struct pcmcia_device *p_dev); static int aha152x_config_cs(struct pcmcia_device *link); -static struct pcmcia_device *dev_list; - static int aha152x_probe(struct pcmcia_device *link) { scsi_info_t *info; @@ -100,11 +97,8 @@ static int aha152x_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->resource[0]->end = 0x20; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.Present = PRESENT_OPTION; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + link->config_regs = PRESENT_OPTION; return aha152x_config_cs(link); } /* aha152x_attach */ @@ -123,25 +117,24 @@ static void aha152x_detach(struct pcmcia_device *link) /*====================================================================*/ -static int aha152x_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int aha152x_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; + /* For New Media T&J, look for a SCSI window */ - if (cfg->io.win[0].len >= 0x20) - p_dev->resource[0]->start = cfg->io.win[0].base; - else if ((cfg->io.nwin > 1) && - (cfg->io.win[1].len >= 0x20)) - p_dev->resource[0]->start = cfg->io.win[1].base; - if ((cfg->io.nwin > 0) && - (p_dev->resource[0]->start < 0xffff)) { - if (!pcmcia_request_io(p_dev)) - return 0; - } - return -EINVAL; + if ((p_dev->resource[0]->end < 0x20) && + (p_dev->resource[1]->end >= 0x20)) + p_dev->resource[0]->start = p_dev->resource[1]->start; + + if (p_dev->resource[0]->start >= 0xffff) + return -EINVAL; + + p_dev->resource[1]->start = p_dev->resource[1]->end = 0; + p_dev->resource[0]->end = 0x20; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; + + return pcmcia_request_io(p_dev); } static int aha152x_config_cs(struct pcmcia_device *link) @@ -160,7 +153,7 @@ static int aha152x_config_cs(struct pcmcia_device *link) if (!link->irq) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -221,9 +214,7 @@ MODULE_DEVICE_TABLE(pcmcia, aha152x_ids); static struct pcmcia_driver aha152x_cs_driver = { .owner 
= THIS_MODULE, - .drv = { - .name = "aha152x_cs", - }, + .name = "aha152x_cs", .probe = aha152x_probe, .remove = aha152x_detach, .id_table = aha152x_ids, @@ -238,7 +229,6 @@ static int __init init_aha152x_cs(void) static void __exit exit_aha152x_cs(void) { pcmcia_unregister_driver(&aha152x_cs_driver); - BUG_ON(dev_list != NULL); } module_init(init_aha152x_cs); diff --git a/drivers/scsi/pcmcia/fdomain_stub.c b/drivers/scsi/pcmcia/fdomain_stub.c index 13dbe5c4849..cd69c2670f8 100644 --- a/drivers/scsi/pcmcia/fdomain_stub.c +++ b/drivers/scsi/pcmcia/fdomain_stub.c @@ -46,7 +46,6 @@ #include <scsi/scsi_host.h> #include "fdomain.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -83,11 +82,8 @@ static int fdomain_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->resource[0]->end = 0x10; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.Present = PRESENT_OPTION; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + link->config_regs = PRESENT_OPTION; return fdomain_config(link); } /* fdomain_attach */ @@ -105,14 +101,12 @@ static void fdomain_detach(struct pcmcia_device *link) /*====================================================================*/ -static int fdomain_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int fdomain_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; - p_dev->resource[0]->start = cfg->io.win[0].base; + p_dev->resource[0]->end = 0x10; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; return pcmcia_request_io(p_dev); } @@ -132,7 +126,7 @@ static int fdomain_config(struct pcmcia_device *link) if (!link->irq) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -194,9 +188,7 @@ MODULE_DEVICE_TABLE(pcmcia, fdomain_ids); static struct pcmcia_driver fdomain_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "fdomain_cs", - }, + .name = "fdomain_cs", .probe = fdomain_probe, .remove = fdomain_detach, .id_table = fdomain_ids, diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index dd9b40306f3..9326c2c1488 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -47,7 +47,6 @@ #include <scsi/scsi.h> #include <scsi/scsi_ioctl.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -1531,15 +1530,6 @@ static int nsp_eh_host_reset(struct scsi_cmnd *SCpnt) PCMCIA functions **********************************************************************/ -/*====================================================================== - nsp_cs_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. 
-======================================================================*/ static int nsp_cs_probe(struct pcmcia_device *link) { scsi_info_t *info; @@ -1557,14 +1547,6 @@ static int nsp_cs_probe(struct pcmcia_device *link) nsp_dbg(NSP_DEBUG_INIT, "info=0x%p", info); - /* The io structure describes IO port mapping */ - link->resource[0]->end = 0x10; - link->resource[0]->flags = IO_DATA_PATH_WIDTH_AUTO; - - /* General socket configuration */ - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - ret = nsp_cs_config(link); nsp_dbg(NSP_DEBUG_INIT, "link=0x%p", link); @@ -1572,12 +1554,6 @@ static int nsp_cs_probe(struct pcmcia_device *link) } /* nsp_cs_attach */ -/*====================================================================== - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. -======================================================================*/ static void nsp_cs_detach(struct pcmcia_device *link) { nsp_dbg(NSP_DEBUG_INIT, "in, link=0x%p", link); @@ -1590,98 +1566,36 @@ static void nsp_cs_detach(struct pcmcia_device *link) } /* nsp_cs_detach */ -/*====================================================================== - nsp_cs_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - ethernet device available to the system. -======================================================================*/ - -struct nsp_cs_configdata { - nsp_hw_data *data; - win_req_t req; -}; - -static int nsp_cs_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { - struct nsp_cs_configdata *cfg_mem = priv_data; + nsp_hw_data *data = priv_data; - if (cfg->index == 0) + if (p_dev->config_index == 0) return -ENODEV; - /* Does this card need audio output? */ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } - - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) { - if (vcc != cfg->vcc.param[CISTPL_POWER_VNOM]/10000) - return -ENODEV; - else if (dflt->vcc.present & (1<<CISTPL_POWER_VNOM)) { - if (vcc != dflt->vcc.param[CISTPL_POWER_VNOM]/10000) - return -ENODEV; - } - - if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM)) { - p_dev->conf.Vpp = - cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000; - } else if (dflt->vpp1.present & (1 << CISTPL_POWER_VNOM)) { - p_dev->conf.Vpp = - dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; - } - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = - p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - /* This reserves IO space but doesn't actually enable it */ - if (pcmcia_request_io(p_dev) != 0) - goto next_entry; - } - - if ((cfg->mem.nwin > 0) || (dflt->mem.nwin > 0)) { - cistpl_mem_t *mem = - (cfg->mem.nwin) ? &cfg->mem : &dflt->mem; - cfg_mem->req.Attributes = WIN_DATA_WIDTH_16|WIN_MEMORY_TYPE_CM; - cfg_mem->req.Attributes |= WIN_ENABLE; - cfg_mem->req.Base = mem->win[0].host_addr; - cfg_mem->req.Size = mem->win[0].len; - if (cfg_mem->req.Size < 0x1000) - cfg_mem->req.Size = 0x1000; - cfg_mem->req.AccessSpeed = 0; - if (pcmcia_request_window(p_dev, &cfg_mem->req, &p_dev->win) != 0) - goto next_entry; - if (pcmcia_map_mem_page(p_dev, p_dev->win, - mem->win[0].card_addr) != 0) - goto next_entry; - - cfg_mem->data->MmioAddress = (unsigned long) ioremap_nocache(cfg_mem->req.Base, cfg_mem->req.Size); - cfg_mem->data->MmioLength = cfg_mem->req.Size; - } - /* If we got this far, we're cool! */ - return 0; + /* This reserves IO space but doesn't actually enable it */ + if (pcmcia_request_io(p_dev) != 0) + goto next_entry; + + if (resource_size(p_dev->resource[2])) { + p_dev->resource[2]->flags |= (WIN_DATA_WIDTH_16 | + WIN_MEMORY_TYPE_CM | + WIN_ENABLE); + if (p_dev->resource[2]->end < 0x1000) + p_dev->resource[2]->end = 0x1000; + if (pcmcia_request_window(p_dev, p_dev->resource[2], 0) != 0) + goto next_entry; + if (pcmcia_map_mem_page(p_dev, p_dev->resource[2], + p_dev->card_addr) != 0) + goto next_entry; + + data->MmioAddress = (unsigned long) + ioremap_nocache(p_dev->resource[2]->start, + resource_size(p_dev->resource[2])); + data->MmioLength = resource_size(p_dev->resource[2]); } + /* If we got this far, we're cool! 
*/ + return 0; next_entry: nsp_dbg(NSP_DEBUG_INIT, "next"); @@ -1693,25 +1607,23 @@ static int nsp_cs_config(struct pcmcia_device *link) { int ret; scsi_info_t *info = link->priv; - struct nsp_cs_configdata *cfg_mem; struct Scsi_Host *host; nsp_hw_data *data = &nsp_data_base; nsp_dbg(NSP_DEBUG_INIT, "in"); - cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL); - if (!cfg_mem) - return -ENOMEM; - cfg_mem->data = data; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_CHECK_VCC | + CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_SET_IOMEM | + CONF_AUTO_SET_IO; - ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem); + ret = pcmcia_loop_config(link, nsp_cs_config_check, data); if (ret) goto cs_failed; if (pcmcia_request_irq(link, nspintr)) goto cs_failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto cs_failed; @@ -1754,41 +1666,16 @@ static int nsp_cs_config(struct pcmcia_device *link) info->host = host; - /* Finally, report what we've done */ - printk(KERN_INFO "nsp_cs: index 0x%02x: ", - link->conf.ConfigIndex); - if (link->conf.Vpp) { - printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); - } - if (link->conf.Attributes & CONF_ENABLE_IRQ) { - printk(", irq %d", link->irq); - } - if (link->resource[0]) - printk(", io %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - if (link->win) - printk(", mem 0x%06lx-0x%06lx", cfg_mem->req.Base, - cfg_mem->req.Base+cfg_mem->req.Size-1); - printk("\n"); - - kfree(cfg_mem); return 0; cs_failed: nsp_dbg(NSP_DEBUG_INIT, "config fail"); nsp_cs_release(link); - kfree(cfg_mem); return -ENODEV; } /* nsp_cs_config */ -/*====================================================================== - After a card is removed, nsp_cs_release() will unregister the net - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. 
-======================================================================*/ static void nsp_cs_release(struct pcmcia_device *link) { scsi_info_t *info = link->priv; @@ -1807,7 +1694,7 @@ static void nsp_cs_release(struct pcmcia_device *link) scsi_remove_host(info->host); } - if (link->win) { + if (resource_size(link->resource[2])) { if (data != NULL) { iounmap((void *)(data->MmioAddress)); } @@ -1877,9 +1764,7 @@ MODULE_DEVICE_TABLE(pcmcia, nsp_cs_ids); static struct pcmcia_driver nsp_driver = { .owner = THIS_MODULE, - .drv = { - .name = "nsp_cs", - }, + .name = "nsp_cs", .probe = nsp_cs_probe, .remove = nsp_cs_detach, .id_table = nsp_cs_ids, @@ -1889,14 +1774,11 @@ static struct pcmcia_driver nsp_driver = { static int __init nsp_cs_init(void) { - nsp_msg(KERN_INFO, "loading..."); - return pcmcia_register_driver(&nsp_driver); } static void __exit nsp_cs_exit(void) { - nsp_msg(KERN_INFO, "unloading..."); pcmcia_unregister_driver(&nsp_driver); } diff --git a/drivers/scsi/pcmcia/qlogic_stub.c b/drivers/scsi/pcmcia/qlogic_stub.c index eb775f1a523..9c96ca889ec 100644 --- a/drivers/scsi/pcmcia/qlogic_stub.c +++ b/drivers/scsi/pcmcia/qlogic_stub.c @@ -48,7 +48,6 @@ #include <scsi/scsi_host.h> #include "../qlogicfas408.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/ciscode.h> @@ -156,11 +155,8 @@ static int qlogic_probe(struct pcmcia_device *link) return -ENOMEM; info->p_dev = link; link->priv = info; - link->resource[0]->end = 16; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.Present = PRESENT_OPTION; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + link->config_regs = PRESENT_OPTION; return qlogic_config(link); } /* qlogic_attach */ @@ -178,15 +174,11 @@ static void qlogic_detach(struct pcmcia_device *link) /*====================================================================*/ -static int qlogic_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int qlogic_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; - p_dev->resource[0]->start = cfg->io.win[0].base; - p_dev->resource[0]->end = cfg->io.win[0].len; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; if (p_dev->resource[0]->start == 0) return -ENODEV; @@ -209,7 +201,7 @@ static int qlogic_config(struct pcmcia_device * link) if (!link->irq) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -264,7 +256,7 @@ static int qlogic_resume(struct pcmcia_device *link) { scsi_info_t *info = link->priv; - pcmcia_request_configuration(link, &link->conf); + pcmcia_enable_device(link); if ((info->manf_id == MANFID_MACNICA) || (info->manf_id == MANFID_PIONEER) || (info->manf_id == 0x0098)) { @@ -302,9 +294,7 @@ MODULE_DEVICE_TABLE(pcmcia, qlogic_ids); static struct pcmcia_driver qlogic_cs_driver = { .owner = THIS_MODULE, - .drv = { .name = "qlogic_cs", - }, .probe = qlogic_probe, .remove = qlogic_detach, .id_table = qlogic_ids, diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index 321e390c912..0ae27cb5cd6 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c @@ -71,7 +71,6 @@ #include <scsi/scsi.h> #include <scsi/scsi_host.h> -#include <pcmcia/cs.h> #include 
<pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <pcmcia/ciscode.h> @@ -684,15 +683,11 @@ static struct scsi_host_template sym53c500_driver_template = { .shost_attrs = SYM53C500_shost_attrs }; -static int SYM53C500_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int SYM53C500_config_check(struct pcmcia_device *p_dev, void *priv_data) { p_dev->io_lines = 10; - p_dev->resource[0]->start = cfg->io.win[0].base; - p_dev->resource[0]->end = cfg->io.win[0].len; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; if (p_dev->resource[0]->start == 0) return -ENODEV; @@ -721,7 +716,7 @@ SYM53C500_config(struct pcmcia_device *link) if (!link->irq) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; @@ -859,10 +854,7 @@ SYM53C500_probe(struct pcmcia_device *link) return -ENOMEM; info->p_dev = link; link->priv = info; - link->resource[0]->end = 16; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return SYM53C500_config(link); } /* SYM53C500_attach */ @@ -881,9 +873,7 @@ MODULE_DEVICE_TABLE(pcmcia, sym53c500_ids); static struct pcmcia_driver sym53c500_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "sym53c500_cs", - }, + .name = "sym53c500_cs", .probe = SYM53C500_probe, .remove = SYM53C500_detach, .id_table = sym53c500_ids, diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index ecc45c8b4e6..4b8765785ae 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4165,6 +4165,7 @@ static const struct file_operations pmcraid_fops = { #ifdef CONFIG_COMPAT .compat_ioctl = pmcraid_chr_ioctl, #endif + .llseek = noop_llseek, }; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1e4bff69525..9946fac5425 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3948,6 +3948,7 @@ static struct pci_driver qla2xxx_pci_driver = { static struct file_operations apidev_fops = { .owner = THIS_MODULE, + .llseek = noop_llseek, }; /** diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index ad0ed212db4..348fba0a897 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -1046,13 +1046,13 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, /* If the user actually wanted this page, we can skip the rest */ if (page == 0) - return -EINVAL; + return 0; for (i = 0; i < min((int)buf[3], buf_len - 4); i++) if (buf[i + 4] == page) goto found; - if (i < buf[3] && i > buf_len) + if (i < buf[3] && i >= buf_len - 4) /* ran off the end of the buffer, give us benefit of doubt */ goto found; /* The device claims it doesn't support the requested page */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ee02d3838a0..8041fe1ab17 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -968,11 +968,13 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, */ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) { - int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask); + struct request *rq = cmd->request; + + int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask); if (error) goto err_exit; - if (blk_bidi_rq(cmd->request)) { + if (blk_bidi_rq(rq)) { struct 
scsi_data_buffer *bidi_sdb = kmem_cache_zalloc( scsi_sdb_cache, GFP_ATOMIC); if (!bidi_sdb) { @@ -980,28 +982,28 @@ int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask) goto err_exit; } - cmd->request->next_rq->special = bidi_sdb; - error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb, - GFP_ATOMIC); + rq->next_rq->special = bidi_sdb; + error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC); if (error) goto err_exit; } - if (blk_integrity_rq(cmd->request)) { + if (blk_integrity_rq(rq)) { struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; int ivecs, count; BUG_ON(prot_sdb == NULL); - ivecs = blk_rq_count_integrity_sg(cmd->request); + ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) { error = BLKPREP_DEFER; goto err_exit; } - count = blk_rq_map_integrity_sg(cmd->request, + count = blk_rq_map_integrity_sg(rq->q, rq->bio, prot_sdb->table.sgl); BUG_ON(unlikely(count > ivecs)); + BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q))); cmd->prot_sdb = prot_sdb; cmd->prot_sdb->table.nents = count; @@ -1625,6 +1627,14 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, SCSI_MAX_SG_CHAIN_SEGMENTS)); + if (scsi_host_prot_dma(shost)) { + shost->sg_prot_tablesize = + min_not_zero(shost->sg_prot_tablesize, + (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); + BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); + blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); + } + blk_queue_max_hw_sectors(q, shost->max_sectors); blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); blk_queue_segment_boundary(q, shost->dma_boundary); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index c3f67373a4f..20ad59dff73 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -251,6 +251,7 @@ shost_rd_attr(host_busy, "%hu\n"); shost_rd_attr(cmd_per_lun, "%hd\n"); shost_rd_attr(can_queue, "%hd\n"); shost_rd_attr(sg_tablesize, "%hu\n"); +shost_rd_attr(sg_prot_tablesize, "%hu\n"); shost_rd_attr(unchecked_isa_dma, "%d\n"); shost_rd_attr(prot_capabilities, "%u\n"); shost_rd_attr(prot_guard_type, "%hd\n"); @@ -262,6 +263,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = { &dev_attr_cmd_per_lun.attr, &dev_attr_can_queue.attr, &dev_attr_sg_tablesize.attr, + &dev_attr_sg_prot_tablesize.attr, &dev_attr_unchecked_isa_dma.attr, &dev_attr_proc_name.attr, &dev_attr_scan.attr, diff --git a/drivers/scsi/scsi_tgt_if.c b/drivers/scsi/scsi_tgt_if.c index a87e21c35ef..0172de19700 100644 --- a/drivers/scsi/scsi_tgt_if.c +++ b/drivers/scsi/scsi_tgt_if.c @@ -22,7 +22,6 @@ #include <linux/miscdevice.h> #include <linux/gfp.h> #include <linux/file.h> -#include <linux/smp_lock.h> #include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -323,7 +322,6 @@ static int tgt_open(struct inode *inode, struct file *file) { tx_ring.tr_idx = rx_ring.tr_idx = 0; - cycle_kernel_lock(); return 0; } @@ -333,6 +331,7 @@ static const struct file_operations tgt_fops = { .poll = tgt_poll, .write = tgt_write, .mmap = tgt_mmap, + .llseek = noop_llseek, }; static struct miscdevice tgt_miscdev = { diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index ffa0689ee84..20514c47a5a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2109,7 +2109,7 @@ static int sd_revalidate_disk(struct gendisk *disk) struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; unsigned char *buffer; - unsigned ordered; + unsigned 
flush = 0; SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_revalidate_disk\n")); @@ -2151,17 +2151,15 @@ static int sd_revalidate_disk(struct gendisk *disk) /* * We now have all cache related info, determine how we deal - * with ordered requests. Note that as the current SCSI - * dispatch function can alter request order, we cannot use - * QUEUE_ORDERED_TAG_* even when ordered tag is supported. + * with flush requests. */ - if (sdkp->WCE) - ordered = sdkp->DPOFUA - ? QUEUE_ORDERED_DRAIN_FUA : QUEUE_ORDERED_DRAIN_FLUSH; - else - ordered = QUEUE_ORDERED_DRAIN; + if (sdkp->WCE) { + flush |= REQ_FLUSH; + if (sdkp->DPOFUA) + flush |= REQ_FUA; + } - blk_queue_ordered(sdkp->disk->queue, ordered); + blk_queue_flush(sdkp->disk->queue, flush); set_capacity(disk, sdkp->capacity); kfree(buffer); diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 84be62149c6..0cb39ff2117 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -375,21 +375,20 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s unsigned int i, j; u32 phys, virt; - /* Already remapped? */ - if (rq->cmd_flags & REQ_INTEGRITY) - return 0; - sdkp = rq->bio->bi_bdev->bd_disk->private_data; if (sdkp->protection_type == SD_DIF_TYPE3_PROTECTION) return 0; - rq->cmd_flags |= REQ_INTEGRITY; phys = hw_sector & 0xffffffff; __rq_for_each_bio(bio, rq) { struct bio_vec *iv; + /* Already remapped? */ + if (bio_flagged(bio, BIO_MAPPED_INTEGRITY)) + break; + virt = bio->bi_integrity->bip_sector & 0xffffffff; bip_for_each_vec(iv, bio->bi_integrity, i) { @@ -408,6 +407,8 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s kunmap_atomic(sdt, KM_USER0); } + + bio->bi_flags |= BIO_MAPPED_INTEGRITY; } return 0; diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 78d616315d8..5428d53f5a1 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -49,7 +49,7 @@ static int sg_version_num = 30534; /* 2 digits for each component */ #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/blktrace_api.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include "scsi.h" #include <scsi/scsi_dbg.h> @@ -103,6 +103,8 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ; static int sg_add(struct device *, struct class_interface *); static void sg_remove(struct device *, struct class_interface *); +static DEFINE_MUTEX(sg_mutex); + static DEFINE_IDR(sg_index_idr); static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock file descriptor list for device */ @@ -229,7 +231,7 @@ sg_open(struct inode *inode, struct file *filp) int res; int retval; - lock_kernel(); + mutex_lock(&sg_mutex); nonseekable_open(inode, filp); SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags)); sdp = sg_get_dev(dev); @@ -314,7 +316,7 @@ sdp_put: sg_put: if (sdp) sg_put_dev(sdp); - unlock_kernel(); + mutex_unlock(&sg_mutex); return retval; } @@ -1092,9 +1094,9 @@ sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&sg_mutex); ret = sg_ioctl(filp, cmd_in, arg); - unlock_kernel(); + mutex_unlock(&sg_mutex); return ret; } @@ -1351,6 +1353,7 @@ static const struct file_operations sg_fops = { .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, + .llseek = no_llseek, }; static struct class *sg_sysfs_class; @@ -1657,7 +1660,7 @@ static int sg_start_req(Sg_request *srp, unsigned char *cmd) if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && 
!sfp->parentdp->device->host->unchecked_isa_dma && - blk_rq_aligned(q, hp->dxferp, dxfer_len)) + blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index ba9c3e0387c..e148341079b 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -44,7 +44,6 @@ #include <linux/init.h> #include <linux/blkdev.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <asm/uaccess.h> @@ -76,6 +75,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM); CDC_CD_R|CDC_CD_RW|CDC_DVD|CDC_DVD_R|CDC_DVD_RAM|CDC_GENERIC_PACKET| \ CDC_MRW|CDC_MRW_W|CDC_RAM) +static DEFINE_MUTEX(sr_mutex); static int sr_probe(struct device *); static int sr_remove(struct device *); static int sr_done(struct scsi_cmnd *); @@ -470,24 +470,24 @@ static int sr_block_open(struct block_device *bdev, fmode_t mode) struct scsi_cd *cd; int ret = -ENXIO; - lock_kernel(); + mutex_lock(&sr_mutex); cd = scsi_cd_get(bdev->bd_disk); if (cd) { ret = cdrom_open(&cd->cdi, bdev, mode); if (ret) scsi_cd_put(cd); } - unlock_kernel(); + mutex_unlock(&sr_mutex); return ret; } static int sr_block_release(struct gendisk *disk, fmode_t mode) { struct scsi_cd *cd = scsi_cd(disk); - lock_kernel(); + mutex_lock(&sr_mutex); cdrom_release(&cd->cdi, mode); scsi_cd_put(cd); - unlock_kernel(); + mutex_unlock(&sr_mutex); return 0; } @@ -499,7 +499,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, void __user *argp = (void __user *)arg; int ret; - lock_kernel(); + mutex_lock(&sr_mutex); /* * Send SCSI addressing ioctls directly to mid level, send other @@ -529,7 +529,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, ret = scsi_ioctl(sdev, cmd, argp); out: - unlock_kernel(); + mutex_unlock(&sr_mutex); return ret; } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 24211d0efa6..afdc3f5d915 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -39,7 +39,6 @@ static const char *verstr = "20081215"; #include <linux/cdev.h> #include <linux/delay.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/dma.h> @@ -76,6 +75,7 @@ static const char *verstr = "20081215"; #include "st_options.h" #include "st.h" +static DEFINE_MUTEX(st_mutex); static int buffer_kbs; static int max_sg_segs; static int try_direct_io = TRY_DIRECT_IO; @@ -1180,7 +1180,7 @@ static int st_open(struct inode *inode, struct file *filp) int dev = TAPE_NR(inode); char *name; - lock_kernel(); + mutex_lock(&st_mutex); /* * We really want to do nonseekable_open(inode, filp); here, but some * versions of tar incorrectly call lseek on tapes and bail out if that @@ -1189,7 +1189,7 @@ static int st_open(struct inode *inode, struct file *filp) filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE); if (!(STp = scsi_tape_get(dev))) { - unlock_kernel(); + mutex_unlock(&st_mutex); return -ENXIO; } @@ -1200,7 +1200,7 @@ static int st_open(struct inode *inode, struct file *filp) if (STp->in_use) { write_unlock(&st_dev_arr_lock); scsi_tape_put(STp); - unlock_kernel(); + mutex_unlock(&st_mutex); DEB( printk(ST_DEB_MSG "%s: Device already in use.\n", name); ) return (-EBUSY); } @@ -1249,14 +1249,14 @@ static int st_open(struct inode *inode, struct file *filp) retval = (-EIO); goto err_out; } - unlock_kernel(); + mutex_unlock(&st_mutex); return 0; err_out: normalize_buffer(STp->buffer); STp->in_use = 0; scsi_tape_put(STp); - unlock_kernel(); + mutex_unlock(&st_mutex); return retval; } diff --git 
a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 12900f7083b..3198c5335f0 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -458,6 +458,7 @@ config SERIAL_SAMSUNG_UARTS int depends on ARM && PLAT_SAMSUNG default 2 if ARCH_S3C2400 + default 6 if ARCH_S5P6450 default 4 if SERIAL_SAMSUNG_UARTS_4 default 3 help @@ -526,12 +527,12 @@ config SERIAL_S3C24A0 Serial port support for the Samsung S3C24A0 SoC config SERIAL_S3C6400 - tristate "Samsung S3C6400/S3C6410/S5P6440/S5PC100 Serial port support" - depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5PC100) + tristate "Samsung S3C6400/S3C6410/S5P6440/S5P6450/S5PC100 Serial port support" + depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5P6450 || CPU_S5PC100) select SERIAL_SAMSUNG_UARTS_4 default y help - Serial port support for the Samsung S3C6400, S3C6410, S5P6440 + Serial port support for the Samsung S3C6400, S3C6410, S5P6440, S5P6450 and S5PC100 SoCs config SERIAL_S5PV210 diff --git a/drivers/serial/amba-pl010.c b/drivers/serial/amba-pl010.c index 50441ffe8e3..2904aa04412 100644 --- a/drivers/serial/amba-pl010.c +++ b/drivers/serial/amba-pl010.c @@ -472,14 +472,9 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios, spin_unlock_irqrestore(&uap->port.lock, flags); } -static void pl010_set_ldisc(struct uart_port *port) +static void pl010_set_ldisc(struct uart_port *port, int new) { - int line = port->line; - - if (line >= port->state->port.tty->driver->num) - return; - - if (port->state->port.tty->ldisc->ops->num == N_PPS) { + if (new == N_PPS) { port->flags |= UPF_HARDPPS_CD; pl010_enable_ms(port); } else diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c index 93de907b120..ee43efc7bdc 100644 --- a/drivers/serial/ioc3_serial.c +++ b/drivers/serial/ioc3_serial.c @@ -2017,6 +2017,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) struct ioc3_port *port; struct ioc3_port *ports[PORTS_PER_CARD]; int phys_port; + int cnt; DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd)); @@ -2044,6 +2045,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) if (!port) { printk(KERN_WARNING "IOC3 serial memory not available for port\n"); + ret = -ENOMEM; goto out4; } spin_lock_init(&port->ip_lock); @@ -2146,6 +2148,9 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) /* error exits that give back resources */ out4: + for (cnt = 0; cnt < phys_port; cnt++) + kfree(ports[cnt]); + kfree(card_ptr); return ret; } diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c index bc9af503907..dc0967fb9ea 100644 --- a/drivers/serial/mfd.c +++ b/drivers/serial/mfd.c @@ -27,6 +27,7 @@ #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> +#include <linux/slab.h> #include <linux/serial_reg.h> #include <linux/circ_buf.h> #include <linux/delay.h> @@ -227,12 +228,14 @@ static const struct file_operations port_regs_ops = { .owner = THIS_MODULE, .open = hsu_show_regs_open, .read = port_show_regs, + .llseek = default_llseek, }; static const struct file_operations dma_regs_ops = { .owner = THIS_MODULE, .open = hsu_show_regs_open, .read = dma_show_regs, + .llseek = default_llseek, }; static int hsu_debugfs_init(struct hsu_port *hsu) @@ -1423,7 +1426,6 @@ static void hsu_global_init(void) } phsu = hsu; - hsu_debugfs_init(hsu); return; @@ -1435,18 +1437,20 @@ err_free_region: static void serial_hsu_remove(struct pci_dev *pdev) { - struct hsu_port *hsu; - int i; + void 
*priv = pci_get_drvdata(pdev); + struct uart_hsu_port *up; - hsu = pci_get_drvdata(pdev); - if (!hsu) + if (!priv) return; - for (i = 0; i < 3; i++) - uart_remove_one_port(&serial_hsu_reg, &hsu->port[i].port); + /* For port 0/1/2, priv is the address of uart_hsu_port */ + if (pdev->device != 0x081E) { + up = priv; + uart_remove_one_port(&serial_hsu_reg, &up->port); + } pci_set_drvdata(pdev, NULL); - free_irq(hsu->irq, hsu); + free_irq(pdev->irq, priv); pci_disable_device(pdev); } diff --git a/drivers/serial/mrst_max3110.c b/drivers/serial/mrst_max3110.c index f6ad1ecbff7..51c15f58e01 100644 --- a/drivers/serial/mrst_max3110.c +++ b/drivers/serial/mrst_max3110.c @@ -29,6 +29,7 @@ #include <linux/module.h> #include <linux/ioport.h> +#include <linux/irq.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c index b1156ba8ad1..7ac2bf5167c 100644 --- a/drivers/serial/samsung.c +++ b/drivers/serial/samsung.c @@ -1101,7 +1101,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, dbg("resource %p (%lx..%lx)\n", res, res->start, res->end); port->mapbase = res->start; - port->membase = S3C_VA_UART + res->start - (S3C_PA_UART & 0xfff00000); + port->membase = S3C_VA_UART + (res->start & 0xfffff); ret = platform_get_irq(platdev, 0); if (ret < 0) port->irq = 0; diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index 7d475b2a79e..93760b2ea17 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c @@ -45,7 +45,6 @@ #include <asm/io.h> #include <asm/system.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> @@ -183,10 +182,8 @@ static void quirk_config_socket(struct pcmcia_device *link) { struct serial_info *info = link->priv; - if (info->multi) { - link->conf.Present |= PRESENT_EXT_STATUS; - link->conf.ExtStatus = ESR_REQ_ATTN_ENA; - } + if (info->multi) + link->config_flags |= CONF_ENABLE_ESR; } static const struct serial_quirk quirks[] = { @@ -265,13 +262,6 @@ static const struct serial_quirk quirks[] = { static int serial_config(struct pcmcia_device * link); -/*====================================================================== - - After a card is removed, serial_remove() will unregister - the serial device(s), and release the PCMCIA configuration. - -======================================================================*/ - static void serial_remove(struct pcmcia_device *link) { struct serial_info *info = link->priv; @@ -314,14 +304,6 @@ static int serial_resume(struct pcmcia_device *link) return 0; } -/*====================================================================== - - serial_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. 
- -======================================================================*/ - static int serial_probe(struct pcmcia_device *link) { struct serial_info *info; @@ -335,25 +317,13 @@ static int serial_probe(struct pcmcia_device *link) info->p_dev = link; link->priv = info; - link->conf.Attributes = CONF_ENABLE_IRQ; - if (do_sound) { - link->conf.Attributes |= CONF_ENABLE_SPKR; - link->conf.Status = CCSR_AUDIO_ENA; - } - link->conf.IntType = INT_MEMORY_AND_IO; + link->config_flags |= CONF_ENABLE_IRQ; + if (do_sound) + link->config_flags |= CONF_ENABLE_SPKR; return serial_config(link); } -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void serial_detach(struct pcmcia_device *link) { struct serial_info *info = link->priv; @@ -361,11 +331,6 @@ static void serial_detach(struct pcmcia_device *link) dev_dbg(&link->dev, "serial_detach\n"); /* - * Ensure any outstanding scheduled tasks are completed. - */ - flush_scheduled_work(); - - /* * Ensure that the ports have been released. */ serial_remove(link); @@ -430,47 +395,45 @@ static int pfc_config(struct pcmcia_device *p_dev) return -ENODEV; } -static int simple_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int simple_config_check(struct pcmcia_device *p_dev, void *priv_data) { static const int size_table[2] = { 8, 16 }; int *try = priv_data; - if (cf->vpp1.present & (1 << CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = - cf->vpp1.param[CISTPL_POWER_VNOM] / 10000; + if (p_dev->resource[0]->start == 0) + return -ENODEV; - p_dev->io_lines = ((*try & 0x1) == 0) ? - 16 : cf->io.flags & CISTPL_IO_LINES_MASK; + if ((*try & 0x1) == 0) + p_dev->io_lines = 16; - if ((cf->io.nwin > 0) && (cf->io.win[0].len == size_table[(*try >> 1)]) - && (cf->io.win[0].base != 0)) { - p_dev->resource[0]->start = cf->io.win[0].base; - if (!pcmcia_request_io(p_dev)) - return 0; - } - return -EINVAL; + if (p_dev->resource[0]->end != size_table[(*try >> 1)]) + return -ENODEV; + + p_dev->resource[0]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + return pcmcia_request_io(p_dev); } static int simple_config_check_notpicky(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { static const unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 }; int j; - if ((cf->io.nwin > 0) && ((cf->io.flags & CISTPL_IO_LINES_MASK) <= 3)) { - for (j = 0; j < 5; j++) { - p_dev->resource[0]->start = base[j]; - p_dev->io_lines = base[j] ? 16 : 3; - if (!pcmcia_request_io(p_dev)) - return 0; - } + if (p_dev->io_lines > 3) + return -ENODEV; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = 8; + + for (j = 0; j < 5; j++) { + p_dev->resource[0]->start = base[j]; + p_dev->io_lines = base[j] ? 
16 : 3; + if (!pcmcia_request_io(p_dev)) + return 0; } return -ENODEV; } @@ -480,11 +443,9 @@ static int simple_config(struct pcmcia_device *link) struct serial_info *info = link->priv; int i = -ENODEV, try; - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[0]->end = 8; - /* First pass: look for a config entry that looks normal. * Two tries: without IO aliases, then with aliases */ + link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_SET_IO; for (try = 0; try < 4; try++) if (!pcmcia_loop_config(link, simple_config_check, &try)) goto found_port; @@ -500,7 +461,7 @@ static int simple_config(struct pcmcia_device *link) found_port: if (info->multi && (info->manfid == MANFID_3COM)) - link->conf.ConfigIndex &= ~(0x08); + link->config_index &= ~(0x08); /* * Apply any configuration quirks. @@ -508,51 +469,50 @@ found_port: if (info->quirk && info->quirk->config) info->quirk->config(link); - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) return -1; return setup_serial(link, info, link->resource[0]->start, link->irq); } -static int multi_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int multi_config_check(struct pcmcia_device *p_dev, void *priv_data) { - int *base2 = priv_data; + int *multi = priv_data; + + if (p_dev->resource[1]->end) + return -EINVAL; /* The quad port cards have bad CIS's, so just look for a window larger than 8 ports and assume it will be right */ - if ((cf->io.nwin == 1) && (cf->io.win[0].len > 8)) { - p_dev->resource[0]->start = cf->io.win[0].base; - p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK; - if (!pcmcia_request_io(p_dev)) { - *base2 = p_dev->resource[0]->start + 8; - return 0; - } - } - return -ENODEV; + if (p_dev->resource[0]->end <= 8) + return -EINVAL; + + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[0]->end = *multi * 8; + + if (pcmcia_request_io(p_dev)) + return -ENODEV; + return 0; } static int multi_config_check_notpicky(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { int *base2 = priv_data; - if (cf->io.nwin == 2) { - p_dev->resource[0]->start = cf->io.win[0].base; - p_dev->resource[1]->start = cf->io.win[1].base; - p_dev->io_lines = cf->io.flags & CISTPL_IO_LINES_MASK; - if (!pcmcia_request_io(p_dev)) { - *base2 = p_dev->resource[1]->start; - return 0; - } - } - return -ENODEV; + if (!p_dev->resource[0]->end || !p_dev->resource[1]->end) + return -ENODEV; + + p_dev->resource[0]->end = p_dev->resource[1]->end = 8; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + + if (pcmcia_request_io(p_dev)) + return -ENODEV; + + *base2 = p_dev->resource[0]->start + 8; + return 0; } static int multi_config(struct pcmcia_device *link) @@ -560,12 +520,12 @@ static int multi_config(struct pcmcia_device *link) struct serial_info *info = link->priv; int i, base2 = 0; + link->config_flags |= CONF_AUTO_SET_IO; /* First, look for a generic full-sized window */ - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - link->resource[0]->end = info->multi * 8; - if (pcmcia_loop_config(link, multi_config_check, &base2)) { + if (!pcmcia_loop_config(link, multi_config_check, &info->multi)) + base2 = link->resource[0]->start + 8; + else { /* If that didn't work, look for two windows */ - 
link->resource[0]->end = link->resource[1]->end = 8; info->multi = 2; if (pcmcia_loop_config(link, multi_config_check_notpicky, &base2)) { @@ -584,7 +544,7 @@ static int multi_config(struct pcmcia_device *link) if (info->quirk && info->quirk->config) info->quirk->config(link); - i = pcmcia_request_configuration(link, &link->conf); + i = pcmcia_enable_device(link); if (i != 0) return -ENODEV; @@ -596,11 +556,11 @@ static int multi_config(struct pcmcia_device *link) info->prodid == PRODID_POSSIO_GCC)) { int err; - if (link->conf.ConfigIndex == 1 || - link->conf.ConfigIndex == 3) { + if (link->config_index == 1 || + link->config_index == 3) { err = setup_serial(link, info, base2, link->irq); - base2 = link->resource[0]->start;; + base2 = link->resource[0]->start; } else { err = setup_serial(link, info, link->resource[0]->start, link->irq); @@ -624,33 +584,24 @@ static int multi_config(struct pcmcia_device *link) return 0; } -static int serial_check_for_multi(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cf, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int serial_check_for_multi(struct pcmcia_device *p_dev, void *priv_data) { struct serial_info *info = p_dev->priv; - if ((cf->io.nwin == 1) && (cf->io.win[0].len % 8 == 0)) - info->multi = cf->io.win[0].len >> 3; + if (!p_dev->resource[0]->end) + return -EINVAL; + + if ((!p_dev->resource[1]->end) && (p_dev->resource[0]->end % 8 == 0)) + info->multi = p_dev->resource[0]->end >> 3; - if ((cf->io.nwin == 2) && (cf->io.win[0].len == 8) && - (cf->io.win[1].len == 8)) + if ((p_dev->resource[1]->end) && (p_dev->resource[0]->end == 8) + && (p_dev->resource[1]->end == 8)) info->multi = 2; return 0; /* break */ } -/*====================================================================== - - serial_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - serial device available to the system. - -======================================================================*/ - static int serial_config(struct pcmcia_device * link) { struct serial_info *info = link->priv; @@ -894,9 +845,7 @@ MODULE_FIRMWARE("cis/RS-COM-2P.cis"); static struct pcmcia_driver serial_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "serial_cs", - }, + .name = "serial_cs", .probe = serial_probe, .remove = serial_detach, .id_table = serial_ids, diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 91c2f4f3af1..4b9eec68fad 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -143,10 +143,26 @@ config SPI_GPIO GPIO operations, you should be able to leverage that for better speed with a custom version of this driver; see the source code. +config SPI_IMX_VER_IMX1 + def_bool y if SOC_IMX1 + +config SPI_IMX_VER_0_0 + def_bool y if SOC_IMX21 || SOC_IMX27 + +config SPI_IMX_VER_0_4 + def_bool y if ARCH_MX31 + +config SPI_IMX_VER_0_7 + def_bool y if ARCH_MX25 || ARCH_MX35 || ARCH_MX51 + +config SPI_IMX_VER_2_3 + def_bool y if ARCH_MX51 + config SPI_IMX tristate "Freescale i.MX SPI controllers" depends on ARCH_MXC select SPI_BITBANG + default m if IMX_HAVE_PLATFORM_SPI_IMX help This enables using the Freescale i.MX SPI controllers in master mode. @@ -182,12 +198,27 @@ config SPI_MPC512x_PSC This enables using the Freescale MPC5121 Programmable Serial Controller in SPI master mode. 
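The serial_cs changes above are part of the PCMCIA API rework: the config-table walking callbacks no longer receive cistpl_cftable_entry_t pointers, the core instead pre-fills p_dev->resource[] and p_dev->io_lines from each entry, drivers request behaviour through link->config_flags, and pcmcia_request_configuration() becomes pcmcia_enable_device(). A bare-bones sketch of the new callback style, using only constants that appear in the hunks above (my_check and my_config are hypothetical names):

#include <linux/errno.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

static int my_check(struct pcmcia_device *p_dev, void *priv_data)
{
	if (p_dev->resource[0]->end != 8)	/* only take an 8-byte I/O window */
		return -ENODEV;

	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;

	return pcmcia_request_io(p_dev);	/* returning 0 stops the loop here */
}

static int my_config(struct pcmcia_device *link)
{
	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	if (pcmcia_loop_config(link, my_check, NULL))
		return -ENODEV;

	return pcmcia_enable_device(link);	/* was pcmcia_request_configuration() */
}
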
-config SPI_MPC8xxx - tristate "Freescale MPC8xxx SPI controller" +config SPI_FSL_LIB + tristate + depends on FSL_SOC + +config SPI_FSL_SPI + tristate "Freescale SPI controller" depends on FSL_SOC + select SPI_FSL_LIB help - This enables using the Freescale MPC8xxx SPI controllers in master - mode. + This enables using the Freescale SPI controllers in master mode. + MPC83xx platform uses the controller in cpu mode or CPM/QE mode. + MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. + +config SPI_FSL_ESPI + tristate "Freescale eSPI controller" + depends on FSL_SOC + select SPI_FSL_LIB + help + This enables using the Freescale eSPI controllers in master mode. + From MPC8536, 85xx platform uses the controller, and all P10xx, + P20xx, P30xx,P40xx, P50xx uses this controller. config SPI_OMAP_UWIRE tristate "OMAP1 MicroWire" @@ -298,6 +329,13 @@ config SPI_STMP3XXX help SPI driver for Freescale STMP37xx/378x SoC SSP interface +config SPI_TOPCLIFF_PCH + tristate "Topcliff PCH SPI Controller" + depends on PCI + help + SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus + used in some x86 embedded processors. + config SPI_TXX9 tristate "Toshiba TXx9 SPI controller" depends on GENERIC_GPIO && CPU_TX49XX diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index e9cbd18217a..557aaadf56b 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -2,9 +2,7 @@ # Makefile for kernel SPI drivers. # -ifeq ($(CONFIG_SPI_DEBUG),y) -EXTRA_CFLAGS += -DDEBUG -endif +ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG # small core, mostly translating board-specific # config declarations into driver model code @@ -34,11 +32,14 @@ obj-$(CONFIG_SPI_PL022) += amba-pl022.o obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o -obj-$(CONFIG_SPI_MPC8xxx) += spi_mpc8xxx.o +obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o +obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o +obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o +obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o obj-$(CONFIG_SPI_TXX9) += spi_txx9.o obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o obj-$(CONFIG_SPI_XILINX_OF) += xilinx_spi_of.o diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index 4c37c4e2864..fb3d1b31772 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c @@ -27,7 +27,6 @@ /* * TODO: * - add timeout on polled transfers - * - add generic DMA framework support */ #include <linux/init.h> @@ -45,6 +44,9 @@ #include <linux/amba/pl022.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> /* * This macro is used to define some register default values. 
@@ -381,6 +383,14 @@ struct pl022 { enum ssp_reading read; enum ssp_writing write; u32 exp_fifo_level; + /* DMA settings */ +#ifdef CONFIG_DMA_ENGINE + struct dma_chan *dma_rx_channel; + struct dma_chan *dma_tx_channel; + struct sg_table sgt_rx; + struct sg_table sgt_tx; + char *dummypage; +#endif }; /** @@ -406,7 +416,7 @@ struct chip_data { u16 dmacr; u16 cpsr; u8 n_bytes; - u8 enable_dma:1; + bool enable_dma; enum ssp_reading read; enum ssp_writing write; void (*cs_control) (u32 command); @@ -763,6 +773,371 @@ static void *next_transfer(struct pl022 *pl022) } return STATE_DONE; } + +/* + * This DMA functionality is only compiled in if we have + * access to the generic DMA devices/DMA engine. + */ +#ifdef CONFIG_DMA_ENGINE +static void unmap_free_dma_scatter(struct pl022 *pl022) +{ + /* Unmap and free the SG tables */ + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, DMA_FROM_DEVICE); + sg_free_table(&pl022->sgt_rx); + sg_free_table(&pl022->sgt_tx); +} + +static void dma_callback(void *data) +{ + struct pl022 *pl022 = data; + struct spi_message *msg = pl022->cur_msg; + + BUG_ON(!pl022->sgt_rx.sgl); + +#ifdef VERBOSE_DEBUG + /* + * Optionally dump out buffers to inspect contents, this is + * good if you want to convince yourself that the loopback + * read/write contents are the same, when adopting to a new + * DMA engine. + */ + { + struct scatterlist *sg; + unsigned int i; + + dma_sync_sg_for_cpu(&pl022->adev->dev, + pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, + DMA_FROM_DEVICE); + + for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI RX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { + dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); + print_hex_dump(KERN_ERR, "SPI TX: ", + DUMP_PREFIX_OFFSET, + 16, + 1, + sg_virt(sg), + sg_dma_len(sg), + 1); + } + } +#endif + + unmap_free_dma_scatter(pl022); + + /* Update total bytes transfered */ + msg->actual_length += pl022->cur_transfer->len; + if (pl022->cur_transfer->cs_change) + pl022->cur_chip-> + cs_control(SSP_CHIP_DESELECT); + + /* Move to next transfer */ + msg->state = next_transfer(pl022); + tasklet_schedule(&pl022->pump_transfers); +} + +static void setup_dma_scatter(struct pl022 *pl022, + void *buffer, + unsigned int length, + struct sg_table *sgtab) +{ + struct scatterlist *sg; + int bytesleft = length; + void *bufp = buffer; + int mapbytes; + int i; + + if (buffer) { + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + /* + * If there are less bytes left than what fits + * in the current page (plus page alignment offset) + * we just feed in this, else we stuff in as much + * as we can. 
+ */ + if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE - offset_in_page(bufp); + sg_set_page(sg, virt_to_page(bufp), + mapbytes, offset_in_page(bufp)); + bufp += mapbytes; + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX target page @ %p, %d bytes, %d left\n", + bufp, mapbytes, bytesleft); + } + } else { + /* Map the dummy buffer on every page */ + for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { + if (bytesleft < PAGE_SIZE) + mapbytes = bytesleft; + else + mapbytes = PAGE_SIZE; + sg_set_page(sg, virt_to_page(pl022->dummypage), + mapbytes, 0); + bytesleft -= mapbytes; + dev_dbg(&pl022->adev->dev, + "set RX/TX to dummy page %d bytes, %d left\n", + mapbytes, bytesleft); + + } + } + BUG_ON(bytesleft); +} + +/** + * configure_dma - configures the channels for the next transfer + * @pl022: SSP driver's private data structure + */ +static int configure_dma(struct pl022 *pl022) +{ + struct dma_slave_config rx_conf = { + .src_addr = SSP_DR(pl022->phybase), + .direction = DMA_FROM_DEVICE, + .src_maxburst = pl022->vendor->fifodepth >> 1, + }; + struct dma_slave_config tx_conf = { + .dst_addr = SSP_DR(pl022->phybase), + .direction = DMA_TO_DEVICE, + .dst_maxburst = pl022->vendor->fifodepth >> 1, + }; + unsigned int pages; + int ret; + int sglen; + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + struct dma_async_tx_descriptor *rxdesc; + struct dma_async_tx_descriptor *txdesc; + dma_cookie_t cookie; + + /* Check that the channels are available */ + if (!rxchan || !txchan) + return -ENODEV; + + switch (pl022->read) { + case READING_NULL: + /* Use the same as for writing */ + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case READING_U8: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case READING_U16: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case READING_U32: + rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + break; + } + + switch (pl022->write) { + case WRITING_NULL: + /* Use the same as for reading */ + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + break; + case WRITING_U8: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + break; + case WRITING_U16: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + break; + case WRITING_U32: + tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;; + break; + } + + /* SPI pecularity: we need to read and write the same width */ + if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + rx_conf.src_addr_width = tx_conf.dst_addr_width; + if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) + tx_conf.dst_addr_width = rx_conf.src_addr_width; + BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); + + rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, + (unsigned long) &rx_conf); + txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, + (unsigned long) &tx_conf); + + /* Create sglists for the transfers */ + pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; + dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); + + ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_rx_sg; + + ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); + if (ret) + goto err_alloc_tx_sg; + + /* Fill in the scatterlists for the RX+TX buffers */ + setup_dma_scatter(pl022, pl022->rx, + pl022->cur_transfer->len, &pl022->sgt_rx); + setup_dma_scatter(pl022, pl022->tx, + pl022->cur_transfer->len, &pl022->sgt_tx); 
+ + /* Map DMA buffers */ + sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, DMA_FROM_DEVICE); + if (!sglen) + goto err_rx_sgmap; + + sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); + if (!sglen) + goto err_tx_sgmap; + + /* Send both scatterlists */ + rxdesc = rxchan->device->device_prep_slave_sg(rxchan, + pl022->sgt_rx.sgl, + pl022->sgt_rx.nents, + DMA_FROM_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto err_rxdesc; + + txdesc = txchan->device->device_prep_slave_sg(txchan, + pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, + DMA_TO_DEVICE, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto err_txdesc; + + /* Put the callback on the RX transfer only, that should finish last */ + rxdesc->callback = dma_callback; + rxdesc->callback_param = pl022; + + /* Submit and fire RX and TX with TX last so we're ready to read! */ + cookie = rxdesc->tx_submit(rxdesc); + if (dma_submit_error(cookie)) + goto err_submit_rx; + cookie = txdesc->tx_submit(txdesc); + if (dma_submit_error(cookie)) + goto err_submit_tx; + rxchan->device->device_issue_pending(rxchan); + txchan->device->device_issue_pending(txchan); + + return 0; + +err_submit_tx: +err_submit_rx: +err_txdesc: + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); +err_rxdesc: + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, + pl022->sgt_tx.nents, DMA_TO_DEVICE); +err_tx_sgmap: + dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, + pl022->sgt_tx.nents, DMA_FROM_DEVICE); +err_rx_sgmap: + sg_free_table(&pl022->sgt_tx); +err_alloc_tx_sg: + sg_free_table(&pl022->sgt_rx); +err_alloc_rx_sg: + return -ENOMEM; +} + +static int __init pl022_dma_probe(struct pl022 *pl022) +{ + dma_cap_mask_t mask; + + /* Try to acquire a generic DMA engine slave channel */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + /* + * We need both RX and TX channels to do DMA, else do none + * of them. 
+ */ + pl022->dma_rx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_rx_param); + if (!pl022->dma_rx_channel) { + dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); + goto err_no_rxchan; + } + + pl022->dma_tx_channel = dma_request_channel(mask, + pl022->master_info->dma_filter, + pl022->master_info->dma_tx_param); + if (!pl022->dma_tx_channel) { + dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); + goto err_no_txchan; + } + + pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pl022->dummypage) { + dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); + goto err_no_dummypage; + } + + dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", + dma_chan_name(pl022->dma_rx_channel), + dma_chan_name(pl022->dma_tx_channel)); + + return 0; + +err_no_dummypage: + dma_release_channel(pl022->dma_tx_channel); +err_no_txchan: + dma_release_channel(pl022->dma_rx_channel); + pl022->dma_rx_channel = NULL; +err_no_rxchan: + return -ENODEV; +} + +static void terminate_dma(struct pl022 *pl022) +{ + struct dma_chan *rxchan = pl022->dma_rx_channel; + struct dma_chan *txchan = pl022->dma_tx_channel; + + rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); + txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); + unmap_free_dma_scatter(pl022); +} + +static void pl022_dma_remove(struct pl022 *pl022) +{ + if (pl022->busy) + terminate_dma(pl022); + if (pl022->dma_tx_channel) + dma_release_channel(pl022->dma_tx_channel); + if (pl022->dma_rx_channel) + dma_release_channel(pl022->dma_rx_channel); + kfree(pl022->dummypage); +} + +#else +static inline int configure_dma(struct pl022 *pl022) +{ + return -ENODEV; +} + +static inline int pl022_dma_probe(struct pl022 *pl022) +{ + return 0; +} + +static inline void pl022_dma_remove(struct pl022 *pl022) +{ +} +#endif + /** * pl022_interrupt_handler - Interrupt handler for SSP controller * @@ -794,14 +1169,17 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) if (unlikely(!irq_status)) return IRQ_NONE; - /* This handles the error code interrupts */ + /* + * This handles the FIFO interrupts, the timeout + * interrupts are flatly ignored, they cannot be + * trusted. + */ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { /* * Overrun interrupt - bail out since our Data has been * corrupted */ - dev_err(&pl022->adev->dev, - "FIFO overrun\n"); + dev_err(&pl022->adev->dev, "FIFO overrun\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) dev_err(&pl022->adev->dev, "RXFIFO is full\n"); @@ -896,8 +1274,8 @@ static int set_up_next_transfer(struct pl022 *pl022, } /** - * pump_transfers - Tasklet function which schedules next interrupt transfer - * when running in interrupt transfer mode. + * pump_transfers - Tasklet function which schedules next transfer + * when running in interrupt or DMA transfer mode. * @data: SSP driver private data structure * */ @@ -954,65 +1332,23 @@ static void pump_transfers(unsigned long data) } /* Flush the FIFOs and let's go! */ flush(pl022); - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); -} - -/** - * NOT IMPLEMENTED - * configure_dma - It configures the DMA pipes for DMA transfers - * @data: SSP driver's private data structure - * - */ -static int configure_dma(void *data) -{ - struct pl022 *pl022 = data; - dev_dbg(&pl022->adev->dev, "configure DMA\n"); - return -ENOTSUPP; -} - -/** - * do_dma_transfer - It handles transfers of the current message - * if it is DMA xfer. 
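The new pl022 DMA support above is a straight use of this kernel's generic dmaengine slave API: grab channels with dma_request_channel() and a platform filter, push a dma_slave_config through device_control(), build scatterlists, then prep, submit and issue. Stripped of the pl022 specifics, the skeleton looks like this; the my_* names, the fixed 16-bit bus width and the burst size are illustrative assumptions only:

#include <linux/errno.h>
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
	return chan->chan_id == *(int *)param;	/* platform decides the match */
}

static int my_rx_dma(dma_addr_t fifo, struct scatterlist *sgl, int nents,
		     int *wanted, dma_async_tx_callback done, void *done_arg)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	struct dma_slave_config conf = {
		.src_addr	= fifo,			/* device data register */
		.direction	= DMA_FROM_DEVICE,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 8,
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, my_filter, wanted);
	if (!chan)
		return -ENODEV;

	chan->device->device_control(chan, DMA_SLAVE_CONFIG,
				     (unsigned long)&conf);

	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
					DMA_FROM_DEVICE,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc->callback = done;			/* runs when the transfer completes */
	desc->callback_param = done_arg;

	if (dma_submit_error(desc->tx_submit(desc))) {
		dma_release_channel(chan);
		return -EIO;
	}
	chan->device->device_issue_pending(chan);	/* start the engine */
	return 0;
}
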
- * NOT FULLY IMPLEMENTED - * @data: SSP driver's private data structure - */ -static void do_dma_transfer(void *data) -{ - struct pl022 *pl022 = data; - - if (configure_dma(data)) { - dev_dbg(&pl022->adev->dev, "configuration of DMA Failed!\n"); - goto err_config_dma; - } - /* TODO: Implememt DMA setup of pipes here */ - - /* Enable target chip, set up transfer */ - pl022->cur_chip->cs_control(SSP_CHIP_SELECT); - if (set_up_next_transfer(pl022, pl022->cur_transfer)) { - /* Error path */ - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); + if (pl022->cur_chip->enable_dma) { + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } return; } - /* Enable SSP */ - writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), - SSP_CR1(pl022->virtbase)); - - /* TODO: Enable the DMA transfer here */ - return; - err_config_dma: - pl022->cur_msg->state = STATE_ERROR; - pl022->cur_msg->status = -EIO; - giveback(pl022); - return; +err_config_dma: + writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); } -static void do_interrupt_transfer(void *data) +static void do_interrupt_dma_transfer(struct pl022 *pl022) { - struct pl022 *pl022 = data; + u32 irqflags = ENABLE_ALL_INTERRUPTS; /* Enable target chip */ pl022->cur_chip->cs_control(SSP_CHIP_SELECT); @@ -1023,15 +1359,26 @@ static void do_interrupt_transfer(void *data) giveback(pl022); return; } + /* If we're using DMA, set up DMA here */ + if (pl022->cur_chip->enable_dma) { + /* Configure DMA transfer */ + if (configure_dma(pl022)) { + dev_dbg(&pl022->adev->dev, + "configuration of DMA failed, fall back to interrupt mode\n"); + goto err_config_dma; + } + /* Disable interrupts in DMA mode, IRQ from DMA controller */ + irqflags = DISABLE_ALL_INTERRUPTS; + } +err_config_dma: /* Enable SSP, turn on interrupts */ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); - writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); + writew(irqflags, SSP_IMSC(pl022->virtbase)); } -static void do_polling_transfer(void *data) +static void do_polling_transfer(struct pl022 *pl022) { - struct pl022 *pl022 = data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; @@ -1101,7 +1448,7 @@ static void do_polling_transfer(void *data) * * This function checks if there is any spi message in the queue that * needs processing and delegate control to appropriate function - * do_polling_transfer()/do_interrupt_transfer()/do_dma_transfer() + * do_polling_transfer()/do_interrupt_dma_transfer() * based on the kind of the transfer * */ @@ -1150,10 +1497,8 @@ static void pump_messages(struct work_struct *work) if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) do_polling_transfer(pl022); - else if (pl022->cur_chip->xfer_type == INTERRUPT_TRANSFER) - do_interrupt_transfer(pl022); else - do_dma_transfer(pl022); + do_interrupt_dma_transfer(pl022); } @@ -1248,100 +1593,56 @@ static int destroy_queue(struct pl022 *pl022) } static int verify_controller_parameters(struct pl022 *pl022, - struct pl022_config_chip *chip_info) + struct pl022_config_chip const *chip_info) { - if ((chip_info->lbm != LOOPBACK_ENABLED) - && (chip_info->lbm != LOOPBACK_DISABLED)) { - dev_err(chip_info->dev, - "loopback Mode is configured incorrectly\n"); - return -EINVAL; - } if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { - 
dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "interface is configured incorrectly\n"); return -EINVAL; } if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && (!pl022->vendor->unidir)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "unidirectional mode not supported in this " "hardware version\n"); return -EINVAL; } if ((chip_info->hierarchy != SSP_MASTER) && (chip_info->hierarchy != SSP_SLAVE)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "hierarchy is configured incorrectly\n"); return -EINVAL; } - if (((chip_info->clk_freq).cpsdvsr < CPSDVR_MIN) - || ((chip_info->clk_freq).cpsdvsr > CPSDVR_MAX)) { - dev_err(chip_info->dev, - "cpsdvsr is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->endian_rx != SSP_RX_MSB) - && (chip_info->endian_rx != SSP_RX_LSB)) { - dev_err(chip_info->dev, - "RX FIFO endianess is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->endian_tx != SSP_TX_MSB) - && (chip_info->endian_tx != SSP_TX_LSB)) { - dev_err(chip_info->dev, - "TX FIFO endianess is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->data_size < SSP_DATA_BITS_4) - || (chip_info->data_size > SSP_DATA_BITS_32)) { - dev_err(chip_info->dev, - "DATA Size is configured incorrectly\n"); - return -EINVAL; - } if ((chip_info->com_mode != INTERRUPT_TRANSFER) && (chip_info->com_mode != DMA_TRANSFER) && (chip_info->com_mode != POLLING_TRANSFER)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "Communication mode is configured incorrectly\n"); return -EINVAL; } if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } - if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { - if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE) - && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) { - dev_err(chip_info->dev, - "Clock Phase is configured incorrectly\n"); - return -EINVAL; - } - if ((chip_info->clk_pol != SSP_CLK_POL_IDLE_LOW) - && (chip_info->clk_pol != SSP_CLK_POL_IDLE_HIGH)) { - dev_err(chip_info->dev, - "Clock Polarity is configured incorrectly\n"); - return -EINVAL; - } - } if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { if ((chip_info->ctrl_len < SSP_BITS_4) || (chip_info->ctrl_len > SSP_BITS_32)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "CTRL LEN is configured incorrectly\n"); return -EINVAL; } if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "Wait State is configured incorrectly\n"); return -EINVAL; } @@ -1350,24 +1651,20 @@ static int verify_controller_parameters(struct pl022 *pl022, if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) && (chip_info->duplex != - SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) - dev_err(chip_info->dev, + SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { + dev_err(&pl022->adev->dev, "Microwire duplex mode is configured incorrectly\n"); return -EINVAL; + } } else { if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) - dev_err(chip_info->dev, + dev_err(&pl022->adev->dev, "Microwire half duplex mode requested," " but this is only available in the" 
" ST version of PL022\n"); return -EINVAL; } } - if (chip_info->cs_control == NULL) { - dev_warn(chip_info->dev, - "Chip Select Function is NULL for this chip\n"); - chip_info->cs_control = null_cs_control; - } return 0; } @@ -1467,22 +1764,24 @@ static int calculate_effective_freq(struct pl022 *pl022, return 0; } -/** - * NOT IMPLEMENTED - * process_dma_info - Processes the DMA info provided by client drivers - * @chip_info: chip info provided by client device - * @chip: Runtime state maintained by the SSP controller for each spi device - * - * This function processes and stores DMA config provided by client driver - * into the runtime state maintained by the SSP controller driver + +/* + * A piece of default chip info unless the platform + * supplies it. */ -static int process_dma_info(struct pl022_config_chip *chip_info, - struct chip_data *chip) -{ - dev_err(chip_info->dev, - "cannot process DMA info, DMA not implemented!\n"); - return -ENOTSUPP; -} +static const struct pl022_config_chip pl022_default_chip_info = { + .com_mode = POLLING_TRANSFER, + .iface = SSP_INTERFACE_MOTOROLA_SPI, + .hierarchy = SSP_SLAVE, + .slave_tx_disable = DO_NOT_DRIVE_TX, + .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, + .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, + .ctrl_len = SSP_BITS_8, + .wait_state = SSP_MWIRE_WAIT_ZERO, + .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + .cs_control = null_cs_control, +}; + /** * pl022_setup - setup function registered to SPI master framework @@ -1496,23 +1795,15 @@ static int process_dma_info(struct pl022_config_chip *chip_info, * controller hardware here, that is not done until the actual transfer * commence. */ - -/* FIXME: JUST GUESSING the spi->mode bits understood by this driver */ -#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ - | SPI_LSB_FIRST | SPI_LOOP) - static int pl022_setup(struct spi_device *spi) { - struct pl022_config_chip *chip_info; + struct pl022_config_chip const *chip_info; struct chip_data *chip; + struct ssp_clock_params clk_freq; int status = 0; struct pl022 *pl022 = spi_master_get_devdata(spi->master); - - if (spi->mode & ~MODEBITS) { - dev_dbg(&spi->dev, "unsupported mode bits %x\n", - spi->mode & ~MODEBITS); - return -EINVAL; - } + unsigned int bits = spi->bits_per_word; + u32 tmp; if (!spi->max_speed_hz) return -EINVAL; @@ -1535,48 +1826,13 @@ static int pl022_setup(struct spi_device *spi) chip_info = spi->controller_data; if (chip_info == NULL) { + chip_info = &pl022_default_chip_info; /* spi_board_info.controller_data not is supplied */ dev_dbg(&spi->dev, "using default controller_data settings\n"); - - chip_info = - kzalloc(sizeof(struct pl022_config_chip), GFP_KERNEL); - - if (!chip_info) { - dev_err(&spi->dev, - "cannot allocate controller data\n"); - status = -ENOMEM; - goto err_first_setup; - } - - dev_dbg(&spi->dev, "allocated memory for controller data\n"); - - /* Pointer back to the SPI device */ - chip_info->dev = &spi->dev; - /* - * Set controller data default values: - * Polling is supported by default - */ - chip_info->lbm = LOOPBACK_DISABLED; - chip_info->com_mode = POLLING_TRANSFER; - chip_info->iface = SSP_INTERFACE_MOTOROLA_SPI; - chip_info->hierarchy = SSP_SLAVE; - chip_info->slave_tx_disable = DO_NOT_DRIVE_TX; - chip_info->endian_tx = SSP_TX_LSB; - chip_info->endian_rx = SSP_RX_LSB; - chip_info->data_size = SSP_DATA_BITS_12; - chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; - chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; - chip_info->clk_phase = SSP_CLK_SECOND_EDGE; - chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; - 
chip_info->ctrl_len = SSP_BITS_8; - chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; - chip_info->duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX; - chip_info->cs_control = null_cs_control; - } else { + } else dev_dbg(&spi->dev, "using user supplied controller_data settings\n"); - } /* * We can override with custom divisors, else we use the board @@ -1586,29 +1842,48 @@ static int pl022_setup(struct spi_device *spi) && (0 == chip_info->clk_freq.scr)) { status = calculate_effective_freq(pl022, spi->max_speed_hz, - &chip_info->clk_freq); + &clk_freq); if (status < 0) goto err_config_params; } else { - if ((chip_info->clk_freq.cpsdvsr % 2) != 0) - chip_info->clk_freq.cpsdvsr = - chip_info->clk_freq.cpsdvsr - 1; + memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); + if ((clk_freq.cpsdvsr % 2) != 0) + clk_freq.cpsdvsr = + clk_freq.cpsdvsr - 1; + } + if ((clk_freq.cpsdvsr < CPSDVR_MIN) + || (clk_freq.cpsdvsr > CPSDVR_MAX)) { + dev_err(&spi->dev, + "cpsdvsr is configured incorrectly\n"); + goto err_config_params; } + + status = verify_controller_parameters(pl022, chip_info); if (status) { dev_err(&spi->dev, "controller data is incorrect"); goto err_config_params; } + /* Now set controller state based on controller data */ chip->xfer_type = chip_info->com_mode; - chip->cs_control = chip_info->cs_control; - - if (chip_info->data_size <= 8) { - dev_dbg(&spi->dev, "1 <= n <=8 bits per word\n"); + if (!chip_info->cs_control) { + chip->cs_control = null_cs_control; + dev_warn(&spi->dev, + "chip select function is NULL for this chip\n"); + } else + chip->cs_control = chip_info->cs_control; + + if (bits <= 3) { + /* PL022 doesn't support less than 4-bits */ + status = -ENOTSUPP; + goto err_config_params; + } else if (bits <= 8) { + dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); chip->n_bytes = 1; chip->read = READING_U8; chip->write = WRITING_U8; - } else if (chip_info->data_size <= 16) { + } else if (bits <= 16) { dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); chip->n_bytes = 2; chip->read = READING_U16; @@ -1625,6 +1900,7 @@ static int pl022_setup(struct spi_device *spi) dev_err(&spi->dev, "a standard pl022 can only handle " "1 <= n <= 16 bit words\n"); + status = -ENOTSUPP; goto err_config_params; } } @@ -1636,9 +1912,8 @@ static int pl022_setup(struct spi_device *spi) chip->cpsr = 0; if ((chip_info->com_mode == DMA_TRANSFER) && ((pl022->master_info)->enable_dma)) { - chip->enable_dma = 1; + chip->enable_dma = true; dev_dbg(&spi->dev, "DMA mode set in controller state\n"); - status = process_dma_info(chip_info, chip); if (status < 0) goto err_config_params; SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, @@ -1646,7 +1921,7 @@ static int pl022_setup(struct spi_device *spi) SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_TXDMAE, 1); } else { - chip->enable_dma = 0; + chip->enable_dma = false; dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0); @@ -1654,10 +1929,12 @@ static int pl022_setup(struct spi_device *spi) SSP_DMACR_MASK_TXDMAE, 1); } - chip->cpsr = chip_info->clk_freq.cpsdvsr; + chip->cpsr = clk_freq.cpsdvsr; /* Special setup for the ST micro extended control registers */ if (pl022->vendor->extended_cr) { + u32 etx; + if (pl022->vendor->pl023) { /* These bits are only in the PL023 */ SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, @@ -1673,29 +1950,51 @@ static int pl022_setup(struct spi_device *spi) SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT_ST, 6); } - 
SSP_WRITE_BITS(chip->cr0, chip_info->data_size, + SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS_ST, 0); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_rx, - SSP_CR1_MASK_RENDN_ST, 4); - SSP_WRITE_BITS(chip->cr1, chip_info->endian_tx, - SSP_CR1_MASK_TENDN_ST, 5); + + if (spi->mode & SPI_LSB_FIRST) { + tmp = SSP_RX_LSB; + etx = SSP_TX_LSB; + } else { + tmp = SSP_RX_MSB; + etx = SSP_TX_MSB; + } + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); + SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL_ST, 7); SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL_ST, 10); } else { - SSP_WRITE_BITS(chip->cr0, chip_info->data_size, + SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS, 0); SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 4); } + /* Stuff that is common for all versions */ - SSP_WRITE_BITS(chip->cr0, chip_info->clk_pol, SSP_CR0_MASK_SPO, 6); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_phase, SSP_CR0_MASK_SPH, 7); - SSP_WRITE_BITS(chip->cr0, chip_info->clk_freq.scr, SSP_CR0_MASK_SCR, 8); + if (spi->mode & SPI_CPOL) + tmp = SSP_CLK_POL_IDLE_HIGH; + else + tmp = SSP_CLK_POL_IDLE_LOW; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); + + if (spi->mode & SPI_CPHA) + tmp = SSP_CLK_SECOND_EDGE; + else + tmp = SSP_CLK_FIRST_EDGE; + SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); + + SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); /* Loopback is available on all versions except PL023 */ - if (!pl022->vendor->pl023) - SSP_WRITE_BITS(chip->cr1, chip_info->lbm, SSP_CR1_MASK_LBM, 0); + if (!pl022->vendor->pl023) { + if (spi->mode & SPI_LOOP) + tmp = LOOPBACK_ENABLED; + else + tmp = LOOPBACK_DISABLED; + SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); + } SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); @@ -1704,7 +2003,7 @@ static int pl022_setup(struct spi_device *spi) spi_set_ctldata(spi, chip); return status; err_config_params: - err_first_setup: + spi_set_ctldata(spi, NULL); kfree(chip); return status; } @@ -1766,12 +2065,21 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) master->setup = pl022_setup; master->transfer = pl022_transfer; + /* + * Supports mode 0-3, loopback, and active low CS. Transfers are + * always MS bit first on the original pl022. 
+ */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; + if (pl022->vendor->extended_cr) + master->mode_bits |= SPI_LSB_FIRST; + dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); status = amba_request_regions(adev, NULL); if (status) goto err_no_ioregion; + pl022->phybase = adev->res.start; pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); if (pl022->virtbase == NULL) { status = -ENOMEM; @@ -1798,6 +2106,14 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); goto err_no_irq; } + + /* Get DMA channels */ + if (platform_info->enable_dma) { + status = pl022_dma_probe(pl022); + if (status != 0) + goto err_no_dma; + } + /* Initialize and start queue */ status = init_queue(pl022); if (status != 0) { @@ -1826,6 +2142,8 @@ pl022_probe(struct amba_device *adev, struct amba_id *id) err_start_queue: err_init_queue: destroy_queue(pl022); + pl022_dma_remove(pl022); + err_no_dma: free_irq(adev->irq[0], pl022); err_no_irq: clk_put(pl022->clk); @@ -1856,6 +2174,7 @@ pl022_remove(struct amba_device *adev) return status; } load_ssp_default_config(pl022); + pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); clk_disable(pl022->clk); clk_put(pl022->clk); diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index c4e04428992..154529aacc0 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -654,6 +654,8 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) struct spi_transfer *xfer; unsigned long flags; struct device *controller = spi->master->dev.parent; + u8 bits; + struct atmel_spi_device *asd; as = spi_master_get_devdata(spi->master); @@ -672,8 +674,18 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) return -EINVAL; } + if (xfer->bits_per_word) { + asd = spi->controller_state; + bits = (asd->csr >> 4) & 0xf; + if (bits != xfer->bits_per_word - 8) { + dev_dbg(&spi->dev, "you can't yet change " + "bits_per_word in transfers\n"); + return -ENOPROTOOPT; + } + } + /* FIXME implement these protocol options!! 
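With the pl022 changes above, per-chip settings that really describe the SPI protocol (clock polarity and phase, LSB-first, loopback) now come from spi->mode, which the core checks against master->mode_bits, rather than from pl022_config_chip. A board file then only has to declare the mode it wants; the values below are made up for illustration:

#include <linux/spi/spi.h>

static struct spi_board_info my_board_spi[] __initdata = {
	{
		.modalias	= "spidev",
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
		/* CPOL+CPHA and loopback; bits not present in the master's
		 * mode_bits are rejected by spi_setup() */
		.mode		= SPI_MODE_3 | SPI_LOOP,
	},
};

/* in the board init code:
 *	spi_register_board_info(my_board_spi, ARRAY_SIZE(my_board_spi));
 */
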
*/ - if (xfer->bits_per_word || xfer->speed_hz) { + if (xfer->speed_hz) { dev_dbg(&spi->dev, "no protocol options yet\n"); return -ENOPROTOOPT; } diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 56247853c29..90439314cf6 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c @@ -131,6 +131,7 @@ static const struct file_operations mrst_spi_regs_ops = { .owner = THIS_MODULE, .open = spi_show_regs_open, .read = spi_show_regs, + .llseek = default_llseek, }; static int mrst_spi_debugfs_init(struct dw_spi *dws) diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c index b3a94ca0a75..2a651e61bfb 100644 --- a/drivers/spi/omap2_mcspi.c +++ b/drivers/spi/omap2_mcspi.c @@ -296,6 +296,19 @@ static int omap2_mcspi_enable_clocks(struct omap2_mcspi *mcspi) return 0; } +static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(1000); + while (!(__raw_readl(reg) & bit)) { + if (time_after(jiffies, timeout)) + return -1; + cpu_relax(); + } + return 0; +} + static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { @@ -309,11 +322,14 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) u32 l; u8 * rx; const u8 * tx; + void __iomem *chstat_reg; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; l = mcspi_cached_chconf0(spi); + chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + count = xfer->len; c = count; word_len = cs->word_len; @@ -382,6 +398,16 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) if (tx != NULL) { wait_for_completion(&mcspi_dma->dma_tx_completion); dma_unmap_single(NULL, xfer->tx_dma, count, DMA_TO_DEVICE); + + /* for TX_ONLY mode, be sure all words have shifted out */ + if (rx == NULL) { + if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_TXS) < 0) + dev_err(&spi->dev, "TXS timed out\n"); + else if (mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_EOT) < 0) + dev_err(&spi->dev, "EOT timed out\n"); + } } if (rx != NULL) { @@ -435,19 +461,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) return count; } -static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) -{ - unsigned long timeout; - - timeout = jiffies + msecs_to_jiffies(1000); - while (!(__raw_readl(reg) & bit)) { - if (time_after(jiffies, timeout)) - return -1; - cpu_relax(); - } - return 0; -} - static unsigned omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) { @@ -489,10 +502,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %02x\n", + dev_vdbg(&spi->dev, "write-%d %02x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -506,10 +517,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) (l & OMAP2_MCSPI_CHCONF_TURBO)) { omap2_mcspi_set_enable(spi, 0); *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %02x\n", + dev_vdbg(&spi->dev, "read-%d %02x\n", word_len, *(rx - 1)); -#endif if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS) < 0) { dev_err(&spi->dev, @@ -522,10 +531,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %02x\n", + dev_vdbg(&spi->dev, "read-%d %02x\n", word_len, *(rx - 1)); -#endif } } while (c); 
} else if (word_len <= 16) { @@ -542,10 +549,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %04x\n", + dev_vdbg(&spi->dev, "write-%d %04x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -559,10 +564,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) (l & OMAP2_MCSPI_CHCONF_TURBO)) { omap2_mcspi_set_enable(spi, 0); *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", + dev_vdbg(&spi->dev, "read-%d %04x\n", word_len, *(rx - 1)); -#endif if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS) < 0) { dev_err(&spi->dev, @@ -575,10 +578,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %04x\n", + dev_vdbg(&spi->dev, "read-%d %04x\n", word_len, *(rx - 1)); -#endif } } while (c); } else if (word_len <= 32) { @@ -595,10 +596,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) dev_err(&spi->dev, "TXS timed out\n"); goto out; } -#ifdef VERBOSE - dev_dbg(&spi->dev, "write-%d %08x\n", + dev_vdbg(&spi->dev, "write-%d %08x\n", word_len, *tx); -#endif __raw_writel(*tx++, tx_reg); } if (rx != NULL) { @@ -612,10 +611,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) (l & OMAP2_MCSPI_CHCONF_TURBO)) { omap2_mcspi_set_enable(spi, 0); *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %08x\n", + dev_vdbg(&spi->dev, "read-%d %08x\n", word_len, *(rx - 1)); -#endif if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS) < 0) { dev_err(&spi->dev, @@ -628,10 +625,8 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } *rx++ = __raw_readl(rx_reg); -#ifdef VERBOSE - dev_dbg(&spi->dev, "read-%d %08x\n", + dev_vdbg(&spi->dev, "read-%d %08x\n", word_len, *(rx - 1)); -#endif } } while (c); } @@ -644,6 +639,12 @@ omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer) } else if (mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_EOT) < 0) dev_err(&spi->dev, "EOT timed out\n"); + + /* disable chan to purge rx datas received in TX_ONLY transfer, + * otherwise these rx datas will affect the direct following + * RX_ONLY transfer. 
+ */ + omap2_mcspi_set_enable(spi, 0); } out: omap2_mcspi_set_enable(spi, 1); diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index 3aea50da7b2..0b677dc041a 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c @@ -404,7 +404,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) goto msg_rejected; } - if ((t != NULL) && t->bits_per_word) + if (t->bits_per_word) bits_per_word = t->bits_per_word; if ((bits_per_word != 8) && (bits_per_word != 16)) { @@ -415,7 +415,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) goto msg_rejected; } /*make sure buffer length is even when working in 16 bit mode*/ - if ((t != NULL) && (t->bits_per_word == 16) && (t->len & 1)) { + if ((t->bits_per_word == 16) && (t->len & 1)) { dev_err(&spi->dev, "message rejected : " "odd data length (%d) while in 16 bit mode\n", diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 0bcf4c1601a..b5a78a1f442 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -23,6 +23,7 @@ #include <linux/init.h> #include <linux/cache.h> #include <linux/mutex.h> +#include <linux/of_device.h> #include <linux/slab.h> #include <linux/mod_devicetable.h> #include <linux/spi/spi.h> @@ -86,6 +87,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv) const struct spi_device *spi = to_spi_device(dev); const struct spi_driver *sdrv = to_spi_driver(drv); + /* Attempt an OF style match */ + if (of_driver_match_device(dev, drv)) + return 1; + if (sdrv->id_table) return !!spi_match_id(sdrv->id_table, spi); diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index 10a6dc3d37a..ab483a0ec6d 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c @@ -1,7 +1,7 @@ /* * Blackfin On-Chip SPI Driver * - * Copyright 2004-2007 Analog Devices Inc. + * Copyright 2004-2010 Analog Devices Inc. 
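The omap2_mcspi change above moves mcspi_wait_for_reg_bit() up in the file so the DMA path can also drain a TX_ONLY transfer before the channel is disabled. The helper itself is the stock jiffies-based busy-wait idiom, shown here generically; wait_for_bit and the 1000 ms budget are illustrative, and the real helper returns -1 rather than an errno:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/io.h>
#include <linux/errno.h>

static int wait_for_bit(void __iomem *reg, u32 bit)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	while (!(__raw_readl(reg) & bit)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();		/* relax the CPU while spinning */
	}
	return 0;
}
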
* * Enter bugs at http://blackfin.uclinux.org/ * @@ -41,13 +41,16 @@ MODULE_LICENSE("GPL"); #define RUNNING_STATE ((void *)1) #define DONE_STATE ((void *)2) #define ERROR_STATE ((void *)-1) -#define QUEUE_RUNNING 0 -#define QUEUE_STOPPED 1 -/* Value to send if no TX value is supplied */ -#define SPI_IDLE_TXVAL 0x0000 +struct bfin_spi_master_data; -struct driver_data { +struct bfin_spi_transfer_ops { + void (*write) (struct bfin_spi_master_data *); + void (*read) (struct bfin_spi_master_data *); + void (*duplex) (struct bfin_spi_master_data *); +}; + +struct bfin_spi_master_data { /* Driver model hookup */ struct platform_device *pdev; @@ -69,7 +72,7 @@ struct driver_data { spinlock_t lock; struct list_head queue; int busy; - int run; + bool running; /* Message Transfer pump */ struct tasklet_struct pump_transfers; @@ -77,7 +80,7 @@ struct driver_data { /* Current message transfer state info */ struct spi_message *cur_msg; struct spi_transfer *cur_transfer; - struct chip_data *cur_chip; + struct bfin_spi_slave_data *cur_chip; size_t len_in_bytes; size_t len; void *tx; @@ -92,38 +95,37 @@ struct driver_data { dma_addr_t rx_dma; dma_addr_t tx_dma; + int irq_requested; + int spi_irq; + size_t rx_map_len; size_t tx_map_len; u8 n_bytes; + u16 ctrl_reg; + u16 flag_reg; + int cs_change; - void (*write) (struct driver_data *); - void (*read) (struct driver_data *); - void (*duplex) (struct driver_data *); + const struct bfin_spi_transfer_ops *ops; }; -struct chip_data { +struct bfin_spi_slave_data { u16 ctl_reg; u16 baud; u16 flag; u8 chip_select_num; - u8 n_bytes; - u8 width; /* 0 or 1 */ u8 enable_dma; - u8 bits_per_word; /* 8 or 16 */ - u8 cs_change_per_word; u16 cs_chg_udelay; /* Some devices require > 255usec delay */ u32 cs_gpio; u16 idle_tx_val; - void (*write) (struct driver_data *); - void (*read) (struct driver_data *); - void (*duplex) (struct driver_data *); + u8 pio_interrupt; /* use spi data irq */ + const struct bfin_spi_transfer_ops *ops; }; #define DEFINE_SPI_REG(reg, off) \ -static inline u16 read_##reg(struct driver_data *drv_data) \ +static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \ { return bfin_read16(drv_data->regs_base + off); } \ -static inline void write_##reg(struct driver_data *drv_data, u16 v) \ +static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \ { bfin_write16(drv_data->regs_base + off, v); } DEFINE_SPI_REG(CTRL, 0x00) @@ -134,7 +136,7 @@ DEFINE_SPI_REG(RDBR, 0x10) DEFINE_SPI_REG(BAUD, 0x14) DEFINE_SPI_REG(SHAW, 0x18) -static void bfin_spi_enable(struct driver_data *drv_data) +static void bfin_spi_enable(struct bfin_spi_master_data *drv_data) { u16 cr; @@ -142,7 +144,7 @@ static void bfin_spi_enable(struct driver_data *drv_data) write_CTRL(drv_data, (cr | BIT_CTL_ENABLE)); } -static void bfin_spi_disable(struct driver_data *drv_data) +static void bfin_spi_disable(struct bfin_spi_master_data *drv_data) { u16 cr; @@ -165,7 +167,7 @@ static u16 hz_to_spi_baud(u32 speed_hz) return spi_baud; } -static int bfin_spi_flush(struct driver_data *drv_data) +static int bfin_spi_flush(struct bfin_spi_master_data *drv_data) { unsigned long limit = loops_per_jiffy << 1; @@ -179,13 +181,12 @@ static int bfin_spi_flush(struct driver_data *drv_data) } /* Chip select operation functions for cs_change flag */ -static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *chip) +static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num)) { + if 
(likely(chip->chip_select_num < MAX_CTRL_CS)) { u16 flag = read_FLAG(drv_data); - flag |= chip->flag; - flag &= ~(chip->flag << 8); + flag &= ~chip->flag; write_FLAG(drv_data, flag); } else { @@ -193,13 +194,13 @@ static void bfin_spi_cs_active(struct driver_data *drv_data, struct chip_data *c } } -static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data *chip) +static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) { - if (likely(chip->chip_select_num)) { + if (likely(chip->chip_select_num < MAX_CTRL_CS)) { u16 flag = read_FLAG(drv_data); - flag &= ~chip->flag; - flag |= (chip->flag << 8); + flag |= chip->flag; write_FLAG(drv_data, flag); } else { @@ -211,16 +212,43 @@ static void bfin_spi_cs_deactive(struct driver_data *drv_data, struct chip_data udelay(chip->cs_chg_udelay); } +/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ +static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag |= (chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + +static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data, + struct bfin_spi_slave_data *chip) +{ + if (chip->chip_select_num < MAX_CTRL_CS) { + u16 flag = read_FLAG(drv_data); + + flag &= ~(chip->flag >> 8); + + write_FLAG(drv_data, flag); + } +} + /* stop controller and re-config current chip*/ -static void bfin_spi_restore_state(struct driver_data *drv_data) +static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data) { - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; /* Clear status and disable clock */ write_STAT(drv_data, BIT_STAT_CLR); bfin_spi_disable(drv_data); dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n"); + SSYNC(); + /* Load the registers */ write_CTRL(drv_data, chip->ctl_reg); write_BAUD(drv_data, chip->baud); @@ -230,49 +258,12 @@ static void bfin_spi_restore_state(struct driver_data *drv_data) } /* used to kick off transfer in rx mode and read unwanted RX data */ -static inline void bfin_spi_dummy_read(struct driver_data *drv_data) +static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data) { (void) read_RDBR(drv_data); } -static void bfin_spi_null_writer(struct driver_data *drv_data) -{ - u8 n_bytes = drv_data->n_bytes; - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - write_TDBR(drv_data, tx_val); - drv_data->tx += n_bytes; - /* wait until transfer finished. 
- checking SPIF or TXS may not guarantee transfer completion */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - /* discard RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_null_reader(struct driver_data *drv_data) -{ - u8 n_bytes = drv_data->n_bytes; - u16 tx_val = drv_data->cur_chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - write_TDBR(drv_data, tx_val); - drv_data->rx += n_bytes; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - } -} - -static void bfin_spi_u8_writer(struct driver_data *drv_data) +static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); @@ -288,25 +279,7 @@ static void bfin_spi_u8_writer(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_writer(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - /* make sure transfer finished before deactiving CS */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u8_reader(struct driver_data *drv_data) +static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; @@ -321,25 +294,7 @@ static void bfin_spi_u8_reader(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_reader(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - u16 tx_val = chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u8_duplex(struct driver_data *drv_data) +static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -352,24 +307,13 @@ static void bfin_spi_u8_duplex(struct driver_data *drv_data) } } -static void bfin_spi_u8_cs_chg_duplex(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u8 *) (drv_data->tx++))); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u8 *) (drv_data->rx++) = read_RDBR(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { + .write = bfin_spi_u8_writer, + .read = bfin_spi_u8_reader, + .duplex = bfin_spi_u8_duplex, +}; -static void bfin_spi_u16_writer(struct driver_data *drv_data) +static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data) { /* clear RXS (we check for RXS inside the loop) */ bfin_spi_dummy_read(drv_data); @@ -386,26 +330,7 @@ static void bfin_spi_u16_writer(struct driver_data *drv_data) } } -static void 
bfin_spi_u16_cs_chg_writer(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* clear RXS (we check for RXS inside the loop) */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->tx < drv_data->tx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - /* make sure transfer finished before deactiving CS */ - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - bfin_spi_dummy_read(drv_data); - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u16_reader(struct driver_data *drv_data) +static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data) { u16 tx_val = drv_data->cur_chip->idle_tx_val; @@ -421,26 +346,7 @@ static void bfin_spi_u16_reader(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_reader(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - u16 tx_val = chip->idle_tx_val; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, tx_val); - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - bfin_spi_cs_deactive(drv_data, chip); - } -} - -static void bfin_spi_u16_duplex(struct driver_data *drv_data) +static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data) { /* discard old RX data and clear RXS */ bfin_spi_dummy_read(drv_data); @@ -455,27 +361,14 @@ static void bfin_spi_u16_duplex(struct driver_data *drv_data) } } -static void bfin_spi_u16_cs_chg_duplex(struct driver_data *drv_data) -{ - struct chip_data *chip = drv_data->cur_chip; - - /* discard old RX data and clear RXS */ - bfin_spi_dummy_read(drv_data); - - while (drv_data->rx < drv_data->rx_end) { - bfin_spi_cs_active(drv_data, chip); - write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); - drv_data->tx += 2; - while (!(read_STAT(drv_data) & BIT_STAT_RXS)) - cpu_relax(); - *(u16 *) (drv_data->rx) = read_RDBR(drv_data); - drv_data->rx += 2; - bfin_spi_cs_deactive(drv_data, chip); - } -} +static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { + .write = bfin_spi_u16_writer, + .read = bfin_spi_u16_reader, + .duplex = bfin_spi_u16_duplex, +}; -/* test if ther is more transfer to be done */ -static void *bfin_spi_next_transfer(struct driver_data *drv_data) +/* test if there is more transfer to be done */ +static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data) { struct spi_message *msg = drv_data->cur_msg; struct spi_transfer *trans = drv_data->cur_transfer; @@ -494,9 +387,9 @@ static void *bfin_spi_next_transfer(struct driver_data *drv_data) * caller already set message->status; * dma and pio irqs are blocked give finished message back */ -static void bfin_spi_giveback(struct driver_data *drv_data) +static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data) { - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_transfer *last_transfer; unsigned long flags; struct spi_message *msg; @@ -525,10 +418,83 @@ static void bfin_spi_giveback(struct driver_data *drv_data) msg->complete(msg->context); } +/* spi data irq handler */ +static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) +{ + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; + struct spi_message *msg = 
drv_data->cur_msg; + int n_bytes = drv_data->n_bytes; + + /* wait until transfer finished. */ + while (!(read_STAT(drv_data) & BIT_STAT_RXS)) + cpu_relax(); + + if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || + (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) { + /* last read */ + if (drv_data->rx) { + dev_dbg(&drv_data->pdev->dev, "last read\n"); + if (n_bytes == 2) + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + else if (n_bytes == 1) + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + drv_data->rx += n_bytes; + } + + msg->actual_length += drv_data->len_in_bytes; + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); + /* Move to next transfer */ + msg->state = bfin_spi_next_transfer(drv_data); + + disable_irq_nosync(drv_data->spi_irq); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + return IRQ_HANDLED; + } + + if (drv_data->rx && drv_data->tx) { + /* duplex */ + dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); + if (drv_data->n_bytes == 2) { + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + } else if (drv_data->n_bytes == 1) { + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + } + } else if (drv_data->rx) { + /* read */ + dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); + if (drv_data->n_bytes == 2) + *(u16 *) (drv_data->rx) = read_RDBR(drv_data); + else if (drv_data->n_bytes == 1) + *(u8 *) (drv_data->rx) = read_RDBR(drv_data); + write_TDBR(drv_data, chip->idle_tx_val); + } else if (drv_data->tx) { + /* write */ + dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); + bfin_spi_dummy_read(drv_data); + if (drv_data->n_bytes == 2) + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + else if (drv_data->n_bytes == 1) + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + } + + if (drv_data->tx) + drv_data->tx += n_bytes; + if (drv_data->rx) + drv_data->rx += n_bytes; + + return IRQ_HANDLED; +} + static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) { - struct driver_data *drv_data = dev_id; - struct chip_data *chip = drv_data->cur_chip; + struct bfin_spi_master_data *drv_data = dev_id; + struct bfin_spi_slave_data *chip = drv_data->cur_chip; struct spi_message *msg = drv_data->cur_msg; unsigned long timeout; unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel); @@ -540,10 +506,6 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) clear_dma_irqstat(drv_data->dma_channel); - /* Wait for DMA to complete */ - while (get_dma_curr_irqstat(drv_data->dma_channel) & DMA_RUN) - cpu_relax(); - /* * wait for the last transaction shifted out. 
HRM states: * at this point there may still be data in the SPI DMA FIFO waiting @@ -551,8 +513,8 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) * register until it goes low for 2 successive reads */ if (drv_data->tx != NULL) { - while ((read_STAT(drv_data) & TXS) || - (read_STAT(drv_data) & TXS)) + while ((read_STAT(drv_data) & BIT_STAT_TXS) || + (read_STAT(drv_data) & BIT_STAT_TXS)) cpu_relax(); } @@ -561,14 +523,14 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) dmastat, read_STAT(drv_data)); timeout = jiffies + HZ; - while (!(read_STAT(drv_data) & SPIF)) + while (!(read_STAT(drv_data) & BIT_STAT_SPIF)) if (!time_before(jiffies, timeout)) { dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF"); break; } else cpu_relax(); - if ((dmastat & DMA_ERR) && (spistat & RBSY)) { + if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) { msg->state = ERROR_STATE; dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n"); } else { @@ -588,20 +550,20 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) dev_dbg(&drv_data->pdev->dev, "disable dma channel irq%d\n", drv_data->dma_channel); - dma_disable_irq(drv_data->dma_channel); + dma_disable_irq_nosync(drv_data->dma_channel); return IRQ_HANDLED; } static void bfin_spi_pump_transfers(unsigned long data) { - struct driver_data *drv_data = (struct driver_data *)data; + struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; - struct chip_data *chip = NULL; - u8 width; - u16 cr, dma_width, dma_config; + struct bfin_spi_slave_data *chip = NULL; + unsigned int bits_per_word; + u16 cr, cr_width, dma_width, dma_config; u32 tranf_success = 1; u8 full_duplex = 0; @@ -639,7 +601,7 @@ static void bfin_spi_pump_transfers(unsigned long data) udelay(previous->delay_usecs); } - /* Setup the transfer state based on the type of transfer */ + /* Flush any existing transfers that may be sitting in the hardware */ if (bfin_spi_flush(drv_data) == 0) { dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); message->status = -EIO; @@ -679,52 +641,31 @@ static void bfin_spi_pump_transfers(unsigned long data) drv_data->cs_change = transfer->cs_change; /* Bits per word setup */ - switch (transfer->bits_per_word) { - case 8: + bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; + if (bits_per_word == 8) { drv_data->n_bytes = 1; - width = CFG_SPI_WORDSIZE8; - drv_data->read = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; - drv_data->write = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; - drv_data->duplex = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; - break; - - case 16: + drv_data->len = transfer->len; + cr_width = 0; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u8; + } else if (bits_per_word == 16) { drv_data->n_bytes = 2; - width = CFG_SPI_WORDSIZE16; - drv_data->read = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; - drv_data->write = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; - drv_data->duplex = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; - break; - - default: - /* No change, the same as default setting */ - drv_data->n_bytes = chip->n_bytes; - width = chip->width; - drv_data->write = drv_data->tx ? 
chip->write : bfin_spi_null_writer; - drv_data->read = drv_data->rx ? chip->read : bfin_spi_null_reader; - drv_data->duplex = chip->duplex ? chip->duplex : bfin_spi_null_writer; - break; - } - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); - cr |= (width << 8); - write_CTRL(drv_data, cr); - - if (width == CFG_SPI_WORDSIZE16) { drv_data->len = (transfer->len) >> 1; + cr_width = BIT_CTL_WORDSIZE; + drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; } else { - drv_data->len = transfer->len; + dev_err(&drv_data->pdev->dev, "transfer: unsupported bits_per_word\n"); + message->status = -EINVAL; + bfin_spi_giveback(drv_data); + return; } + cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE); + cr |= cr_width; + write_CTRL(drv_data, cr); + dev_dbg(&drv_data->pdev->dev, - "transfer: drv_data->write is %p, chip->write is %p, null_wr is %p\n", - drv_data->write, chip->write, bfin_spi_null_writer); + "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n", + drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8); - /* speed and width has been set on per message */ message->state = RUNNING_STATE; dma_config = 0; @@ -735,13 +676,11 @@ static void bfin_spi_pump_transfers(unsigned long data) write_BAUD(drv_data, chip->baud); write_STAT(drv_data, BIT_STAT_CLR); - cr = (read_CTRL(drv_data) & (~BIT_CTL_TIMOD)); - if (drv_data->cs_change) - bfin_spi_cs_active(drv_data, chip); + bfin_spi_cs_active(drv_data, chip); dev_dbg(&drv_data->pdev->dev, "now pumping a transfer: width is %d, len is %d\n", - width, transfer->len); + cr_width, transfer->len); /* * Try to map dma buffer and do a dma transfer. If successful use, @@ -760,7 +699,7 @@ static void bfin_spi_pump_transfers(unsigned long data) /* config dma channel */ dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n"); set_dma_x_count(drv_data->dma_channel, drv_data->len); - if (width == CFG_SPI_WORDSIZE16) { + if (cr_width == BIT_CTL_WORDSIZE) { set_dma_x_modify(drv_data->dma_channel, 2); dma_width = WDSIZE_16; } else { @@ -846,73 +785,100 @@ static void bfin_spi_pump_transfers(unsigned long data) dma_enable_irq(drv_data->dma_channel); local_irq_restore(flags); - } else { - /* IO mode write then read */ - dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); - - /* we always use SPI_WRITE mode. SPI_READ mode - seems to have problems with setting up the - output value in TDBR prior to the transfer. */ - write_CTRL(drv_data, (cr | CFG_SPI_WRITE)); - - if (full_duplex) { - /* full duplex mode */ - BUG_ON((drv_data->tx_end - drv_data->tx) != - (drv_data->rx_end - drv_data->rx)); - dev_dbg(&drv_data->pdev->dev, - "IO duplex: cr is 0x%x\n", cr); - - drv_data->duplex(drv_data); + return; + } - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->tx != NULL) { - /* write only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO write: cr is 0x%x\n", cr); + /* + * We always use SPI_WRITE mode (transfer starts with TDBR write). + * SPI_READ mode (transfer starts with RDBR read) seems to have + * problems with setting up the output value in TDBR prior to the + * start of the transfer. 
+ */ + write_CTRL(drv_data, cr | BIT_CTL_TXMOD); - drv_data->write(drv_data); + if (chip->pio_interrupt) { + /* SPI irq should have been disabled by now */ - if (drv_data->tx != drv_data->tx_end) - tranf_success = 0; - } else if (drv_data->rx != NULL) { - /* read only half duplex */ - dev_dbg(&drv_data->pdev->dev, - "IO read: cr is 0x%x\n", cr); + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); - drv_data->read(drv_data); - if (drv_data->rx != drv_data->rx_end) - tranf_success = 0; + /* start transfer */ + if (drv_data->tx == NULL) + write_TDBR(drv_data, chip->idle_tx_val); + else { + if (bits_per_word == 8) + write_TDBR(drv_data, (*(u8 *) (drv_data->tx))); + else + write_TDBR(drv_data, (*(u16 *) (drv_data->tx))); + drv_data->tx += drv_data->n_bytes; } - if (!tranf_success) { - dev_dbg(&drv_data->pdev->dev, - "IO write error!\n"); - message->state = ERROR_STATE; - } else { - /* Update total byte transfered */ - message->actual_length += drv_data->len_in_bytes; - /* Move to next transfer of this msg */ - message->state = bfin_spi_next_transfer(drv_data); - if (drv_data->cs_change) - bfin_spi_cs_deactive(drv_data, chip); - } - /* Schedule next transfer tasklet */ - tasklet_schedule(&drv_data->pump_transfers); + /* once TDBR is empty, interrupt is triggered */ + enable_irq(drv_data->spi_irq); + return; + } + + /* IO mode */ + dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n"); + + if (full_duplex) { + /* full duplex mode */ + BUG_ON((drv_data->tx_end - drv_data->tx) != + (drv_data->rx_end - drv_data->rx)); + dev_dbg(&drv_data->pdev->dev, + "IO duplex: cr is 0x%x\n", cr); + + drv_data->ops->duplex(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->tx != NULL) { + /* write only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO write: cr is 0x%x\n", cr); + + drv_data->ops->write(drv_data); + + if (drv_data->tx != drv_data->tx_end) + tranf_success = 0; + } else if (drv_data->rx != NULL) { + /* read only half duplex */ + dev_dbg(&drv_data->pdev->dev, + "IO read: cr is 0x%x\n", cr); + + drv_data->ops->read(drv_data); + if (drv_data->rx != drv_data->rx_end) + tranf_success = 0; + } + + if (!tranf_success) { + dev_dbg(&drv_data->pdev->dev, + "IO write error!\n"); + message->state = ERROR_STATE; + } else { + /* Update total byte transfered */ + message->actual_length += drv_data->len_in_bytes; + /* Move to next transfer of this msg */ + message->state = bfin_spi_next_transfer(drv_data); + if (drv_data->cs_change) + bfin_spi_cs_deactive(drv_data, chip); } + + /* Schedule next transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); } /* pop a msg from queue and kick off real transfer */ static void bfin_spi_pump_messages(struct work_struct *work) { - struct driver_data *drv_data; + struct bfin_spi_master_data *drv_data; unsigned long flags; - drv_data = container_of(work, struct driver_data, pump_messages); + drv_data = container_of(work, struct bfin_spi_master_data, pump_messages); /* Lock queue and check for queue work */ spin_lock_irqsave(&drv_data->lock, flags); - if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { + if (list_empty(&drv_data->queue) || !drv_data->running) { /* pumper kicked off but no work to do */ drv_data->busy = 0; spin_unlock_irqrestore(&drv_data->lock, flags); @@ -962,12 +928,12 @@ static void bfin_spi_pump_messages(struct work_struct *work) */ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) { - struct driver_data *drv_data = 
spi_master_get_devdata(spi->master); + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->run == QUEUE_STOPPED) { + if (!drv_data->running) { spin_unlock_irqrestore(&drv_data->lock, flags); return -ESHUTDOWN; } @@ -979,7 +945,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg) dev_dbg(&spi->dev, "adding an msg in transfer() \n"); list_add_tail(&msg->queue, &drv_data->queue); - if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) + if (drv_data->running && !drv_data->busy) queue_work(drv_data->workqueue, &drv_data->pump_messages); spin_unlock_irqrestore(&drv_data->lock, flags); @@ -1003,147 +969,184 @@ static u16 ssel[][MAX_SPI_SSEL] = { P_SPI2_SSEL6, P_SPI2_SSEL7}, }; -/* first setup for new devices */ +/* setup for devices (may be called multiple times -- not just first setup) */ static int bfin_spi_setup(struct spi_device *spi) { - struct bfin5xx_spi_chip *chip_info = NULL; - struct chip_data *chip; - struct driver_data *drv_data = spi_master_get_devdata(spi->master); - int ret; - - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) - return -EINVAL; + struct bfin5xx_spi_chip *chip_info; + struct bfin_spi_slave_data *chip = NULL; + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); + u16 bfin_ctl_reg; + int ret = -EINVAL; /* Only alloc (or use chip_info) on first setup */ + chip_info = NULL; chip = spi_get_ctldata(spi); if (chip == NULL) { - chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); - if (!chip) - return -ENOMEM; + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, "cannot allocate chip data\n"); + ret = -ENOMEM; + goto error; + } chip->enable_dma = 0; chip_info = spi->controller_data; } + /* Let people set non-standard bits directly */ + bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO | + BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ; + /* chip_info isn't always needed */ if (chip_info) { /* Make sure people stop trying to set fields via ctl_reg * when they should actually be using common SPI framework. - * Currently we let through: WOM EMISO PSSE GM SZ TIMOD. + * Currently we let through: WOM EMISO PSSE GM SZ. * Not sure if a user actually needs/uses any of these, * but let's assume (for now) they do. 
*/ - if (chip_info->ctl_reg & (SPE|MSTR|CPOL|CPHA|LSBF|SIZE)) { + if (chip_info->ctl_reg & ~bfin_ctl_reg) { dev_err(&spi->dev, "do not set bits in ctl_reg " "that the SPI framework manages\n"); - return -EINVAL; + goto error; } - chip->enable_dma = chip_info->enable_dma != 0 && drv_data->master_info->enable_dma; chip->ctl_reg = chip_info->ctl_reg; - chip->bits_per_word = chip_info->bits_per_word; - chip->cs_change_per_word = chip_info->cs_change_per_word; chip->cs_chg_udelay = chip_info->cs_chg_udelay; - chip->cs_gpio = chip_info->cs_gpio; chip->idle_tx_val = chip_info->idle_tx_val; + chip->pio_interrupt = chip_info->pio_interrupt; + spi->bits_per_word = chip_info->bits_per_word; + } else { + /* force a default base state */ + chip->ctl_reg &= bfin_ctl_reg; + } + + if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { + dev_err(&spi->dev, "%d bits_per_word is not supported\n", + spi->bits_per_word); + goto error; } /* translate common spi framework into our register */ + if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) { + dev_err(&spi->dev, "unsupported spi modes detected\n"); + goto error; + } if (spi->mode & SPI_CPOL) - chip->ctl_reg |= CPOL; + chip->ctl_reg |= BIT_CTL_CPOL; if (spi->mode & SPI_CPHA) - chip->ctl_reg |= CPHA; + chip->ctl_reg |= BIT_CTL_CPHA; if (spi->mode & SPI_LSB_FIRST) - chip->ctl_reg |= LSBF; + chip->ctl_reg |= BIT_CTL_LSBF; /* we dont support running in slave mode (yet?) */ - chip->ctl_reg |= MSTR; + chip->ctl_reg |= BIT_CTL_MASTER; /* + * Notice: for blackfin, the speed_hz is the value of register + * SPI_BAUD, not the real baudrate + */ + chip->baud = hz_to_spi_baud(spi->max_speed_hz); + chip->chip_select_num = spi->chip_select; + if (chip->chip_select_num < MAX_CTRL_CS) { + if (!(spi->mode & SPI_CPHA)) + dev_warn(&spi->dev, "Warning: SPI CPHA not set:" + " Slave Select not under software control!\n" + " See Documentation/blackfin/bfin-spi-notes.txt"); + + chip->flag = (1 << spi->chip_select) << 8; + } else + chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS; + + if (chip->enable_dma && chip->pio_interrupt) { + dev_err(&spi->dev, "enable_dma is set, " + "do not set pio_interrupt\n"); + goto error; + } + /* * if any one SPI chip is registered and wants DMA, request the * DMA channel for it */ if (chip->enable_dma && !drv_data->dma_requested) { /* register dma irq handler */ - if (request_dma(drv_data->dma_channel, "BFIN_SPI_DMA") < 0) { - dev_dbg(&spi->dev, + ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA"); + if (ret) { + dev_err(&spi->dev, "Unable to request BlackFin SPI DMA channel\n"); - return -ENODEV; + goto error; } - if (set_dma_callback(drv_data->dma_channel, - bfin_spi_dma_irq_handler, drv_data) < 0) { - dev_dbg(&spi->dev, "Unable to set dma callback\n"); - return -EPERM; + drv_data->dma_requested = 1; + + ret = set_dma_callback(drv_data->dma_channel, + bfin_spi_dma_irq_handler, drv_data); + if (ret) { + dev_err(&spi->dev, "Unable to set dma callback\n"); + goto error; } dma_disable_irq(drv_data->dma_channel); - drv_data->dma_requested = 1; } - /* - * Notice: for blackfin, the speed_hz is the value of register - * SPI_BAUD, not the real baudrate - */ - chip->baud = hz_to_spi_baud(spi->max_speed_hz); - chip->flag = 1 << (spi->chip_select); - chip->chip_select_num = spi->chip_select; + if (chip->pio_interrupt && !drv_data->irq_requested) { + ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler, + IRQF_DISABLED, "BFIN_SPI", drv_data); + if (ret) { + dev_err(&spi->dev, "Unable to register spi IRQ\n"); + goto error; + } + 
drv_data->irq_requested = 1; + /* we use write mode, spi irq has to be disabled here */ + disable_irq(drv_data->spi_irq); + } - if (chip->chip_select_num == 0) { + if (chip->chip_select_num >= MAX_CTRL_CS) { ret = gpio_request(chip->cs_gpio, spi->modalias); if (ret) { - if (drv_data->dma_requested) - free_dma(drv_data->dma_channel); - return ret; + dev_err(&spi->dev, "gpio_request() error\n"); + goto pin_error; } gpio_direction_output(chip->cs_gpio, 1); } - switch (chip->bits_per_word) { - case 8: - chip->n_bytes = 1; - chip->width = CFG_SPI_WORDSIZE8; - chip->read = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_reader : bfin_spi_u8_reader; - chip->write = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_writer : bfin_spi_u8_writer; - chip->duplex = chip->cs_change_per_word ? - bfin_spi_u8_cs_chg_duplex : bfin_spi_u8_duplex; - break; - - case 16: - chip->n_bytes = 2; - chip->width = CFG_SPI_WORDSIZE16; - chip->read = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_reader : bfin_spi_u16_reader; - chip->write = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_writer : bfin_spi_u16_writer; - chip->duplex = chip->cs_change_per_word ? - bfin_spi_u16_cs_chg_duplex : bfin_spi_u16_duplex; - break; - - default: - dev_err(&spi->dev, "%d bits_per_word is not supported\n", - chip->bits_per_word); - if (chip_info) - kfree(chip); - return -ENODEV; - } - dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", - spi->modalias, chip->width, chip->enable_dma); + spi->modalias, spi->bits_per_word, chip->enable_dma); dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n", chip->ctl_reg, chip->flag); spi_set_ctldata(spi, chip); dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num); - if ((chip->chip_select_num > 0) - && (chip->chip_select_num <= spi->master->num_chipselect)) - peripheral_request(ssel[spi->master->bus_num] - [chip->chip_select_num-1], spi->modalias); + if (chip->chip_select_num < MAX_CTRL_CS) { + ret = peripheral_request(ssel[spi->master->bus_num] + [chip->chip_select_num-1], spi->modalias); + if (ret) { + dev_err(&spi->dev, "peripheral_request() error\n"); + goto pin_error; + } + } + bfin_spi_cs_enable(drv_data, chip); bfin_spi_cs_deactive(drv_data, chip); return 0; + + pin_error: + if (chip->chip_select_num >= MAX_CTRL_CS) + gpio_free(chip->cs_gpio); + else + peripheral_free(ssel[spi->master->bus_num] + [chip->chip_select_num - 1]); + error: + if (chip) { + if (drv_data->dma_requested) + free_dma(drv_data->dma_channel); + drv_data->dma_requested = 0; + + kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); + } + + return ret; } /* @@ -1152,28 +1155,30 @@ static int bfin_spi_setup(struct spi_device *spi) */ static void bfin_spi_cleanup(struct spi_device *spi) { - struct chip_data *chip = spi_get_ctldata(spi); + struct bfin_spi_slave_data *chip = spi_get_ctldata(spi); + struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master); if (!chip) return; - if ((chip->chip_select_num > 0) - && (chip->chip_select_num <= spi->master->num_chipselect)) + if (chip->chip_select_num < MAX_CTRL_CS) { peripheral_free(ssel[spi->master->bus_num] [chip->chip_select_num-1]); - - if (chip->chip_select_num == 0) + bfin_spi_cs_disable(drv_data, chip); + } else gpio_free(chip->cs_gpio); kfree(chip); + /* prevent free 'chip' twice */ + spi_set_ctldata(spi, NULL); } -static inline int bfin_spi_init_queue(struct driver_data *drv_data) +static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data) { INIT_LIST_HEAD(&drv_data->queue); 
spin_lock_init(&drv_data->lock); - drv_data->run = QUEUE_STOPPED; + drv_data->running = false; drv_data->busy = 0; /* init transfer tasklet */ @@ -1190,18 +1195,18 @@ static inline int bfin_spi_init_queue(struct driver_data *drv_data) return 0; } -static inline int bfin_spi_start_queue(struct driver_data *drv_data) +static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; spin_lock_irqsave(&drv_data->lock, flags); - if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { + if (drv_data->running || drv_data->busy) { spin_unlock_irqrestore(&drv_data->lock, flags); return -EBUSY; } - drv_data->run = QUEUE_RUNNING; + drv_data->running = true; drv_data->cur_msg = NULL; drv_data->cur_transfer = NULL; drv_data->cur_chip = NULL; @@ -1212,7 +1217,7 @@ static inline int bfin_spi_start_queue(struct driver_data *drv_data) return 0; } -static inline int bfin_spi_stop_queue(struct driver_data *drv_data) +static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data) { unsigned long flags; unsigned limit = 500; @@ -1226,7 +1231,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ - drv_data->run = QUEUE_STOPPED; + drv_data->running = false; while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { spin_unlock_irqrestore(&drv_data->lock, flags); msleep(10); @@ -1241,7 +1246,7 @@ static inline int bfin_spi_stop_queue(struct driver_data *drv_data) return status; } -static inline int bfin_spi_destroy_queue(struct driver_data *drv_data) +static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data) { int status; @@ -1259,14 +1264,14 @@ static int __init bfin_spi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct bfin5xx_spi_master *platform_info; struct spi_master *master; - struct driver_data *drv_data = 0; + struct bfin_spi_master_data *drv_data; struct resource *res; int status = 0; platform_info = dev->platform_data; /* Allocate master with space for drv_data */ - master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); + master = spi_alloc_master(dev, sizeof(*drv_data)); if (!master) { dev_err(&pdev->dev, "can not alloc spi_master\n"); return -ENOMEM; @@ -1302,11 +1307,19 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_ioremap; } - drv_data->dma_channel = platform_get_irq(pdev, 0); - if (drv_data->dma_channel < 0) { + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (res == NULL) { dev_err(dev, "No DMA channel specified\n"); status = -ENOENT; - goto out_error_no_dma_ch; + goto out_error_free_io; + } + drv_data->dma_channel = res->start; + + drv_data->spi_irq = platform_get_irq(pdev, 0); + if (drv_data->spi_irq < 0) { + dev_err(dev, "No spi pio irq specified\n"); + status = -ENOENT; + goto out_error_free_io; } /* Initial and start queue */ @@ -1328,6 +1341,12 @@ static int __init bfin_spi_probe(struct platform_device *pdev) goto out_error_queue_alloc; } + /* Reset SPI registers. If these registers were used by the boot loader, + * the sky may fall on your head if you enable the dma controller. 
+ */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); + /* Register with the SPI framework */ platform_set_drvdata(pdev, drv_data); status = spi_register_master(master); @@ -1343,7 +1362,7 @@ static int __init bfin_spi_probe(struct platform_device *pdev) out_error_queue_alloc: bfin_spi_destroy_queue(drv_data); -out_error_no_dma_ch: +out_error_free_io: iounmap((void *) drv_data->regs_base); out_error_ioremap: out_error_get_res: @@ -1355,7 +1374,7 @@ out_error_get_res: /* stop hardware and remove the driver */ static int __devexit bfin_spi_remove(struct platform_device *pdev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; if (!drv_data) @@ -1375,6 +1394,11 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) free_dma(drv_data->dma_channel); } + if (drv_data->irq_requested) { + free_irq(drv_data->spi_irq, drv_data); + drv_data->irq_requested = 0; + } + /* Disconnect from the SPI framework */ spi_unregister_master(drv_data->master); @@ -1389,26 +1413,32 @@ static int __devexit bfin_spi_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; status = bfin_spi_stop_queue(drv_data); if (status != 0) return status; - /* stop hardware */ - bfin_spi_disable(drv_data); + drv_data->ctrl_reg = read_CTRL(drv_data); + drv_data->flag_reg = read_FLAG(drv_data); + + /* + * reset SPI_CTL and SPI_FLG registers + */ + write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER); + write_FLAG(drv_data, 0xFF00); return 0; } static int bfin_spi_resume(struct platform_device *pdev) { - struct driver_data *drv_data = platform_get_drvdata(pdev); + struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev); int status = 0; - /* Enable the SPI interface */ - bfin_spi_enable(drv_data); + write_CTRL(drv_data, drv_data->ctrl_reg); + write_FLAG(drv_data, drv_data->flag_reg); /* Start the queue running */ status = bfin_spi_start_queue(drv_data); @@ -1439,7 +1469,7 @@ static int __init bfin_spi_init(void) { return platform_driver_probe(&bfin_spi_driver, bfin_spi_probe); } -module_init(bfin_spi_init); +subsys_initcall(bfin_spi_init); static void __exit bfin_spi_exit(void) { diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi_fsl_espi.c new file mode 100644 index 00000000000..e3b4f645196 --- /dev/null +++ b/drivers/spi/spi_fsl_espi.c @@ -0,0 +1,748 @@ +/* + * Freescale eSPI controller driver. + * + * Copyright 2010 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/spi/spi.h> +#include <linux/platform_device.h> +#include <linux/fsl_devices.h> +#include <linux/mm.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_spi.h> +#include <linux/interrupt.h> +#include <linux/err.h> +#include <sysdev/fsl_soc.h> + +#include "spi_fsl_lib.h" + +/* eSPI Controller registers */ +struct fsl_espi_reg { + __be32 mode; /* 0x000 - eSPI mode register */ + __be32 event; /* 0x004 - eSPI event register */ + __be32 mask; /* 0x008 - eSPI mask register */ + __be32 command; /* 0x00c - eSPI command register */ + __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ + __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ + u8 res[8]; /* 0x018 - 0x01c reserved */ + __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ +}; + +struct fsl_espi_transfer { + const void *tx_buf; + void *rx_buf; + unsigned len; + unsigned n_tx; + unsigned n_rx; + unsigned actual_length; + int status; +}; + +/* eSPI Controller mode register definitions */ +#define SPMODE_ENABLE (1 << 31) +#define SPMODE_LOOP (1 << 30) +#define SPMODE_TXTHR(x) ((x) << 8) +#define SPMODE_RXTHR(x) ((x) << 0) + +/* eSPI Controller CS mode register definitions */ +#define CSMODE_CI_INACTIVEHIGH (1 << 31) +#define CSMODE_CP_BEGIN_EDGECLK (1 << 30) +#define CSMODE_REV (1 << 29) +#define CSMODE_DIV16 (1 << 28) +#define CSMODE_PM(x) ((x) << 24) +#define CSMODE_POL_1 (1 << 20) +#define CSMODE_LEN(x) ((x) << 16) +#define CSMODE_BEF(x) ((x) << 12) +#define CSMODE_AFT(x) ((x) << 8) +#define CSMODE_CG(x) ((x) << 3) + +/* Default mode/csmode for eSPI controller */ +#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3)) +#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ + | CSMODE_AFT(0) | CSMODE_CG(1)) + +/* SPIE register values */ +#define SPIE_NE 0x00000200 /* Not empty */ +#define SPIE_NF 0x00000100 /* Not full */ + +/* SPIM register values */ +#define SPIM_NE 0x00000200 /* Not empty */ +#define SPIM_NF 0x00000100 /* Not full */ +#define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) +#define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) + +/* SPCOM register values */ +#define SPCOM_CS(x) ((x) << 30) +#define SPCOM_TRANLEN(x) ((x) << 0) +#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ + +static void fsl_espi_change_mode(struct spi_device *spi) +{ + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct spi_mpc8xxx_cs *cs = spi->controller_state; + struct fsl_espi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->csmode[spi->chip_select]; + __be32 __iomem *espi_mode = ®_base->mode; + u32 tmp; + unsigned long flags; + + /* Turn off IRQs locally to minimize time that SPI is disabled. 
*/ + local_irq_save(flags); + + /* Turn off SPI unit prior changing mode */ + tmp = mpc8xxx_spi_read_reg(espi_mode); + mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); + mpc8xxx_spi_write_reg(mode, cs->hw_mode); + mpc8xxx_spi_write_reg(espi_mode, tmp); + + local_irq_restore(flags); +} + +static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) +{ + u32 data; + u16 data_h; + u16 data_l; + const u32 *tx = mpc8xxx_spi->tx; + + if (!tx) + return 0; + + data = *tx++ << mpc8xxx_spi->tx_shift; + data_l = data & 0xffff; + data_h = (data >> 16) & 0xffff; + swab16s(&data_l); + swab16s(&data_h); + data = data_h | data_l; + + mpc8xxx_spi->tx = tx; + return data; +} + +static int fsl_espi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + int bits_per_word = 0; + u8 pm; + u32 hz = 0; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (t) { + bits_per_word = t->bits_per_word; + hz = t->speed_hz; + } + + /* spi_transfer level calls that work per-word */ + if (!bits_per_word) + bits_per_word = spi->bits_per_word; + + /* Make sure its a bit width we support [4..16] */ + if ((bits_per_word < 4) || (bits_per_word > 16)) + return -EINVAL; + + if (!hz) + hz = spi->max_speed_hz; + + cs->rx_shift = 0; + cs->tx_shift = 0; + cs->get_rx = mpc8xxx_spi_rx_buf_u32; + cs->get_tx = mpc8xxx_spi_tx_buf_u32; + if (bits_per_word <= 8) { + cs->rx_shift = 8 - bits_per_word; + } else if (bits_per_word <= 16) { + cs->rx_shift = 16 - bits_per_word; + if (spi->mode & SPI_LSB_FIRST) + cs->get_tx = fsl_espi_tx_buf_lsb; + } else { + return -EINVAL; + } + + mpc8xxx_spi->rx_shift = cs->rx_shift; + mpc8xxx_spi->tx_shift = cs->tx_shift; + mpc8xxx_spi->get_rx = cs->get_rx; + mpc8xxx_spi->get_tx = cs->get_tx; + + bits_per_word = bits_per_word - 1; + + /* mask out bits we are going to set */ + cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); + + cs->hw_mode |= CSMODE_LEN(bits_per_word); + + if ((mpc8xxx_spi->spibrg / hz) > 64) { + cs->hw_mode |= CSMODE_DIV16; + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1; + + WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. 
" + "Will use %d Hz instead.\n", dev_name(&spi->dev), + hz, mpc8xxx_spi->spibrg / 1024); + if (pm > 16) + pm = 16; + } else { + pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } + if (pm) + pm--; + + cs->hw_mode |= CSMODE_PM(pm); + + fsl_espi_change_mode(spi); + return 0; +} + +static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, + unsigned int len) +{ + u32 word; + struct fsl_espi_reg *reg_base = mspi->reg_base; + + mspi->count = len; + + /* enable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); + + /* transmit word */ + word = mspi->get_tx(mspi); + mpc8xxx_spi_write_reg(®_base->transmit, word); + + return 0; +} + +static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; + unsigned int len = t->len; + u8 bits_per_word; + int ret; + + bits_per_word = spi->bits_per_word; + if (t->bits_per_word) + bits_per_word = t->bits_per_word; + + mpc8xxx_spi->len = t->len; + len = roundup(len, 4) / 4; + + mpc8xxx_spi->tx = t->tx_buf; + mpc8xxx_spi->rx = t->rx_buf; + + INIT_COMPLETION(mpc8xxx_spi->done); + + /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ + if ((t->len - 1) > SPCOM_TRANLEN_MAX) { + dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" + " beyond the SPCOM[TRANLEN] field\n", t->len); + return -EINVAL; + } + mpc8xxx_spi_write_reg(®_base->command, + (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); + + ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); + if (ret) + return ret; + + wait_for_completion(&mpc8xxx_spi->done); + + /* disable rx ints */ + mpc8xxx_spi_write_reg(®_base->mask, 0); + + return mpc8xxx_spi->count; +} + +static void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) +{ + if (cmd[1] && cmd[2] && cmd[3]) { + cmd[1] = (u8)(addr >> 16); + cmd[2] = (u8)(addr >> 8); + cmd[3] = (u8)(addr >> 0); + } +} + +static unsigned int fsl_espi_cmd2addr(u8 *cmd) +{ + if (cmd[1] && cmd[2] && cmd[3]) + return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; + + return 0; +} + +static void fsl_espi_do_trans(struct spi_message *m, + struct fsl_espi_transfer *tr) +{ + struct spi_device *spi = m->spi; + struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); + struct fsl_espi_transfer *espi_trans = tr; + struct spi_message message; + struct spi_transfer *t, *first, trans; + int status = 0; + + spi_message_init(&message); + memset(&trans, 0, sizeof(trans)); + + first = list_first_entry(&m->transfers, struct spi_transfer, + transfer_list); + list_for_each_entry(t, &m->transfers, transfer_list) { + if ((first->bits_per_word != t->bits_per_word) || + (first->speed_hz != t->speed_hz)) { + espi_trans->status = -EINVAL; + dev_err(mspi->dev, "bits_per_word/speed_hz should be" + " same for the same SPI transfer\n"); + return; + } + + trans.speed_hz = t->speed_hz; + trans.bits_per_word = t->bits_per_word; + trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); + } + + trans.len = espi_trans->len; + trans.tx_buf = espi_trans->tx_buf; + trans.rx_buf = espi_trans->rx_buf; + spi_message_add_tail(&trans, &message); + + list_for_each_entry(t, &message.transfers, transfer_list) { + if (t->bits_per_word || t->speed_hz) { + status = -EINVAL; + + status = fsl_espi_setup_transfer(spi, t); + if (status < 0) + break; + } + + if (t->len) + status = fsl_espi_bufs(spi, t); + + if (status) { + status = -EMSGSIZE; + break; + } + + if (t->delay_usecs) + udelay(t->delay_usecs); + } + + espi_trans->status = status; + 
fsl_espi_setup_transfer(spi, NULL); +} + +static void fsl_espi_cmd_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct spi_transfer *t; + u8 *local_buf; + int i = 0; + struct fsl_espi_transfer *espi_trans = trans; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + espi_trans->n_tx; + fsl_espi_do_trans(m, espi_trans); + + espi_trans->actual_length = espi_trans->len; + kfree(local_buf); +} + +static void fsl_espi_rw_trans(struct spi_message *m, + struct fsl_espi_transfer *trans, u8 *rx_buff) +{ + struct fsl_espi_transfer *espi_trans = trans; + unsigned int n_tx = espi_trans->n_tx; + unsigned int n_rx = espi_trans->n_rx; + struct spi_transfer *t; + u8 *local_buf; + u8 *rx_buf = rx_buff; + unsigned int trans_len; + unsigned int addr; + int i, pos, loop; + + local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); + if (!local_buf) { + espi_trans->status = -ENOMEM; + return; + } + + for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { + trans_len = n_rx - pos; + if (trans_len > SPCOM_TRANLEN_MAX - n_tx) + trans_len = SPCOM_TRANLEN_MAX - n_tx; + + i = 0; + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) { + memcpy(local_buf + i, t->tx_buf, t->len); + i += t->len; + } + } + + addr = fsl_espi_cmd2addr(local_buf); + addr += pos; + fsl_espi_addr2cmd(addr, local_buf); + + espi_trans->n_tx = n_tx; + espi_trans->n_rx = trans_len; + espi_trans->len = trans_len + n_tx; + espi_trans->tx_buf = local_buf; + espi_trans->rx_buf = local_buf + n_tx; + fsl_espi_do_trans(m, espi_trans); + + memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); + + if (loop > 0) + espi_trans->actual_length += espi_trans->len - n_tx; + else + espi_trans->actual_length += espi_trans->len; + } + + kfree(local_buf); +} + +static void fsl_espi_do_one_msg(struct spi_message *m) +{ + struct spi_transfer *t; + u8 *rx_buf = NULL; + unsigned int n_tx = 0; + unsigned int n_rx = 0; + struct fsl_espi_transfer espi_trans; + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->tx_buf) + n_tx += t->len; + if (t->rx_buf) { + n_rx += t->len; + rx_buf = t->rx_buf; + } + } + + espi_trans.n_tx = n_tx; + espi_trans.n_rx = n_rx; + espi_trans.len = n_tx + n_rx; + espi_trans.actual_length = 0; + espi_trans.status = 0; + + if (!rx_buf) + fsl_espi_cmd_trans(m, &espi_trans, NULL); + else + fsl_espi_rw_trans(m, &espi_trans, rx_buf); + + m->actual_length = espi_trans.actual_length; + m->status = espi_trans.status; + m->complete(m->context); +} + +static int fsl_espi_setup(struct spi_device *spi) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + int retval; + u32 hw_mode; + u32 loop_mode; + struct spi_mpc8xxx_cs *cs = spi->controller_state; + + if (!spi->max_speed_hz) + return -EINVAL; + + if (!cs) { + cs = kzalloc(sizeof *cs, GFP_KERNEL); + if (!cs) + return -ENOMEM; + spi->controller_state = cs; + } + + mpc8xxx_spi = spi_master_get_devdata(spi->master); + reg_base = mpc8xxx_spi->reg_base; + + hw_mode = cs->hw_mode; /* Save orginal settings */ + cs->hw_mode = mpc8xxx_spi_read_reg( + ®_base->csmode[spi->chip_select]); + /* mask out bits we are going to set */ + cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH + | CSMODE_REV); + + if (spi->mode & SPI_CPHA) 
+ cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK; + if (spi->mode & SPI_CPOL) + cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; + if (!(spi->mode & SPI_LSB_FIRST)) + cs->hw_mode |= CSMODE_REV; + + /* Handle the loop mode */ + loop_mode = mpc8xxx_spi_read_reg(®_base->mode); + loop_mode &= ~SPMODE_LOOP; + if (spi->mode & SPI_LOOP) + loop_mode |= SPMODE_LOOP; + mpc8xxx_spi_write_reg(®_base->mode, loop_mode); + + retval = fsl_espi_setup_transfer(spi, NULL); + if (retval < 0) { + cs->hw_mode = hw_mode; /* Restore settings */ + return retval; + } + return 0; +} + +void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +{ + struct fsl_espi_reg *reg_base = mspi->reg_base; + + /* We need handle RX first */ + if (events & SPIE_NE) { + u32 rx_data; + + /* Spin until RX is done */ + while (SPIE_RXCNT(events) < min(4, mspi->len)) { + cpu_relax(); + events = mpc8xxx_spi_read_reg(®_base->event); + } + mspi->len -= 4; + + rx_data = mpc8xxx_spi_read_reg(®_base->receive); + + if (mspi->rx) + mspi->get_rx(rx_data, mspi); + } + + if (!(events & SPIE_NF)) { + int ret; + + /* spin until TX is done */ + ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( + ®_base->event)) & SPIE_NF) == 0, 1000, 0); + if (!ret) { + dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); + return; + } + } + + /* Clear the events */ + mpc8xxx_spi_write_reg(®_base->event, events); + + mspi->count -= 1; + if (mspi->count) { + u32 word = mspi->get_tx(mspi); + + mpc8xxx_spi_write_reg(®_base->transmit, word); + } else { + complete(&mspi->done); + } +} + +static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) +{ + struct mpc8xxx_spi *mspi = context_data; + struct fsl_espi_reg *reg_base = mspi->reg_base; + irqreturn_t ret = IRQ_NONE; + u32 events; + + /* Get interrupt events(tx/rx) */ + events = mpc8xxx_spi_read_reg(®_base->event); + if (events) + ret = IRQ_HANDLED; + + dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); + + fsl_espi_cpu_irq(mspi, events); + + return ret; +} + +static void fsl_espi_remove(struct mpc8xxx_spi *mspi) +{ + iounmap(mspi->reg_base); +} + +static struct spi_master * __devinit fsl_espi_probe(struct device *dev, + struct resource *mem, unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_espi_reg *reg_base; + u32 regval; + int i, ret = 0; + + master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); + if (!master) { + ret = -ENOMEM; + goto err; + } + + dev_set_drvdata(dev, master); + + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; + + master->setup = fsl_espi_setup; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_espi_remove; + + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (!mpc8xxx_spi->reg_base) { + ret = -ENOMEM; + goto err_probe; + } + + reg_base = mpc8xxx_spi->reg_base; + + /* Register for SPI Interrupt */ + ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, + 0, "fsl_espi", mpc8xxx_spi); + if (ret) + goto free_irq; + + if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { + mpc8xxx_spi->rx_shift = 16; + mpc8xxx_spi->tx_shift = 24; + } + + /* SPI controller initializations */ + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); + + /* Init eSPI CS mode register */ + for (i = 0; i < pdata->max_chipselect; i++) + mpc8xxx_spi_write_reg(®_base->csmode[i], 
CSMODE_INIT_VAL); + + /* Enable SPI interface */ + regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; + + mpc8xxx_spi_write_reg(®_base->mode, regval); + + ret = spi_register_master(master); + if (ret < 0) + goto unreg_master; + + dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq); + + return master; + +unreg_master: + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); +free_irq: + iounmap(mpc8xxx_spi->reg_base); +err_probe: + spi_master_put(master); +err: + return ERR_PTR(ret); +} + +static int of_fsl_espi_get_chipselects(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct fsl_spi_platform_data *pdata = dev->platform_data; + const u32 *prop; + int len; + + prop = of_get_property(np, "fsl,espi-num-chipselects", &len); + if (!prop || len < sizeof(*prop)) { + dev_err(dev, "No 'fsl,espi-num-chipselects' property\n"); + return -EINVAL; + } + + pdata->max_chipselect = *prop; + pdata->cs_control = NULL; + + return 0; +} + +static int __devinit of_fsl_espi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct spi_master *master; + struct resource mem; + struct resource irq; + int ret = -ENOMEM; + + ret = of_mpc8xxx_spi_probe(ofdev, ofid); + if (ret) + return ret; + + ret = of_fsl_espi_get_chipselects(dev); + if (ret) + goto err; + + ret = of_address_to_resource(np, 0, &mem); + if (ret) + goto err; + + ret = of_irq_to_resource(np, 0, &irq); + if (!ret) { + ret = -EINVAL; + goto err; + } + + master = fsl_espi_probe(dev, &mem, irq.start); + if (IS_ERR(master)) { + ret = PTR_ERR(master); + goto err; + } + + return 0; + +err: + return ret; +} + +static int __devexit of_fsl_espi_remove(struct platform_device *dev) +{ + return mpc8xxx_spi_remove(&dev->dev); +} + +static const struct of_device_id of_fsl_espi_match[] = { + { .compatible = "fsl,mpc8536-espi" }, + {} +}; +MODULE_DEVICE_TABLE(of, of_fsl_espi_match); + +static struct of_platform_driver fsl_espi_driver = { + .driver = { + .name = "fsl_espi", + .owner = THIS_MODULE, + .of_match_table = of_fsl_espi_match, + }, + .probe = of_fsl_espi_probe, + .remove = __devexit_p(of_fsl_espi_remove), +}; + +static int __init fsl_espi_init(void) +{ + return of_register_platform_driver(&fsl_espi_driver); +} +module_init(fsl_espi_init); + +static void __exit fsl_espi_exit(void) +{ + of_unregister_platform_driver(&fsl_espi_driver); +} +module_exit(fsl_espi_exit); + +MODULE_AUTHOR("Mingkai Hu"); +MODULE_DESCRIPTION("Enhanced Freescale SPI Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi_fsl_lib.c new file mode 100644 index 00000000000..5cd741fdb5c --- /dev/null +++ b/drivers/spi/spi_fsl_lib.c @@ -0,0 +1,237 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * Copyright 2010 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/fsl_devices.h> +#include <linux/dma-mapping.h> +#include <linux/mm.h> +#include <linux/of_platform.h> +#include <linux/of_spi.h> +#include <sysdev/fsl_soc.h> + +#include "spi_fsl_lib.h" + +#define MPC8XXX_SPI_RX_BUF(type) \ +void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + type *rx = mpc8xxx_spi->rx; \ + *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ + mpc8xxx_spi->rx = rx; \ +} + +#define MPC8XXX_SPI_TX_BUF(type) \ +u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \ +{ \ + u32 data; \ + const type *tx = mpc8xxx_spi->tx; \ + if (!tx) \ + return 0; \ + data = *tx++ << mpc8xxx_spi->tx_shift; \ + mpc8xxx_spi->tx = tx; \ + return data; \ +} + +MPC8XXX_SPI_RX_BUF(u8) +MPC8XXX_SPI_RX_BUF(u16) +MPC8XXX_SPI_RX_BUF(u32) +MPC8XXX_SPI_TX_BUF(u8) +MPC8XXX_SPI_TX_BUF(u16) +MPC8XXX_SPI_TX_BUF(u32) + +struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata) +{ + return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); +} + +void mpc8xxx_spi_work(struct work_struct *work) +{ + struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, + work); + + spin_lock_irq(&mpc8xxx_spi->lock); + while (!list_empty(&mpc8xxx_spi->queue)) { + struct spi_message *m = container_of(mpc8xxx_spi->queue.next, + struct spi_message, queue); + + list_del_init(&m->queue); + spin_unlock_irq(&mpc8xxx_spi->lock); + + if (mpc8xxx_spi->spi_do_one_msg) + mpc8xxx_spi->spi_do_one_msg(m); + + spin_lock_irq(&mpc8xxx_spi->lock); + } + spin_unlock_irq(&mpc8xxx_spi->lock); +} + +int mpc8xxx_spi_transfer(struct spi_device *spi, + struct spi_message *m) +{ + struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + unsigned long flags; + + m->actual_length = 0; + m->status = -EINPROGRESS; + + spin_lock_irqsave(&mpc8xxx_spi->lock, flags); + list_add_tail(&m->queue, &mpc8xxx_spi->queue); + queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); + spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); + + return 0; +} + +void mpc8xxx_spi_cleanup(struct spi_device *spi) +{ + kfree(spi->controller_state); +} + +const char *mpc8xxx_spi_strmode(unsigned int flags) +{ + if (flags & SPI_QE_CPU_MODE) { + return "QE CPU"; + } else if (flags & SPI_CPM_MODE) { + if (flags & SPI_QE) + return "QE"; + else if (flags & SPI_CPM2) + return "CPM2"; + else + return "CPM1"; + } + return "CPU"; +} + +int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq) +{ + struct fsl_spi_platform_data *pdata = dev->platform_data; + struct spi_master *master; + struct mpc8xxx_spi *mpc8xxx_spi; + int ret = 0; + + master = dev_get_drvdata(dev); + + /* the spi->mode bits understood by this driver: */ + master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH + | SPI_LSB_FIRST | SPI_LOOP; + + master->transfer = mpc8xxx_spi_transfer; + master->cleanup = mpc8xxx_spi_cleanup; + master->dev.of_node = dev->of_node; + + mpc8xxx_spi = spi_master_get_devdata(master); + mpc8xxx_spi->dev = dev; + mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; + mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; + mpc8xxx_spi->flags = pdata->flags; + mpc8xxx_spi->spibrg = pdata->sysclk; + mpc8xxx_spi->irq = irq; + + mpc8xxx_spi->rx_shift = 0; + mpc8xxx_spi->tx_shift = 0; + + init_completion(&mpc8xxx_spi->done); + + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->max_chipselect; + + spin_lock_init(&mpc8xxx_spi->lock); + init_completion(&mpc8xxx_spi->done); + INIT_WORK(&mpc8xxx_spi->work, 
mpc8xxx_spi_work); + INIT_LIST_HEAD(&mpc8xxx_spi->queue); + + mpc8xxx_spi->workqueue = create_singlethread_workqueue( + dev_name(master->dev.parent)); + if (mpc8xxx_spi->workqueue == NULL) { + ret = -EBUSY; + goto err; + } + + return 0; + +err: + return ret; +} + +int __devexit mpc8xxx_spi_remove(struct device *dev) +{ + struct mpc8xxx_spi *mpc8xxx_spi; + struct spi_master *master; + + master = dev_get_drvdata(dev); + mpc8xxx_spi = spi_master_get_devdata(master); + + flush_workqueue(mpc8xxx_spi->workqueue); + destroy_workqueue(mpc8xxx_spi->workqueue); + spi_unregister_master(master); + + free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); + + if (mpc8xxx_spi->spi_remove) + mpc8xxx_spi->spi_remove(mpc8xxx_spi); + + return 0; +} + +int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) +{ + struct device *dev = &ofdev->dev; + struct device_node *np = ofdev->dev.of_node; + struct mpc8xxx_spi_probe_info *pinfo; + struct fsl_spi_platform_data *pdata; + const void *prop; + int ret = -ENOMEM; + + pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + pdata = &pinfo->pdata; + dev->platform_data = pdata; + + /* Allocate bus num dynamically. */ + pdata->bus_num = -1; + + /* SPI controller is either clocked from QE or SoC clock. */ + pdata->sysclk = get_brgfreq(); + if (pdata->sysclk == -1) { + pdata->sysclk = fsl_get_sys_freq(); + if (pdata->sysclk == -1) { + ret = -ENODEV; + goto err; + } + } + + prop = of_get_property(np, "mode", NULL); + if (prop && !strcmp(prop, "cpu-qe")) + pdata->flags = SPI_QE_CPU_MODE; + else if (prop && !strcmp(prop, "qe")) + pdata->flags = SPI_CPM_MODE | SPI_QE; + else if (of_device_is_compatible(np, "fsl,cpm2-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM2; + else if (of_device_is_compatible(np, "fsl,cpm1-spi")) + pdata->flags = SPI_CPM_MODE | SPI_CPM1; + + return 0; + +err: + kfree(pinfo); + return ret; +} diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi_fsl_lib.h new file mode 100644 index 00000000000..281e060977c --- /dev/null +++ b/drivers/spi/spi_fsl_lib.h @@ -0,0 +1,124 @@ +/* + * Freescale SPI/eSPI controller driver library. + * + * Maintainer: Kumar Gala + * + * Copyright 2010 Freescale Semiconductor, Inc. + * Copyright (C) 2006 Polycom, Inc. + * + * CPM SPI and QE buffer descriptors mode support: + * Copyright (c) 2009 MontaVista Software, Inc. + * Author: Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#ifndef __SPI_FSL_LIB_H__ +#define __SPI_FSL_LIB_H__ + +#include <asm/io.h> + +/* SPI/eSPI Controller driver's private data. 
*/ +struct mpc8xxx_spi { + struct device *dev; + void *reg_base; + + /* rx & tx bufs from the spi_transfer */ + const void *tx; + void *rx; +#ifdef CONFIG_SPI_FSL_ESPI + int len; +#endif + + int subblock; + struct spi_pram __iomem *pram; + struct cpm_buf_desc __iomem *tx_bd; + struct cpm_buf_desc __iomem *rx_bd; + + struct spi_transfer *xfer_in_progress; + + /* dma addresses for CPM transfers */ + dma_addr_t tx_dma; + dma_addr_t rx_dma; + bool map_tx_dma; + bool map_rx_dma; + + dma_addr_t dma_dummy_tx; + dma_addr_t dma_dummy_rx; + + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32(*get_tx) (struct mpc8xxx_spi *); + + /* hooks for different controller driver */ + void (*spi_do_one_msg) (struct spi_message *m); + void (*spi_remove) (struct mpc8xxx_spi *mspi); + + unsigned int count; + unsigned int irq; + + unsigned nsecs; /* (clock cycle time)/2 */ + + u32 spibrg; /* SPIBRG input clock */ + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + + unsigned int flags; + + struct workqueue_struct *workqueue; + struct work_struct work; + + struct list_head queue; + spinlock_t lock; + + struct completion done; +}; + +struct spi_mpc8xxx_cs { + /* functions to deal with different sized buffers */ + void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); + u32 (*get_tx) (struct mpc8xxx_spi *); + u32 rx_shift; /* RX data reg shift when in qe mode */ + u32 tx_shift; /* TX data reg shift when in qe mode */ + u32 hw_mode; /* Holds HW mode register settings */ +}; + +static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) +{ + out_be32(reg, val); +} + +static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) +{ + return in_be32(reg); +} + +struct mpc8xxx_spi_probe_info { + struct fsl_spi_platform_data pdata; + int *gpios; + bool *alow_flags; +}; + +extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi); +extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); +extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi); + +extern struct mpc8xxx_spi_probe_info *to_of_pinfo( + struct fsl_spi_platform_data *pdata); +extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, + struct spi_transfer *t, unsigned int len); +extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); +extern void mpc8xxx_spi_cleanup(struct spi_device *spi); +extern const char *mpc8xxx_spi_strmode(unsigned int flags); +extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, + unsigned int irq); +extern int mpc8xxx_spi_remove(struct device *dev); +extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid); + +#endif /* __SPI_FSL_LIB_H__ */ diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_fsl_spi.c index d31b57f7baa..7ca52d3ae8f 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_fsl_spi.c @@ -1,9 +1,10 @@ /* - * MPC8xxx SPI controller driver. + * Freescale SPI controller driver. * * Maintainer: Kumar Gala * * Copyright (C) 2006 Polycom, Inc. + * Copyright 2010 Freescale Semiconductor, Inc. * * CPM SPI and QE buffer descriptors mode support: * Copyright (c) 2009 MontaVista Software, Inc. @@ -15,18 +16,11 @@ * option) any later version. 
*/ #include <linux/module.h> -#include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> -#include <linux/bug.h> -#include <linux/errno.h> -#include <linux/err.h> -#include <linux/io.h> -#include <linux/completion.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/irq.h> -#include <linux/device.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/platform_device.h> @@ -38,12 +32,12 @@ #include <linux/of_platform.h> #include <linux/gpio.h> #include <linux/of_gpio.h> -#include <linux/slab.h> #include <sysdev/fsl_soc.h> #include <asm/cpm.h> #include <asm/qe.h> -#include <asm/irq.h> + +#include "spi_fsl_lib.h" /* CPM1 and CPM2 are mutually exclusive. */ #ifdef CONFIG_CPM1 @@ -55,7 +49,7 @@ #endif /* SPI Controller registers */ -struct mpc8xxx_spi_reg { +struct fsl_spi_reg { u8 res1[0x20]; __be32 mode; __be32 event; @@ -80,7 +74,7 @@ struct mpc8xxx_spi_reg { /* * Default for SPI Mode: - * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk + * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk */ #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) @@ -102,112 +96,16 @@ struct mpc8xxx_spi_reg { #define SPI_PRAM_SIZE 0x100 #define SPI_MRBLR ((unsigned int)PAGE_SIZE) -/* SPI Controller driver's private data. */ -struct mpc8xxx_spi { - struct device *dev; - struct mpc8xxx_spi_reg __iomem *base; - - /* rx & tx bufs from the spi_transfer */ - const void *tx; - void *rx; - - int subblock; - struct spi_pram __iomem *pram; - struct cpm_buf_desc __iomem *tx_bd; - struct cpm_buf_desc __iomem *rx_bd; - - struct spi_transfer *xfer_in_progress; - - /* dma addresses for CPM transfers */ - dma_addr_t tx_dma; - dma_addr_t rx_dma; - bool map_tx_dma; - bool map_rx_dma; - - dma_addr_t dma_dummy_tx; - dma_addr_t dma_dummy_rx; - - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32(*get_tx) (struct mpc8xxx_spi *); - - unsigned int count; - unsigned int irq; - - unsigned nsecs; /* (clock cycle time)/2 */ - - u32 spibrg; /* SPIBRG input clock */ - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - - unsigned int flags; - - struct workqueue_struct *workqueue; - struct work_struct work; - - struct list_head queue; - spinlock_t lock; +static void *fsl_dummy_rx; +static DEFINE_MUTEX(fsl_dummy_rx_lock); +static int fsl_dummy_rx_refcnt; - struct completion done; -}; - -static void *mpc8xxx_dummy_rx; -static DEFINE_MUTEX(mpc8xxx_dummy_rx_lock); -static int mpc8xxx_dummy_rx_refcnt; - -struct spi_mpc8xxx_cs { - /* functions to deal with different sized buffers */ - void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *); - u32 (*get_tx) (struct mpc8xxx_spi *); - u32 rx_shift; /* RX data reg shift when in qe mode */ - u32 tx_shift; /* TX data reg shift when in qe mode */ - u32 hw_mode; /* Holds HW mode register settings */ -}; - -static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) -{ - out_be32(reg, val); -} - -static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) -{ - return in_be32(reg); -} - -#define MPC83XX_SPI_RX_BUF(type) \ -static \ -void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - type *rx = mpc8xxx_spi->rx; \ - *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \ - mpc8xxx_spi->rx = rx; \ -} - -#define MPC83XX_SPI_TX_BUF(type) \ -static \ -u32 mpc8xxx_spi_tx_buf_##type(struct 
mpc8xxx_spi *mpc8xxx_spi) \ -{ \ - u32 data; \ - const type *tx = mpc8xxx_spi->tx; \ - if (!tx) \ - return 0; \ - data = *tx++ << mpc8xxx_spi->tx_shift; \ - mpc8xxx_spi->tx = tx; \ - return data; \ -} - -MPC83XX_SPI_RX_BUF(u8) -MPC83XX_SPI_RX_BUF(u16) -MPC83XX_SPI_RX_BUF(u32) -MPC83XX_SPI_TX_BUF(u8) -MPC83XX_SPI_TX_BUF(u16) -MPC83XX_SPI_TX_BUF(u32) - -static void mpc8xxx_spi_change_mode(struct spi_device *spi) +static void fsl_spi_change_mode(struct spi_device *spi) { struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); struct spi_mpc8xxx_cs *cs = spi->controller_state; - __be32 __iomem *mode = &mspi->base->mode; + struct fsl_spi_reg *reg_base = mspi->reg_base; + __be32 __iomem *mode = ®_base->mode; unsigned long flags; if (cs->hw_mode == mpc8xxx_spi_read_reg(mode)) @@ -238,7 +136,7 @@ static void mpc8xxx_spi_change_mode(struct spi_device *spi) local_irq_restore(flags); } -static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) +static void fsl_spi_chipselect(struct spi_device *spi, int value) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data; @@ -256,18 +154,17 @@ static void mpc8xxx_spi_chipselect(struct spi_device *spi, int value) mpc8xxx_spi->get_rx = cs->get_rx; mpc8xxx_spi->get_tx = cs->get_tx; - mpc8xxx_spi_change_mode(spi); + fsl_spi_change_mode(spi); if (pdata->cs_control) pdata->cs_control(spi, pol); } } -static int -mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, - struct spi_device *spi, - struct mpc8xxx_spi *mpc8xxx_spi, - int bits_per_word) +static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + struct mpc8xxx_spi *mpc8xxx_spi, + int bits_per_word) { cs->rx_shift = 0; cs->tx_shift = 0; @@ -307,10 +204,9 @@ mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, return bits_per_word; } -static int -mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, - struct spi_device *spi, - int bits_per_word) +static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, + struct spi_device *spi, + int bits_per_word) { /* QE uses Little Endian for words > 8 * so transform all words > 8 into 8 bits @@ -326,13 +222,13 @@ mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs, return bits_per_word; } -static -int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) +static int fsl_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) { struct mpc8xxx_spi *mpc8xxx_spi; - int bits_per_word; + int bits_per_word = 0; u8 pm; - u32 hz; + u32 hz = 0; struct spi_mpc8xxx_cs *cs = spi->controller_state; mpc8xxx_spi = spi_master_get_devdata(spi->master); @@ -340,9 +236,6 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) if (t) { bits_per_word = t->bits_per_word; hz = t->speed_hz; - } else { - bits_per_word = 0; - hz = 0; } /* spi_transfer level calls that work per-word */ @@ -388,43 +281,52 @@ int mpc8xxx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) hz, mpc8xxx_spi->spibrg / 1024); if (pm > 16) pm = 16; - } else + } else { pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1; + } if (pm) pm--; cs->hw_mode |= SPMODE_PM(pm); - mpc8xxx_spi_change_mode(spi); + fsl_spi_change_mode(spi); return 0; } -static void mpc8xxx_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) +static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) { struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; unsigned int xfer_len = min(mspi->count, 
SPI_MRBLR); unsigned int xfer_ofs; + struct fsl_spi_reg *reg_base = mspi->reg_base; xfer_ofs = mspi->xfer_in_progress->len - mspi->count; - out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); + if (mspi->rx_dma == mspi->dma_dummy_rx) + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); + else + out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); out_be16(&rx_bd->cbd_datlen, 0); out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); - out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); + if (mspi->tx_dma == mspi->dma_dummy_tx) + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); + else + out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); out_be16(&tx_bd->cbd_datlen, xfer_len); out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | BD_SC_LAST); /* start transfer */ - mpc8xxx_spi_write_reg(&mspi->base->command, SPCOM_STR); + mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); } -static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, +static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, bool is_dma_mapped) { struct device *dev = mspi->dev; + struct fsl_spi_reg *reg_base = mspi->reg_base; if (is_dma_mapped) { mspi->map_tx_dma = 0; @@ -469,13 +371,13 @@ static int mpc8xxx_spi_cpm_bufs(struct mpc8xxx_spi *mspi, } /* enable rx ints */ - mpc8xxx_spi_write_reg(&mspi->base->mask, SPIE_RXB); + mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); mspi->xfer_in_progress = t; mspi->count = t->len; /* start CPM transfers */ - mpc8xxx_spi_cpm_bufs_start(mspi); + fsl_spi_cpm_bufs_start(mspi); return 0; @@ -485,7 +387,7 @@ err_rx_dma: return -ENOMEM; } -static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) +static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; struct spi_transfer *t = mspi->xfer_in_progress; @@ -497,31 +399,34 @@ static void mpc8xxx_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) mspi->xfer_in_progress = NULL; } -static int mpc8xxx_spi_cpu_bufs(struct mpc8xxx_spi *mspi, +static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, unsigned int len) { u32 word; + struct fsl_spi_reg *reg_base = mspi->reg_base; mspi->count = len; /* enable rx ints */ - mpc8xxx_spi_write_reg(&mspi->base->mask, SPIM_NE); + mpc8xxx_spi_write_reg(®_base->mask, SPIM_NE); /* transmit word */ word = mspi->get_tx(mspi); - mpc8xxx_spi_write_reg(&mspi->base->transmit, word); + mpc8xxx_spi_write_reg(®_base->transmit, word); return 0; } -static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, +static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t, bool is_dma_mapped) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); + struct fsl_spi_reg *reg_base; unsigned int len = t->len; u8 bits_per_word; int ret; + reg_base = mpc8xxx_spi->reg_base; bits_per_word = spi->bits_per_word; if (t->bits_per_word) bits_per_word = t->bits_per_word; @@ -545,24 +450,24 @@ static int mpc8xxx_spi_bufs(struct spi_device *spi, struct spi_transfer *t, INIT_COMPLETION(mpc8xxx_spi->done); if (mpc8xxx_spi->flags & SPI_CPM_MODE) - ret = mpc8xxx_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); + ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped); else - ret = mpc8xxx_spi_cpu_bufs(mpc8xxx_spi, t, len); + ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len); if (ret) return ret; wait_for_completion(&mpc8xxx_spi->done); /* disable rx ints */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); if (mpc8xxx_spi->flags & SPI_CPM_MODE) - 
mpc8xxx_spi_cpm_bufs_complete(mpc8xxx_spi); + fsl_spi_cpm_bufs_complete(mpc8xxx_spi); return mpc8xxx_spi->count; } -static void mpc8xxx_spi_do_one_msg(struct spi_message *m) +static void fsl_spi_do_one_msg(struct spi_message *m) { struct spi_device *spi = m->spi; struct spi_transfer *t; @@ -578,18 +483,18 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) status = -EINVAL; if (cs_change) - status = mpc8xxx_spi_setup_transfer(spi, t); + status = fsl_spi_setup_transfer(spi, t); if (status < 0) break; } if (cs_change) { - mpc8xxx_spi_chipselect(spi, BITBANG_CS_ACTIVE); + fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE); ndelay(nsecs); } cs_change = t->cs_change; if (t->len) - status = mpc8xxx_spi_bufs(spi, t, m->is_dma_mapped); + status = fsl_spi_bufs(spi, t, m->is_dma_mapped); if (status) { status = -EMSGSIZE; break; @@ -601,7 +506,7 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) if (cs_change) { ndelay(nsecs); - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); ndelay(nsecs); } } @@ -611,35 +516,16 @@ static void mpc8xxx_spi_do_one_msg(struct spi_message *m) if (status || !cs_change) { ndelay(nsecs); - mpc8xxx_spi_chipselect(spi, BITBANG_CS_INACTIVE); + fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); } - mpc8xxx_spi_setup_transfer(spi, NULL); -} - -static void mpc8xxx_spi_work(struct work_struct *work) -{ - struct mpc8xxx_spi *mpc8xxx_spi = container_of(work, struct mpc8xxx_spi, - work); - - spin_lock_irq(&mpc8xxx_spi->lock); - while (!list_empty(&mpc8xxx_spi->queue)) { - struct spi_message *m = container_of(mpc8xxx_spi->queue.next, - struct spi_message, queue); - - list_del_init(&m->queue); - spin_unlock_irq(&mpc8xxx_spi->lock); - - mpc8xxx_spi_do_one_msg(m); - - spin_lock_irq(&mpc8xxx_spi->lock); - } - spin_unlock_irq(&mpc8xxx_spi->lock); + fsl_spi_setup_transfer(spi, NULL); } -static int mpc8xxx_spi_setup(struct spi_device *spi) +static int fsl_spi_setup(struct spi_device *spi) { struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; int retval; u32 hw_mode; struct spi_mpc8xxx_cs *cs = spi->controller_state; @@ -655,8 +541,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) } mpc8xxx_spi = spi_master_get_devdata(spi->master); + reg_base = mpc8xxx_spi->reg_base; + hw_mode = cs->hw_mode; /* Save original settings */ - cs->hw_mode = mpc8xxx_spi_read_reg(&mpc8xxx_spi->base->mode); + cs->hw_mode = mpc8xxx_spi_read_reg(®_base->mode); /* mask out bits we are going to set */ cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH | SPMODE_REV | SPMODE_LOOP); @@ -670,7 +558,7 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) if (spi->mode & SPI_LOOP) cs->hw_mode |= SPMODE_LOOP; - retval = mpc8xxx_spi_setup_transfer(spi, NULL); + retval = fsl_spi_setup_transfer(spi, NULL); if (retval < 0) { cs->hw_mode = hw_mode; /* Restore settings */ return retval; @@ -678,9 +566,10 @@ static int mpc8xxx_spi_setup(struct spi_device *spi) return 0; } -static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) +static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { u16 len; + struct fsl_spi_reg *reg_base = mspi->reg_base; dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); @@ -692,20 +581,22 @@ static void mpc8xxx_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) } /* Clear the events */ - mpc8xxx_spi_write_reg(&mspi->base->event, events); + mpc8xxx_spi_write_reg(®_base->event, events); mspi->count -= len; if (mspi->count) - 
mpc8xxx_spi_cpm_bufs_start(mspi); + fsl_spi_cpm_bufs_start(mspi); else complete(&mspi->done); } -static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) +static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) { + struct fsl_spi_reg *reg_base = mspi->reg_base; + /* We need handle RX first */ if (events & SPIE_NE) { - u32 rx_data = mpc8xxx_spi_read_reg(&mspi->base->receive); + u32 rx_data = mpc8xxx_spi_read_reg(®_base->receive); if (mspi->rx) mspi->get_rx(rx_data, mspi); @@ -714,102 +605,80 @@ static void mpc8xxx_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) if ((events & SPIE_NF) == 0) /* spin until TX is done */ while (((events = - mpc8xxx_spi_read_reg(&mspi->base->event)) & + mpc8xxx_spi_read_reg(®_base->event)) & SPIE_NF) == 0) cpu_relax(); /* Clear the events */ - mpc8xxx_spi_write_reg(&mspi->base->event, events); + mpc8xxx_spi_write_reg(®_base->event, events); mspi->count -= 1; if (mspi->count) { u32 word = mspi->get_tx(mspi); - mpc8xxx_spi_write_reg(&mspi->base->transmit, word); + mpc8xxx_spi_write_reg(®_base->transmit, word); } else { complete(&mspi->done); } } -static irqreturn_t mpc8xxx_spi_irq(s32 irq, void *context_data) +static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) { struct mpc8xxx_spi *mspi = context_data; irqreturn_t ret = IRQ_NONE; u32 events; + struct fsl_spi_reg *reg_base = mspi->reg_base; /* Get interrupt events(tx/rx) */ - events = mpc8xxx_spi_read_reg(&mspi->base->event); + events = mpc8xxx_spi_read_reg(®_base->event); if (events) ret = IRQ_HANDLED; dev_dbg(mspi->dev, "%s: events %x\n", __func__, events); if (mspi->flags & SPI_CPM_MODE) - mpc8xxx_spi_cpm_irq(mspi, events); + fsl_spi_cpm_irq(mspi, events); else - mpc8xxx_spi_cpu_irq(mspi, events); + fsl_spi_cpu_irq(mspi, events); return ret; } -static int mpc8xxx_spi_transfer(struct spi_device *spi, - struct spi_message *m) +static void *fsl_spi_alloc_dummy_rx(void) { - struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); - unsigned long flags; + mutex_lock(&fsl_dummy_rx_lock); - m->actual_length = 0; - m->status = -EINPROGRESS; + if (!fsl_dummy_rx) + fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); + if (fsl_dummy_rx) + fsl_dummy_rx_refcnt++; - spin_lock_irqsave(&mpc8xxx_spi->lock, flags); - list_add_tail(&m->queue, &mpc8xxx_spi->queue); - queue_work(mpc8xxx_spi->workqueue, &mpc8xxx_spi->work); - spin_unlock_irqrestore(&mpc8xxx_spi->lock, flags); + mutex_unlock(&fsl_dummy_rx_lock); - return 0; + return fsl_dummy_rx; } - -static void mpc8xxx_spi_cleanup(struct spi_device *spi) +static void fsl_spi_free_dummy_rx(void) { - kfree(spi->controller_state); -} - -static void *mpc8xxx_spi_alloc_dummy_rx(void) -{ - mutex_lock(&mpc8xxx_dummy_rx_lock); - - if (!mpc8xxx_dummy_rx) - mpc8xxx_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); - if (mpc8xxx_dummy_rx) - mpc8xxx_dummy_rx_refcnt++; - - mutex_unlock(&mpc8xxx_dummy_rx_lock); - - return mpc8xxx_dummy_rx; -} - -static void mpc8xxx_spi_free_dummy_rx(void) -{ - mutex_lock(&mpc8xxx_dummy_rx_lock); + mutex_lock(&fsl_dummy_rx_lock); - switch (mpc8xxx_dummy_rx_refcnt) { + switch (fsl_dummy_rx_refcnt) { case 0: WARN_ON(1); break; case 1: - kfree(mpc8xxx_dummy_rx); - mpc8xxx_dummy_rx = NULL; + kfree(fsl_dummy_rx); + fsl_dummy_rx = NULL; /* fall through */ default: - mpc8xxx_dummy_rx_refcnt--; + fsl_dummy_rx_refcnt--; break; } - mutex_unlock(&mpc8xxx_dummy_rx_lock); + mutex_unlock(&fsl_dummy_rx_lock); } -static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) +static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi 
*mspi) { struct device *dev = mspi->dev; struct device_node *np = dev->of_node; @@ -863,7 +732,7 @@ static unsigned long mpc8xxx_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) return pram_ofs; } -static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) +static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; struct device_node *np = dev->of_node; @@ -875,7 +744,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) if (!(mspi->flags & SPI_CPM_MODE)) return 0; - if (!mpc8xxx_spi_alloc_dummy_rx()) + if (!fsl_spi_alloc_dummy_rx()) return -ENOMEM; if (mspi->flags & SPI_QE) { @@ -896,7 +765,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) } } - pram_ofs = mpc8xxx_spi_cpm_get_pram(mspi); + pram_ofs = fsl_spi_cpm_get_pram(mspi); if (IS_ERR_VALUE(pram_ofs)) { dev_err(dev, "can't allocate spi parameter ram\n"); goto err_pram; @@ -916,7 +785,7 @@ static int mpc8xxx_spi_cpm_init(struct mpc8xxx_spi *mspi) goto err_dummy_tx; } - mspi->dma_dummy_rx = dma_map_single(dev, mpc8xxx_dummy_rx, SPI_MRBLR, + mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { dev_err(dev, "unable to map dummy rx buffer\n"); @@ -954,11 +823,11 @@ err_dummy_tx: err_bds: cpm_muram_free(pram_ofs); err_pram: - mpc8xxx_spi_free_dummy_rx(); + fsl_spi_free_dummy_rx(); return -ENOMEM; } -static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) +static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { struct device *dev = mspi->dev; @@ -966,30 +835,22 @@ static void mpc8xxx_spi_cpm_free(struct mpc8xxx_spi *mspi) dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); cpm_muram_free(cpm_muram_offset(mspi->pram)); - mpc8xxx_spi_free_dummy_rx(); + fsl_spi_free_dummy_rx(); } -static const char *mpc8xxx_spi_strmode(unsigned int flags) +static void fsl_spi_remove(struct mpc8xxx_spi *mspi) { - if (flags & SPI_QE_CPU_MODE) { - return "QE CPU"; - } else if (flags & SPI_CPM_MODE) { - if (flags & SPI_QE) - return "QE"; - else if (flags & SPI_CPM2) - return "CPM2"; - else - return "CPM1"; - } - return "CPU"; + iounmap(mspi->reg_base); + fsl_spi_cpm_free(mspi); } -static struct spi_master * __devinit -mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) +static struct spi_master * __devinit fsl_spi_probe(struct device *dev, + struct resource *mem, unsigned int irq) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct spi_master *master; struct mpc8xxx_spi *mpc8xxx_spi; + struct fsl_spi_reg *reg_base; u32 regval; int ret = 0; @@ -1001,132 +862,77 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) dev_set_drvdata(dev, master); - /* the spi->mode bits understood by this driver: */ - master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH - | SPI_LSB_FIRST | SPI_LOOP; + ret = mpc8xxx_spi_probe(dev, mem, irq); + if (ret) + goto err_probe; - master->setup = mpc8xxx_spi_setup; - master->transfer = mpc8xxx_spi_transfer; - master->cleanup = mpc8xxx_spi_cleanup; - master->dev.of_node = dev->of_node; + master->setup = fsl_spi_setup; mpc8xxx_spi = spi_master_get_devdata(master); - mpc8xxx_spi->dev = dev; - mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8; - mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8; - mpc8xxx_spi->flags = pdata->flags; - mpc8xxx_spi->spibrg = pdata->sysclk; + mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; + mpc8xxx_spi->spi_remove = fsl_spi_remove; - ret = 
mpc8xxx_spi_cpm_init(mpc8xxx_spi); + + ret = fsl_spi_cpm_init(mpc8xxx_spi); if (ret) goto err_cpm_init; - mpc8xxx_spi->rx_shift = 0; - mpc8xxx_spi->tx_shift = 0; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { mpc8xxx_spi->rx_shift = 16; mpc8xxx_spi->tx_shift = 24; } - init_completion(&mpc8xxx_spi->done); - - mpc8xxx_spi->base = ioremap(mem->start, resource_size(mem)); - if (mpc8xxx_spi->base == NULL) { + mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); + if (mpc8xxx_spi->reg_base == NULL) { ret = -ENOMEM; goto err_ioremap; } - mpc8xxx_spi->irq = irq; - /* Register for SPI Interrupt */ - ret = request_irq(mpc8xxx_spi->irq, mpc8xxx_spi_irq, - 0, "mpc8xxx_spi", mpc8xxx_spi); + ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, + 0, "fsl_spi", mpc8xxx_spi); if (ret != 0) - goto unmap_io; + goto free_irq; - master->bus_num = pdata->bus_num; - master->num_chipselect = pdata->max_chipselect; + reg_base = mpc8xxx_spi->reg_base; /* SPI controller initializations */ - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mask, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->command, 0); - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->event, 0xffffffff); + mpc8xxx_spi_write_reg(®_base->mode, 0); + mpc8xxx_spi_write_reg(®_base->mask, 0); + mpc8xxx_spi_write_reg(®_base->command, 0); + mpc8xxx_spi_write_reg(®_base->event, 0xffffffff); /* Enable SPI interface */ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) regval |= SPMODE_OP; - mpc8xxx_spi_write_reg(&mpc8xxx_spi->base->mode, regval); - spin_lock_init(&mpc8xxx_spi->lock); - init_completion(&mpc8xxx_spi->done); - INIT_WORK(&mpc8xxx_spi->work, mpc8xxx_spi_work); - INIT_LIST_HEAD(&mpc8xxx_spi->queue); - - mpc8xxx_spi->workqueue = create_singlethread_workqueue( - dev_name(master->dev.parent)); - if (mpc8xxx_spi->workqueue == NULL) { - ret = -EBUSY; - goto free_irq; - } + mpc8xxx_spi_write_reg(®_base->mode, regval); ret = spi_register_master(master); if (ret < 0) goto unreg_master; - dev_info(dev, "at 0x%p (irq = %d), %s mode\n", mpc8xxx_spi->base, + dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base, mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags)); return master; unreg_master: - destroy_workqueue(mpc8xxx_spi->workqueue); -free_irq: free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); -unmap_io: - iounmap(mpc8xxx_spi->base); +free_irq: + iounmap(mpc8xxx_spi->reg_base); err_ioremap: - mpc8xxx_spi_cpm_free(mpc8xxx_spi); + fsl_spi_cpm_free(mpc8xxx_spi); err_cpm_init: +err_probe: spi_master_put(master); err: return ERR_PTR(ret); } -static int __devexit mpc8xxx_spi_remove(struct device *dev) -{ - struct mpc8xxx_spi *mpc8xxx_spi; - struct spi_master *master; - - master = dev_get_drvdata(dev); - mpc8xxx_spi = spi_master_get_devdata(master); - - flush_workqueue(mpc8xxx_spi->workqueue); - destroy_workqueue(mpc8xxx_spi->workqueue); - spi_unregister_master(master); - - free_irq(mpc8xxx_spi->irq, mpc8xxx_spi); - iounmap(mpc8xxx_spi->base); - mpc8xxx_spi_cpm_free(mpc8xxx_spi); - - return 0; -} - -struct mpc8xxx_spi_probe_info { - struct fsl_spi_platform_data pdata; - int *gpios; - bool *alow_flags; -}; - -static struct mpc8xxx_spi_probe_info * -to_of_pinfo(struct fsl_spi_platform_data *pdata) -{ - return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata); -} - -static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) +static void fsl_spi_cs_control(struct spi_device *spi, bool on) { struct device *dev = spi->dev.parent; struct 
mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data); @@ -1137,7 +943,7 @@ static void mpc8xxx_spi_cs_control(struct spi_device *spi, bool on) gpio_set_value(gpio, on ^ alow); } -static int of_mpc8xxx_spi_get_chipselects(struct device *dev) +static int of_fsl_spi_get_chipselects(struct device *dev) { struct device_node *np = dev->of_node; struct fsl_spi_platform_data *pdata = dev->platform_data; @@ -1198,7 +1004,7 @@ static int of_mpc8xxx_spi_get_chipselects(struct device *dev) } pdata->max_chipselect = ngpios; - pdata->cs_control = mpc8xxx_spi_cs_control; + pdata->cs_control = fsl_spi_cs_control; return 0; @@ -1217,7 +1023,7 @@ err_alloc_flags: return ret; } -static int of_mpc8xxx_spi_free_chipselects(struct device *dev) +static int of_fsl_spi_free_chipselects(struct device *dev) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata); @@ -1236,50 +1042,21 @@ static int of_mpc8xxx_spi_free_chipselects(struct device *dev) return 0; } -static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, - const struct of_device_id *ofid) +static int __devinit of_fsl_spi_probe(struct platform_device *ofdev, + const struct of_device_id *ofid) { struct device *dev = &ofdev->dev; struct device_node *np = ofdev->dev.of_node; - struct mpc8xxx_spi_probe_info *pinfo; - struct fsl_spi_platform_data *pdata; struct spi_master *master; struct resource mem; struct resource irq; - const void *prop; int ret = -ENOMEM; - pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL); - if (!pinfo) - return -ENOMEM; - - pdata = &pinfo->pdata; - dev->platform_data = pdata; - - /* Allocate bus num dynamically. */ - pdata->bus_num = -1; - - /* SPI controller is either clocked from QE or SoC clock. */ - pdata->sysclk = get_brgfreq(); - if (pdata->sysclk == -1) { - pdata->sysclk = fsl_get_sys_freq(); - if (pdata->sysclk == -1) { - ret = -ENODEV; - goto err_clk; - } - } + ret = of_mpc8xxx_spi_probe(ofdev, ofid); + if (ret) + return ret; - prop = of_get_property(np, "mode", NULL); - if (prop && !strcmp(prop, "cpu-qe")) - pdata->flags = SPI_QE_CPU_MODE; - else if (prop && !strcmp(prop, "qe")) - pdata->flags = SPI_CPM_MODE | SPI_QE; - else if (of_device_is_compatible(np, "fsl,cpm2-spi")) - pdata->flags = SPI_CPM_MODE | SPI_CPM2; - else if (of_device_is_compatible(np, "fsl,cpm1-spi")) - pdata->flags = SPI_CPM_MODE | SPI_CPM1; - - ret = of_mpc8xxx_spi_get_chipselects(dev); + ret = of_fsl_spi_get_chipselects(dev); if (ret) goto err; @@ -1293,7 +1070,7 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, goto err; } - master = mpc8xxx_spi_probe(dev, &mem, irq.start); + master = fsl_spi_probe(dev, &mem, irq.start); if (IS_ERR(master)) { ret = PTR_ERR(master); goto err; @@ -1302,42 +1079,40 @@ static int __devinit of_mpc8xxx_spi_probe(struct platform_device *ofdev, return 0; err: - of_mpc8xxx_spi_free_chipselects(dev); -err_clk: - kfree(pinfo); + of_fsl_spi_free_chipselects(dev); return ret; } -static int __devexit of_mpc8xxx_spi_remove(struct platform_device *ofdev) +static int __devexit of_fsl_spi_remove(struct platform_device *ofdev) { int ret; ret = mpc8xxx_spi_remove(&ofdev->dev); if (ret) return ret; - of_mpc8xxx_spi_free_chipselects(&ofdev->dev); + of_fsl_spi_free_chipselects(&ofdev->dev); return 0; } -static const struct of_device_id of_mpc8xxx_spi_match[] = { +static const struct of_device_id of_fsl_spi_match[] = { { .compatible = "fsl,spi" }, - {}, + {} }; -MODULE_DEVICE_TABLE(of, of_mpc8xxx_spi_match); +MODULE_DEVICE_TABLE(of, 
of_fsl_spi_match); -static struct of_platform_driver of_mpc8xxx_spi_driver = { +static struct of_platform_driver of_fsl_spi_driver = { .driver = { - .name = "mpc8xxx_spi", + .name = "fsl_spi", .owner = THIS_MODULE, - .of_match_table = of_mpc8xxx_spi_match, + .of_match_table = of_fsl_spi_match, }, - .probe = of_mpc8xxx_spi_probe, - .remove = __devexit_p(of_mpc8xxx_spi_remove), + .probe = of_fsl_spi_probe, + .remove = __devexit_p(of_fsl_spi_remove), }; #ifdef CONFIG_MPC832x_RDB /* - * XXX XXX XXX + * XXX XXX XXX * This is "legacy" platform driver, was used by the MPC8323E-RDB boards * only. The driver should go away soon, since newer MPC8323E-RDB's device * tree can work with OpenFirmware driver. But for now we support old trees @@ -1360,7 +1135,7 @@ static int __devinit plat_mpc8xxx_spi_probe(struct platform_device *pdev) if (irq <= 0) return -EINVAL; - master = mpc8xxx_spi_probe(&pdev->dev, mem, irq); + master = fsl_spi_probe(&pdev->dev, mem, irq); if (IS_ERR(master)) return PTR_ERR(master); return 0; @@ -1399,21 +1174,20 @@ static void __init legacy_driver_register(void) {} static void __exit legacy_driver_unregister(void) {} #endif /* CONFIG_MPC832x_RDB */ -static int __init mpc8xxx_spi_init(void) +static int __init fsl_spi_init(void) { legacy_driver_register(); - return of_register_platform_driver(&of_mpc8xxx_spi_driver); + return of_register_platform_driver(&of_fsl_spi_driver); } +module_init(fsl_spi_init); -static void __exit mpc8xxx_spi_exit(void) +static void __exit fsl_spi_exit(void) { - of_unregister_platform_driver(&of_mpc8xxx_spi_driver); + of_unregister_platform_driver(&of_fsl_spi_driver); legacy_driver_unregister(); } - -module_init(mpc8xxx_spi_init); -module_exit(mpc8xxx_spi_exit); +module_exit(fsl_spi_exit); MODULE_AUTHOR("Kumar Gala"); -MODULE_DESCRIPTION("Simple MPC8xxx SPI Driver"); +MODULE_DESCRIPTION("Simple Freescale SPI Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi_gpio.c index e24a63498ac..63e51b011d5 100644 --- a/drivers/spi/spi_gpio.c +++ b/drivers/spi/spi_gpio.c @@ -350,7 +350,7 @@ static int __init spi_gpio_probe(struct platform_device *pdev) spi_gpio->bitbang.master = spi_master_get(master); spi_gpio->bitbang.chipselect = spi_gpio_chipselect; - if ((master_flags & (SPI_MASTER_NO_RX | SPI_MASTER_NO_RX)) == 0) { + if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) { spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0; spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1; spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2; diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi_imx.c index 7972e907747..55a38e2c6c1 100644 --- a/drivers/spi/spi_imx.c +++ b/drivers/spi/spi_imx.c @@ -56,7 +56,28 @@ struct spi_imx_config { unsigned int speed_hz; unsigned int bpw; unsigned int mode; - int cs; + u8 cs; +}; + +enum spi_imx_devtype { + SPI_IMX_VER_IMX1, + SPI_IMX_VER_0_0, + SPI_IMX_VER_0_4, + SPI_IMX_VER_0_5, + SPI_IMX_VER_0_7, + SPI_IMX_VER_2_3, + SPI_IMX_VER_AUTODETECT, +}; + +struct spi_imx_data; + +struct spi_imx_devtype_data { + void (*intctrl)(struct spi_imx_data *, int); + int (*config)(struct spi_imx_data *, struct spi_imx_config *); + void (*trigger)(struct spi_imx_data *); + int (*rx_available)(struct spi_imx_data *); + void (*reset)(struct spi_imx_data *); + unsigned int fifosize; }; struct spi_imx_data { @@ -76,11 +97,7 @@ struct spi_imx_data { const void *tx_buf; unsigned int txfifo; /* number of words pushed in tx FIFO */ - /* SoC specific functions */ - void 
(*intctrl)(struct spi_imx_data *, int); - int (*config)(struct spi_imx_data *, struct spi_imx_config *); - void (*trigger)(struct spi_imx_data *); - int (*rx_available)(struct spi_imx_data *); + struct spi_imx_devtype_data devtype_data; }; #define MXC_SPI_BUF_RX(type) \ @@ -140,7 +157,7 @@ static unsigned int spi_imx_clkdiv_1(unsigned int fin, return max; } -/* MX1, MX31, MX35 */ +/* MX1, MX31, MX35, MX51 CSPI */ static unsigned int spi_imx_clkdiv_2(unsigned int fin, unsigned int fspi) { @@ -155,6 +172,128 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, return 7; } +#define SPI_IMX2_3_CTRL 0x08 +#define SPI_IMX2_3_CTRL_ENABLE (1 << 0) +#define SPI_IMX2_3_CTRL_XCH (1 << 2) +#define SPI_IMX2_3_CTRL_MODE(cs) (1 << ((cs) + 4)) +#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8 +#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12 +#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18) +#define SPI_IMX2_3_CTRL_BL_OFFSET 20 + +#define SPI_IMX2_3_CONFIG 0x0c +#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0)) +#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4)) +#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8)) +#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12)) + +#define SPI_IMX2_3_INT 0x10 +#define SPI_IMX2_3_INT_TEEN (1 << 0) +#define SPI_IMX2_3_INT_RREN (1 << 3) + +#define SPI_IMX2_3_STAT 0x18 +#define SPI_IMX2_3_STAT_RR (1 << 3) + +/* MX51 eCSPI */ +static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi) +{ + /* + * there are two 4-bit dividers, the pre-divider divides by + * $pre, the post-divider by 2^$post + */ + unsigned int pre, post; + + if (unlikely(fspi > fin)) + return 0; + + post = fls(fin) - fls(fspi); + if (fin > fspi << post) + post++; + + /* now we have: (fin <= fspi << post) with post being minimal */ + + post = max(4U, post) - 4; + if (unlikely(post > 0xf)) { + pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", + __func__, fspi, fin); + return 0xff; + } + + pre = DIV_ROUND_UP(fin, fspi << post) - 1; + + pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", + __func__, fin, fspi, post, pre); + return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) | + (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET); +} + +static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable) +{ + unsigned val = 0; + + if (enable & MXC_INT_TE) + val |= SPI_IMX2_3_INT_TEEN; + + if (enable & MXC_INT_RR) + val |= SPI_IMX2_3_INT_RREN; + + writel(val, spi_imx->base + SPI_IMX2_3_INT); +} + +static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx) +{ + u32 reg; + + reg = readl(spi_imx->base + SPI_IMX2_3_CTRL); + reg |= SPI_IMX2_3_CTRL_XCH; + writel(reg, spi_imx->base + SPI_IMX2_3_CTRL); +} + +static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0; + + /* set master mode */ + ctrl |= SPI_IMX2_3_CTRL_MODE(config->cs); + + /* set clock speed */ + ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz); + + /* set chip select to use */ + ctrl |= SPI_IMX2_3_CTRL_CS(config->cs); + + ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET; + + cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs); + + if (config->mode & SPI_CPHA) + cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs); + + if (config->mode & SPI_CPOL) + cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs); + + if (config->mode & SPI_CS_HIGH) + cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs); + + writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL); + writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG); + + return 0; +} + +static int 
__maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx) +{ + return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR; +} + +static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (spi_imx2_3_rx_available(spi_imx)) + readl(spi_imx->base + MXC_CSPIRXDATA); +} + #define MX31_INTREG_TEEN (1 << 0) #define MX31_INTREG_RREN (1 << 3) @@ -178,7 +317,7 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin, * the i.MX35 has a slightly different register layout for bits * we do not use here. */ -static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) +static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -190,7 +329,7 @@ static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable) writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx31_trigger(struct spi_imx_data *spi_imx) +static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; @@ -199,20 +338,16 @@ static void mx31_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx31_config(struct spi_imx_data *spi_imx, +static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << MX31_CSPICTRL_DR_SHIFT; - if (cpu_is_mx31()) - reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; - else if (cpu_is_mx25() || cpu_is_mx35()) { - reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; - reg |= MX31_CSPICTRL_SSCTL; - } + reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT; if (config->mode & SPI_CPHA) reg |= MX31_CSPICTRL_PHA; @@ -220,23 +355,52 @@ static int mx31_config(struct spi_imx_data *spi_imx, reg |= MX31_CSPICTRL_POL; if (config->mode & SPI_CS_HIGH) reg |= MX31_CSPICTRL_SSPOL; - if (config->cs < 0) { - if (cpu_is_mx31()) - reg |= (config->cs + 32) << MX31_CSPICTRL_CS_SHIFT; - else if (cpu_is_mx25() || cpu_is_mx35()) - reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; - } + if (cs < 0) + reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT; + + writel(reg, spi_imx->base + MXC_CSPICTRL); + + return 0; +} + +static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx, + struct spi_imx_config *config) +{ + unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; + + reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) << + MX31_CSPICTRL_DR_SHIFT; + + reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT; + reg |= MX31_CSPICTRL_SSCTL; + + if (config->mode & SPI_CPHA) + reg |= MX31_CSPICTRL_PHA; + if (config->mode & SPI_CPOL) + reg |= MX31_CSPICTRL_POL; + if (config->mode & SPI_CS_HIGH) + reg |= MX31_CSPICTRL_SSPOL; + if (cs < 0) + reg |= (cs + 32) << MX35_CSPICTRL_CS_SHIFT; writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } -static int mx31_rx_available(struct spi_imx_data *spi_imx) +static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR; } +static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx) +{ + /* drain receive buffer */ + while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) + readl(spi_imx->base + MXC_CSPIRXDATA); +} + #define MX27_INTREG_RR (1 << 4) #define MX27_INTREG_TEEN (1 << 9) #define MX27_INTREG_RREN (1 << 13) @@ -250,7 +414,7 @@ 
static int mx31_rx_available(struct spi_imx_data *spi_imx) #define MX27_CSPICTRL_DR_SHIFT 14 #define MX27_CSPICTRL_CS_SHIFT 19 -static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable) +static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -262,7 +426,7 @@ static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable) writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx27_trigger(struct spi_imx_data *spi_imx) +static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; @@ -271,10 +435,11 @@ static void mx27_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx27_config(struct spi_imx_data *spi_imx, +static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; + int cs = spi_imx->chipselect[config->cs]; reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) << MX27_CSPICTRL_DR_SHIFT; @@ -286,19 +451,24 @@ static int mx27_config(struct spi_imx_data *spi_imx, reg |= MX27_CSPICTRL_POL; if (config->mode & SPI_CS_HIGH) reg |= MX27_CSPICTRL_SSPOL; - if (config->cs < 0) - reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; + if (cs < 0) + reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT; writel(reg, spi_imx->base + MXC_CSPICTRL); return 0; } -static int mx27_rx_available(struct spi_imx_data *spi_imx) +static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR; } +static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx) +{ + writel(1, spi_imx->base + MXC_RESET); +} + #define MX1_INTREG_RR (1 << 3) #define MX1_INTREG_TEEN (1 << 8) #define MX1_INTREG_RREN (1 << 11) @@ -310,7 +480,7 @@ static int mx27_rx_available(struct spi_imx_data *spi_imx) #define MX1_CSPICTRL_MASTER (1 << 10) #define MX1_CSPICTRL_DR_SHIFT 13 -static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) +static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable) { unsigned int val = 0; @@ -322,7 +492,7 @@ static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable) writel(val, spi_imx->base + MXC_CSPIINT); } -static void mx1_trigger(struct spi_imx_data *spi_imx) +static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx) { unsigned int reg; @@ -331,7 +501,7 @@ static void mx1_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MXC_CSPICTRL); } -static int mx1_config(struct spi_imx_data *spi_imx, +static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; @@ -350,11 +520,73 @@ static int mx1_config(struct spi_imx_data *spi_imx, return 0; } -static int mx1_rx_available(struct spi_imx_data *spi_imx) +static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx) { return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR; } +static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx) +{ + writel(1, spi_imx->base + MXC_RESET); +} + +/* + * These version numbers are taken from the Freescale driver. Unfortunately it + * doesn't support i.MX1, so this entry doesn't match the scheme. 
:-( + */ +static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = { +#ifdef CONFIG_SPI_IMX_VER_IMX1 + [SPI_IMX_VER_IMX1] = { + .intctrl = mx1_intctrl, + .config = mx1_config, + .trigger = mx1_trigger, + .rx_available = mx1_rx_available, + .reset = mx1_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_0 + [SPI_IMX_VER_0_0] = { + .intctrl = mx27_intctrl, + .config = mx27_config, + .trigger = mx27_trigger, + .rx_available = mx27_rx_available, + .reset = spi_imx0_0_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_4 + [SPI_IMX_VER_0_4] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_4_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_0_7 + [SPI_IMX_VER_0_7] = { + .intctrl = mx31_intctrl, + .config = spi_imx0_7_config, + .trigger = mx31_trigger, + .rx_available = mx31_rx_available, + .reset = spi_imx0_4_reset, + .fifosize = 8, + }, +#endif +#ifdef CONFIG_SPI_IMX_VER_2_3 + [SPI_IMX_VER_2_3] = { + .intctrl = spi_imx2_3_intctrl, + .config = spi_imx2_3_config, + .trigger = spi_imx2_3_trigger, + .rx_available = spi_imx2_3_rx_available, + .reset = spi_imx2_3_reset, + .fifosize = 64, + }, +#endif +}; + static void spi_imx_chipselect(struct spi_device *spi, int is_active) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -370,21 +602,21 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active) static void spi_imx_push(struct spi_imx_data *spi_imx) { - while (spi_imx->txfifo < 8) { + while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) { if (!spi_imx->count) break; spi_imx->tx(spi_imx); spi_imx->txfifo++; } - spi_imx->trigger(spi_imx); + spi_imx->devtype_data.trigger(spi_imx); } static irqreturn_t spi_imx_isr(int irq, void *dev_id) { struct spi_imx_data *spi_imx = dev_id; - while (spi_imx->rx_available(spi_imx)) { + while (spi_imx->devtype_data.rx_available(spi_imx)) { spi_imx->rx(spi_imx); spi_imx->txfifo--; } @@ -398,11 +630,12 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id) /* No data left to push, but still waiting for rx data, * enable receive data available interrupt. */ - spi_imx->intctrl(spi_imx, MXC_INT_RR); + spi_imx->devtype_data.intctrl( + spi_imx, MXC_INT_RR); return IRQ_HANDLED; } - spi_imx->intctrl(spi_imx, 0); + spi_imx->devtype_data.intctrl(spi_imx, 0); complete(&spi_imx->xfer_done); return IRQ_HANDLED; @@ -417,7 +650,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, config.bpw = t ? t->bits_per_word : spi->bits_per_word; config.speed_hz = t ? 
t->speed_hz : spi->max_speed_hz; config.mode = spi->mode; - config.cs = spi_imx->chipselect[spi->chip_select]; + config.cs = spi->chip_select; if (!config.speed_hz) config.speed_hz = spi->max_speed_hz; @@ -439,7 +672,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, } else BUG(); - spi_imx->config(spi_imx, &config); + spi_imx->devtype_data.config(spi_imx, &config); return 0; } @@ -458,7 +691,7 @@ static int spi_imx_transfer(struct spi_device *spi, spi_imx_push(spi_imx); - spi_imx->intctrl(spi_imx, MXC_INT_TE); + spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE); wait_for_completion(&spi_imx->xfer_done); @@ -485,6 +718,39 @@ static void spi_imx_cleanup(struct spi_device *spi) { } +static struct platform_device_id spi_imx_devtype[] = { + { + .name = DRIVER_NAME, + .driver_data = SPI_IMX_VER_AUTODETECT, + }, { + .name = "imx1-cspi", + .driver_data = SPI_IMX_VER_IMX1, + }, { + .name = "imx21-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx25-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx27-cspi", + .driver_data = SPI_IMX_VER_0_0, + }, { + .name = "imx31-cspi", + .driver_data = SPI_IMX_VER_0_4, + }, { + .name = "imx35-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-cspi", + .driver_data = SPI_IMX_VER_0_7, + }, { + .name = "imx51-ecspi", + .driver_data = SPI_IMX_VER_2_3, + }, { + /* sentinel */ + } +}; + static int __devinit spi_imx_probe(struct platform_device *pdev) { struct spi_imx_master *mxc_platform_info; @@ -536,6 +802,31 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) init_completion(&spi_imx->xfer_done); + if (pdev->id_entry->driver_data == SPI_IMX_VER_AUTODETECT) { + if (cpu_is_mx25() || cpu_is_mx35()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_0_7]; + else if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_0_4]; + else if (cpu_is_mx27() || cpu_is_mx21()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_0_0]; + else if (cpu_is_mx1()) + spi_imx->devtype_data = + spi_imx_devtype_data[SPI_IMX_VER_IMX1]; + else + BUG(); + } else + spi_imx->devtype_data = + spi_imx_devtype_data[pdev->id_entry->driver_data]; + + if (!spi_imx->devtype_data.intctrl) { + dev_err(&pdev->dev, "no support for this device compiled in\n"); + ret = -ENODEV; + goto out_gpio_free; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "can't get platform resource\n"); @@ -567,24 +858,6 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) goto out_iounmap; } - if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) { - spi_imx->intctrl = mx31_intctrl; - spi_imx->config = mx31_config; - spi_imx->trigger = mx31_trigger; - spi_imx->rx_available = mx31_rx_available; - } else if (cpu_is_mx27() || cpu_is_mx21()) { - spi_imx->intctrl = mx27_intctrl; - spi_imx->config = mx27_config; - spi_imx->trigger = mx27_trigger; - spi_imx->rx_available = mx27_rx_available; - } else if (cpu_is_mx1()) { - spi_imx->intctrl = mx1_intctrl; - spi_imx->config = mx1_config; - spi_imx->trigger = mx1_trigger; - spi_imx->rx_available = mx1_rx_available; - } else - BUG(); - spi_imx->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(spi_imx->clk)) { dev_err(&pdev->dev, "unable to get clock\n"); @@ -595,15 +868,9 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) clk_enable(spi_imx->clk); spi_imx->spi_clk = clk_get_rate(spi_imx->clk); - if (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27()) - writel(1, spi_imx->base + MXC_RESET); - 
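The probe changes above pick a per-variant callback table either from the platform_device_id's driver_data or, for the generic DRIVER_NAME id, by cpu_is_*() autodetection, and they fail with -ENODEV when the selected variant was not built in. A minimal sketch of that selection pattern, using invented my_* names rather than the driver's:

#include <linux/platform_device.h>
#include <linux/errno.h>

enum my_ver { MY_VER_AUTODETECT, MY_VER_A, MY_VER_B };

struct my_devtype {
	void (*reset)(void __iomem *base);
	unsigned int fifosize;
};

static void my_reset_a(void __iomem *base) { }	/* stand-in for a real mx*_reset() */

static const struct my_devtype my_devtype_data[MY_VER_B + 1] = {
	[MY_VER_A] = { .reset = my_reset_a, .fifosize = 8 },
	/* MY_VER_B left zeroed, as if its support were not compiled in */
};

/* stand-in for the cpu_is_mx*() runtime checks */
static bool my_cpu_is_variant_a(void) { return true; }

static int my_probe(struct platform_device *pdev)
{
	struct my_devtype dt;

	if (pdev->id_entry->driver_data == MY_VER_AUTODETECT)
		dt = my_devtype_data[my_cpu_is_variant_a() ? MY_VER_A : MY_VER_B];
	else
		dt = my_devtype_data[pdev->id_entry->driver_data];

	if (!dt.reset)		/* this variant was not compiled in */
		return -ENODEV;

	/* ... ioremap registers, request the IRQ, register the master ... */
	return 0;
}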
- /* drain receive buffer */ - if (cpu_is_mx25() || cpu_is_mx31() || cpu_is_mx35()) - while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR) - readl(spi_imx->base + MXC_CSPIRXDATA); + spi_imx->devtype_data.reset(spi_imx); - spi_imx->intctrl(spi_imx, 0); + spi_imx->devtype_data.intctrl(spi_imx, 0); ret = spi_bitbang_start(&spi_imx->bitbang); if (ret) { @@ -668,6 +935,7 @@ static struct platform_driver spi_imx_driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, + .id_table = spi_imx_devtype, .probe = spi_imx_probe, .remove = __devexit_p(spi_imx_remove), }; diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c index c3038da2648..795828b90f4 100644 --- a/drivers/spi/spi_s3c64xx.c +++ b/drivers/spi/spi_s3c64xx.c @@ -261,15 +261,25 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, chcfg |= S3C64XX_SPI_CH_TXCH_ON; if (dma_mode) { modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; - s3c2410_dma_config(sdd->tx_dmach, 1); + s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, xfer->tx_dma, xfer->len); s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); } else { - unsigned char *buf = (unsigned char *) xfer->tx_buf; - int i = 0; - while (i < xfer->len) - writeb(buf[i++], regs + S3C64XX_SPI_TX_DATA); + switch (sdd->cur_bpw) { + case 32: + iowrite32_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 4); + break; + case 16: + iowrite16_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len / 2); + break; + default: + iowrite8_rep(regs + S3C64XX_SPI_TX_DATA, + xfer->tx_buf, xfer->len); + break; + } } } @@ -286,7 +296,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) | S3C64XX_SPI_PACKET_CNT_EN, regs + S3C64XX_SPI_PACKET_CNT); - s3c2410_dma_config(sdd->rx_dmach, 1); + s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, xfer->rx_dma, xfer->len); s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START); @@ -366,20 +376,26 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd, return -EIO; } } else { - unsigned char *buf; - int i; - /* If it was only Tx */ if (xfer->rx_buf == NULL) { sdd->state &= ~TXBUSY; return 0; } - i = 0; - buf = xfer->rx_buf; - while (i < xfer->len) - buf[i++] = readb(regs + S3C64XX_SPI_RX_DATA); - + switch (sdd->cur_bpw) { + case 32: + ioread32_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 4); + break; + case 16: + ioread16_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len / 2); + break; + default: + ioread8_rep(regs + S3C64XX_SPI_RX_DATA, + xfer->rx_buf, xfer->len); + break; + } sdd->state &= ~RXBUSY; } @@ -399,13 +415,18 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd, static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) { + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; void __iomem *regs = sdd->regs; u32 val; /* Disable Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val &= ~S3C64XX_SPI_ENCLK_ENABLE; - writel(val, regs + S3C64XX_SPI_CLK_CFG); + if (sci->clk_from_cmu) { + clk_disable(sdd->src_clk); + } else { + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } /* Set Polarity and Phase */ val = readl(regs + S3C64XX_SPI_CH_CFG); @@ -429,29 +450,39 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd) switch (sdd->cur_bpw) { case 32: val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_WORD; break; 
case 16: val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD; + val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD; break; default: val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE; + val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; break; } - val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE; /* Always 8bits wide */ writel(val, regs + S3C64XX_SPI_MODE_CFG); - /* Configure Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val &= ~S3C64XX_SPI_PSR_MASK; - val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) - & S3C64XX_SPI_PSR_MASK); - writel(val, regs + S3C64XX_SPI_CLK_CFG); - - /* Enable Clock */ - val = readl(regs + S3C64XX_SPI_CLK_CFG); - val |= S3C64XX_SPI_ENCLK_ENABLE; - writel(val, regs + S3C64XX_SPI_CLK_CFG); + if (sci->clk_from_cmu) { + /* Configure Clock */ + /* There is half-multiplier before the SPI */ + clk_set_rate(sdd->src_clk, sdd->cur_speed * 2); + /* Enable Clock */ + clk_enable(sdd->src_clk); + } else { + /* Configure Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val &= ~S3C64XX_SPI_PSR_MASK; + val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1) + & S3C64XX_SPI_PSR_MASK); + writel(val, regs + S3C64XX_SPI_CLK_CFG); + + /* Enable Clock */ + val = readl(regs + S3C64XX_SPI_CLK_CFG); + val |= S3C64XX_SPI_ENCLK_ENABLE; + writel(val, regs + S3C64XX_SPI_CLK_CFG); + } } static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, @@ -499,6 +530,7 @@ static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, struct spi_message *msg) { + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; struct device *dev = &sdd->pdev->dev; struct spi_transfer *xfer; @@ -514,6 +546,9 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, /* Map until end or first fail */ list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + if (xfer->tx_buf != NULL) { xfer->tx_dma = dma_map_single(dev, (void *)xfer->tx_buf, xfer->len, @@ -545,6 +580,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, struct spi_message *msg) { + struct s3c64xx_spi_info *sci = sdd->cntrlr_info; struct device *dev = &sdd->pdev->dev; struct spi_transfer *xfer; @@ -553,6 +589,9 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd, list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1)) + continue; + if (xfer->rx_buf != NULL && xfer->rx_dma != XFER_DMAADDR_INVALID) dma_unmap_single(dev, xfer->rx_dma, @@ -608,6 +647,14 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd, bpw = xfer->bits_per_word ? : spi->bits_per_word; speed = xfer->speed_hz ? 
: spi->max_speed_hz; + if (xfer->len % (bpw / 8)) { + dev_err(&spi->dev, + "Xfer length(%u) not a multiple of word size(%u)\n", + xfer->len, bpw / 8); + status = -EIO; + goto out; + } + if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) { sdd->cur_bpw = bpw; sdd->cur_speed = speed; @@ -798,7 +845,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi) struct s3c64xx_spi_driver_data *sdd; struct s3c64xx_spi_info *sci; struct spi_message *msg; - u32 psr, speed; unsigned long flags; int err = 0; @@ -841,32 +887,37 @@ static int s3c64xx_spi_setup(struct spi_device *spi) } /* Check if we can provide the requested rate */ - speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); /* Max possible */ - - if (spi->max_speed_hz > speed) - spi->max_speed_hz = speed; - - psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; - psr &= S3C64XX_SPI_PSR_MASK; - if (psr == S3C64XX_SPI_PSR_MASK) - psr--; + if (!sci->clk_from_cmu) { + u32 psr, speed; + + /* Max possible */ + speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1); + + if (spi->max_speed_hz > speed) + spi->max_speed_hz = speed; + + psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1; + psr &= S3C64XX_SPI_PSR_MASK; + if (psr == S3C64XX_SPI_PSR_MASK) + psr--; + + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz < speed) { + if (psr+1 < S3C64XX_SPI_PSR_MASK) { + psr++; + } else { + err = -EINVAL; + goto setup_exit; + } + } - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); - if (spi->max_speed_hz < speed) { - if (psr+1 < S3C64XX_SPI_PSR_MASK) { - psr++; - } else { + speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); + if (spi->max_speed_hz >= speed) + spi->max_speed_hz = speed; + else err = -EINVAL; - goto setup_exit; - } } - speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1); - if (spi->max_speed_hz >= speed) - spi->max_speed_hz = speed; - else - err = -EINVAL; - setup_exit: /* setup() returns with device de-selected */ @@ -888,7 +939,8 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) /* Disable Interrupts - we use Polling if not DMA mode */ writel(0, regs + S3C64XX_SPI_INT_EN); - writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, + if (!sci->clk_from_cmu) + writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT, regs + S3C64XX_SPI_CLK_CFG); writel(0, regs + S3C64XX_SPI_MODE_CFG); writel(0, regs + S3C64XX_SPI_PACKET_CNT); diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi_topcliff_pch.c new file mode 100644 index 00000000000..58e187f45ec --- /dev/null +++ b/drivers/spi/spi_topcliff_pch.c @@ -0,0 +1,1303 @@ +/* + * SPI bus driver for the Topcliff PCH used by Intel SoCs + * + * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/wait.h> +#include <linux/spi/spi.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/spi/spidev.h> +#include <linux/module.h> +#include <linux/device.h> + +/* Register offsets */ +#define PCH_SPCR 0x00 /* SPI control register */ +#define PCH_SPBRR 0x04 /* SPI baud rate register */ +#define PCH_SPSR 0x08 /* SPI status register */ +#define PCH_SPDWR 0x0C /* SPI write data register */ +#define PCH_SPDRR 0x10 /* SPI read data register */ +#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */ +#define PCH_SRST 0x1C /* SPI reset register */ + +#define PCH_SPSR_TFD 0x000007C0 +#define PCH_SPSR_RFD 0x0000F800 + +#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11) +#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6) + +#define PCH_RX_THOLD 7 +#define PCH_RX_THOLD_MAX 15 + +#define PCH_MAX_BAUDRATE 5000000 +#define PCH_MAX_FIFO_DEPTH 16 + +#define STATUS_RUNNING 1 +#define STATUS_EXITING 2 +#define PCH_SLEEP_TIME 10 + +#define PCH_ADDRESS_SIZE 0x20 + +#define SSN_LOW 0x02U +#define SSN_NO_CONTROL 0x00U +#define PCH_MAX_CS 0xFF +#define PCI_DEVICE_ID_GE_SPI 0x8816 + +#define SPCR_SPE_BIT (1 << 0) +#define SPCR_MSTR_BIT (1 << 1) +#define SPCR_LSBF_BIT (1 << 4) +#define SPCR_CPHA_BIT (1 << 5) +#define SPCR_CPOL_BIT (1 << 6) +#define SPCR_TFIE_BIT (1 << 8) +#define SPCR_RFIE_BIT (1 << 9) +#define SPCR_FIE_BIT (1 << 10) +#define SPCR_ORIE_BIT (1 << 11) +#define SPCR_MDFIE_BIT (1 << 12) +#define SPCR_FICLR_BIT (1 << 24) +#define SPSR_TFI_BIT (1 << 0) +#define SPSR_RFI_BIT (1 << 1) +#define SPSR_FI_BIT (1 << 2) +#define SPBRR_SIZE_BIT (1 << 10) + +#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT) + +#define SPCR_RFIC_FIELD 20 +#define SPCR_TFIC_FIELD 16 + +#define SPSR_INT_BITS 0x1F +#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1)) +#define MASK_RFIC_SPCR_BITS (~(0xf << 20)) +#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12)) + +#define PCH_CLOCK_HZ 50000000 +#define PCH_MAX_SPBR 1023 + + +/** + * struct pch_spi_data - Holds the SPI channel specific details + * @io_remap_addr: The remapped PCI base address + * @master: Pointer to the SPI master structure + * @work: Reference to work queue handler + * @wk: Workqueue for carrying out execution of the + * requests + * @wait: Wait queue for waking up upon receiving an + * interrupt. 
+ * @transfer_complete: Status of SPI Transfer + * @bcurrent_msg_processing: Status flag for message processing + * @lock: Lock for protecting this structure + * @queue: SPI Message queue + * @status: Status of the SPI driver + * @bpw_len: Length of data to be transferred in bits per + * word + * @transfer_active: Flag showing active transfer + * @tx_index: Transmit data count; for bookkeeping during + * transfer + * @rx_index: Receive data count; for bookkeeping during + * transfer + * @tx_buff: Buffer for data to be transmitted + * @rx_index: Buffer for Received data + * @n_curnt_chip: The chip number that this SPI driver currently + * operates on + * @current_chip: Reference to the current chip that this SPI + * driver currently operates on + * @current_msg: The current message that this SPI driver is + * handling + * @cur_trans: The current transfer that this SPI driver is + * handling + * @board_dat: Reference to the SPI device data structure + */ +struct pch_spi_data { + void __iomem *io_remap_addr; + struct spi_master *master; + struct work_struct work; + struct workqueue_struct *wk; + wait_queue_head_t wait; + u8 transfer_complete; + u8 bcurrent_msg_processing; + spinlock_t lock; + struct list_head queue; + u8 status; + u32 bpw_len; + u8 transfer_active; + u32 tx_index; + u32 rx_index; + u16 *pkt_tx_buff; + u16 *pkt_rx_buff; + u8 n_curnt_chip; + struct spi_device *current_chip; + struct spi_message *current_msg; + struct spi_transfer *cur_trans; + struct pch_spi_board_data *board_dat; +}; + +/** + * struct pch_spi_board_data - Holds the SPI device specific details + * @pdev: Pointer to the PCI device + * @irq_reg_sts: Status of IRQ registration + * @pci_req_sts: Status of pci_request_regions + * @suspend_sts: Status of suspend + * @data: Pointer to SPI channel data structure + */ +struct pch_spi_board_data { + struct pci_dev *pdev; + u8 irq_reg_sts; + u8 pci_req_sts; + u8 suspend_sts; + struct pch_spi_data *data; +}; + +static struct pci_device_id pch_spi_pcidev_id[] = { + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)}, + {0,} +}; + +/** + * pch_spi_writereg() - Performs register writes + * @master: Pointer to struct spi_master. + * @idx: Register offset. + * @val: Value to be written to register. + */ +static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + iowrite32(val, (data->io_remap_addr + idx)); +} + +/** + * pch_spi_readreg() - Performs register reads + * @master: Pointer to struct spi_master. + * @idx: Register offset. + */ +static inline u32 pch_spi_readreg(struct spi_master *master, int idx) +{ + struct pch_spi_data *data = spi_master_get_devdata(master); + return ioread32(data->io_remap_addr + idx); +} + +static inline void pch_spi_setclr_reg(struct spi_master *master, int idx, + u32 set, u32 clr) +{ + u32 tmp = pch_spi_readreg(master, idx); + tmp = (tmp & ~clr) | set; + pch_spi_writereg(master, idx, tmp); +} + +static void pch_spi_set_master_mode(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0); +} + +/** + * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs + * @master: Pointer to struct spi_master. 
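pch_spi_setclr_reg() above is a plain read-modify-write helper, and the FIFO clear pulses the FICLR bit through it. A small sketch of that idiom with invented ex_* names, assuming a memory-mapped 32-bit register:

#include <linux/io.h>

static inline void ex_rmw_reg(void __iomem *reg, u32 set, u32 clr)
{
	u32 tmp = ioread32(reg);

	tmp = (tmp & ~clr) | set;
	iowrite32(tmp, reg);
}

static void ex_clear_fifo(void __iomem *spcr, u32 ficlr_bit)
{
	ex_rmw_reg(spcr, ficlr_bit, 0);	/* raise FICLR: both FIFOs flush */
	ex_rmw_reg(spcr, 0, ficlr_bit);	/* drop it again for normal operation */
}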
+ */ +static void pch_spi_clear_fifo(struct spi_master *master) +{ + pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0); + pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT); +} + +static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, + void __iomem *io_remap_addr) +{ + u32 n_read, tx_index, rx_index, bpw_len; + u16 *pkt_rx_buffer, *pkt_tx_buff; + int read_cnt; + u32 reg_spcr_val; + void __iomem *spsr; + void __iomem *spdrr; + void __iomem *spdwr; + + spsr = io_remap_addr + PCH_SPSR; + iowrite32(reg_spsr_val, spsr); + + if (data->transfer_active) { + rx_index = data->rx_index; + tx_index = data->tx_index; + bpw_len = data->bpw_len; + pkt_rx_buffer = data->pkt_rx_buff; + pkt_tx_buff = data->pkt_tx_buff; + + spdrr = io_remap_addr + PCH_SPDRR; + spdwr = io_remap_addr + PCH_SPDWR; + + n_read = PCH_READABLE(reg_spsr_val); + + for (read_cnt = 0; (read_cnt < n_read); read_cnt++) { + pkt_rx_buffer[rx_index++] = ioread32(spdrr); + if (tx_index < bpw_len) + iowrite32(pkt_tx_buff[tx_index++], spdwr); + } + + /* disable RFI if not needed */ + if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) { + reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR); + reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */ + + /* reset rx threshold */ + reg_spcr_val &= MASK_RFIC_SPCR_BITS; + reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD); + iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))), + (io_remap_addr + PCH_SPCR)); + } + + /* update counts */ + data->tx_index = tx_index; + data->rx_index = rx_index; + + } + + /* if transfer complete interrupt */ + if (reg_spsr_val & SPSR_FI_BIT) { + /* disable FI & RFI interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, + SPCR_FIE_BIT | SPCR_TFIE_BIT); + + /* transfer is completed;inform pch_spi_process_messages */ + data->transfer_complete = true; + wake_up(&data->wait); + } +} + +/** + * pch_spi_handler() - Interrupt handler + * @irq: The interrupt number. + * @dev_id: Pointer to struct pch_spi_board_data. + */ +static irqreturn_t pch_spi_handler(int irq, void *dev_id) +{ + u32 reg_spsr_val; + struct pch_spi_data *data; + void __iomem *spsr; + void __iomem *io_remap_addr; + irqreturn_t ret = IRQ_NONE; + struct pch_spi_board_data *board_dat = dev_id; + + if (board_dat->suspend_sts) { + dev_dbg(&board_dat->pdev->dev, + "%s returning due to suspend\n", __func__); + return IRQ_NONE; + } + + data = board_dat->data; + io_remap_addr = data->io_remap_addr; + spsr = io_remap_addr + PCH_SPSR; + + reg_spsr_val = ioread32(spsr); + + /* Check if the interrupt is for SPI device */ + if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { + pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr); + ret = IRQ_HANDLED; + } + + dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n", + __func__, ret); + + return ret; +} + +/** + * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR + * @master: Pointer to struct spi_master. + * @speed_hz: Baud rate. + */ +static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz) +{ + u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2); + + /* if baud rate is less than we can support limit it */ + if (n_spbr > PCH_MAX_SPBR) + n_spbr = PCH_MAX_SPBR; + + pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS); +} + +/** + * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR + * @master: Pointer to struct spi_master. + * @bits_per_word: Bits per word for SPI transfer. 
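The divisor math in pch_spi_set_baud_rate() above follows bit rate = PCH_CLOCK_HZ / (2 * SPBR), clamped to the 10-bit SPBR field. A short illustrative helper (invented ex_* names) showing the same calculation; for example, a 1 MHz request against the 50 MHz clock yields SPBR = 25:

#include <linux/kernel.h>
#include <linux/types.h>

#define EX_CLOCK_HZ	50000000	/* matches PCH_CLOCK_HZ above */
#define EX_MAX_SPBR	1023		/* 10-bit divisor field */

/* bit rate = EX_CLOCK_HZ / (2 * SPBR): 1 MHz gives 25, 5 MHz gives 5 */
static u32 ex_spbr_for(u32 speed_hz)
{
	u32 spbr = EX_CLOCK_HZ / (speed_hz * 2);

	return min_t(u32, spbr, EX_MAX_SPBR);
}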
+ */ +static void pch_spi_set_bits_per_word(struct spi_master *master, + u8 bits_per_word) +{ + if (bits_per_word == 8) + pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT); + else + pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0); +} + +/** + * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer + * @spi: Pointer to struct spi_device. + */ +static void pch_spi_setup_transfer(struct spi_device *spi) +{ + u32 flags = 0; + + dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n", + __func__, pch_spi_readreg(spi->master, PCH_SPBRR), + spi->max_speed_hz); + pch_spi_set_baud_rate(spi->master, spi->max_speed_hz); + + /* set bits per word */ + pch_spi_set_bits_per_word(spi->master, spi->bits_per_word); + + if (!(spi->mode & SPI_LSB_FIRST)) + flags |= SPCR_LSBF_BIT; + if (spi->mode & SPI_CPOL) + flags |= SPCR_CPOL_BIT; + if (spi->mode & SPI_CPHA) + flags |= SPCR_CPHA_BIT; + pch_spi_setclr_reg(spi->master, PCH_SPCR, flags, + (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT)); + + /* Clear the FIFO by toggling FICLR to 1 and back to 0 */ + pch_spi_clear_fifo(spi->master); +} + +/** + * pch_spi_reset() - Clears SPI registers + * @master: Pointer to struct spi_master. + */ +static void pch_spi_reset(struct spi_master *master) +{ + /* write 1 to reset SPI */ + pch_spi_writereg(master, PCH_SRST, 0x1); + + /* clear reset */ + pch_spi_writereg(master, PCH_SRST, 0x0); +} + +static int pch_spi_setup(struct spi_device *pspi) +{ + /* check bits per word */ + if (pspi->bits_per_word == 0) { + pspi->bits_per_word = 8; + dev_dbg(&pspi->dev, "%s 8 bits per word\n", __func__); + } + + if ((pspi->bits_per_word != 8) && (pspi->bits_per_word != 16)) { + dev_err(&pspi->dev, "%s Invalid bits per word\n", __func__); + return -EINVAL; + } + + /* Check baud rate setting */ + /* if baud rate of chip is greater than + max we can support,return error */ + if ((pspi->max_speed_hz) > PCH_MAX_BAUDRATE) + pspi->max_speed_hz = PCH_MAX_BAUDRATE; + + dev_dbg(&pspi->dev, "%s MODE = %x\n", __func__, + (pspi->mode) & (SPI_CPOL | SPI_CPHA)); + + return 0; +} + +static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) +{ + + struct spi_transfer *transfer; + struct pch_spi_data *data = spi_master_get_devdata(pspi->master); + int retval; + unsigned long flags; + + /* validate spi message and baud rate */ + if (unlikely(list_empty(&pmsg->transfers) == 1)) { + dev_err(&pspi->dev, "%s list empty\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (unlikely(pspi->max_speed_hz == 0)) { + dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n", + __func__, pspi->max_speed_hz); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Transfer List not empty. " + "Transfer Speed is set.\n", __func__); + + /* validate Tx/Rx buffers and Transfer length */ + list_for_each_entry(transfer, &pmsg->transfers, transfer_list) { + if (!transfer->tx_buf && !transfer->rx_buf) { + dev_err(&pspi->dev, + "%s Tx and Rx buffer NULL\n", __func__); + retval = -EINVAL; + goto err_out; + } + + if (!transfer->len) { + dev_err(&pspi->dev, "%s Transfer length invalid\n", + __func__); + retval = -EINVAL; + goto err_out; + } + + dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. 
Transfer length" + " valid\n", __func__); + + /* if baud rate hs been specified validate the same */ + if (transfer->speed_hz > PCH_MAX_BAUDRATE) + transfer->speed_hz = PCH_MAX_BAUDRATE; + + /* if bits per word has been specified validate the same */ + if (transfer->bits_per_word) { + if ((transfer->bits_per_word != 8) + && (transfer->bits_per_word != 16)) { + retval = -EINVAL; + dev_err(&pspi->dev, + "%s Invalid bits per word\n", __func__); + goto err_out; + } + } + } + + spin_lock_irqsave(&data->lock, flags); + + /* We won't process any messages if we have been asked to terminate */ + if (data->status == STATUS_EXITING) { + dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__); + retval = -ESHUTDOWN; + goto err_return_spinlock; + } + + /* If suspended ,return -EINVAL */ + if (data->board_dat->suspend_sts) { + dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__); + retval = -EINVAL; + goto err_return_spinlock; + } + + /* set status of message */ + pmsg->actual_length = 0; + dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status); + + pmsg->status = -EINPROGRESS; + + /* add message to queue */ + list_add_tail(&pmsg->queue, &data->queue); + dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__); + + /* schedule work queue to run */ + queue_work(data->wk, &data->work); + dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__); + + retval = 0; + +err_return_spinlock: + spin_unlock_irqrestore(&data->lock, flags); +err_out: + dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval); + return retval; +} + +static inline void pch_spi_select_chip(struct pch_spi_data *data, + struct spi_device *pspi) +{ + if (data->current_chip != NULL) { + if (pspi->chip_select != data->n_curnt_chip) { + dev_dbg(&pspi->dev, "%s : different slave\n", __func__); + data->current_chip = NULL; + } + } + + data->current_chip = pspi; + + data->n_curnt_chip = data->current_chip->chip_select; + + dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__); + pch_spi_setup_transfer(pspi); +} + +static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw, + struct spi_message **ppmsg) +{ + int size; + u32 n_writes; + int j; + struct spi_message *pmsg; + const u8 *tx_buf; + const u16 *tx_sbuf; + + pmsg = *ppmsg; + + /* set baud rate if needed */ + if (data->cur_trans->speed_hz) { + dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__); + pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz); + } + + /* set bits per word if needed */ + if (data->cur_trans->bits_per_word && + (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) { + dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__); + pch_spi_set_bits_per_word(data->master, + data->cur_trans->bits_per_word); + *bpw = data->cur_trans->bits_per_word; + } else { + *bpw = data->current_msg->spi->bits_per_word; + } + + /* reset Tx/Rx index */ + data->tx_index = 0; + data->rx_index = 0; + + data->bpw_len = data->cur_trans->len / (*bpw / 8); + + /* find alloc size */ + size = data->cur_trans->len * sizeof(*data->pkt_tx_buff); + + /* allocate memory for pkt_tx_buff & pkt_rx_buffer */ + data->pkt_tx_buff = kzalloc(size, GFP_KERNEL); + if (data->pkt_tx_buff != NULL) { + data->pkt_rx_buff = kzalloc(size, GFP_KERNEL); + if (!data->pkt_rx_buff) + kfree(data->pkt_tx_buff); + } + + if (!data->pkt_rx_buff) { + /* flush queue and set status of all transfers to -ENOMEM */ + dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); + list_for_each_entry(pmsg, data->queue.next, 
queue) { + pmsg->status = -ENOMEM; + + if (pmsg->complete != 0) + pmsg->complete(pmsg->context); + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + return; + } + + /* copy Tx Data */ + if (data->cur_trans->tx_buf != NULL) { + if (*bpw == 8) { + tx_buf = data->cur_trans->tx_buf; + for (j = 0; j < data->bpw_len; j++) + data->pkt_tx_buff[j] = *tx_buf++; + } else { + tx_sbuf = data->cur_trans->tx_buf; + for (j = 0; j < data->bpw_len; j++) + data->pkt_tx_buff[j] = *tx_sbuf++; + } + } + + /* if len greater than PCH_MAX_FIFO_DEPTH, write 16,else len bytes */ + n_writes = data->bpw_len; + if (n_writes > PCH_MAX_FIFO_DEPTH) + n_writes = PCH_MAX_FIFO_DEPTH; + + dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing " + "0x2 to SSNXCR\n", __func__); + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW); + + for (j = 0; j < n_writes; j++) + pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]); + + /* update tx_index */ + data->tx_index = j; + + /* reset transfer complete flag */ + data->transfer_complete = false; + data->transfer_active = true; +} + + +static void pch_spi_nomore_transfer(struct pch_spi_data *data, + struct spi_message *pmsg) +{ + dev_dbg(&data->master->dev, "%s called\n", __func__); + /* Invoke complete callback + * [To the spi core..indicating end of transfer] */ + data->current_msg->status = 0; + + if (data->current_msg->complete != 0) { + dev_dbg(&data->master->dev, + "%s:Invoking callback of SPI core\n", __func__); + data->current_msg->complete(data->current_msg->context); + } + + /* update status in global variable */ + data->bcurrent_msg_processing = false; + + dev_dbg(&data->master->dev, + "%s:data->bcurrent_msg_processing = false\n", __func__); + + data->current_msg = NULL; + data->cur_trans = NULL; + + /* check if we have items in list and not suspending + * return 1 if list empty */ + if ((list_empty(&data->queue) == 0) && + (!data->board_dat->suspend_sts) && + (data->status != STATUS_EXITING)) { + /* We have some more work to do (either there is more tranint + * bpw;sfer requests in the current message or there are + *more messages) + */ + dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__); + queue_work(data->wk, &data->work); + } else if (data->board_dat->suspend_sts || + data->status == STATUS_EXITING) { + dev_dbg(&data->master->dev, + "%s suspend/remove initiated, flushing queue\n", + __func__); + list_for_each_entry(pmsg, data->queue.next, queue) { + pmsg->status = -EIO; + + if (pmsg->complete) + pmsg->complete(pmsg->context); + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + } +} + +static void pch_spi_set_ir(struct pch_spi_data *data) +{ + /* enable interrupts */ + if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) { + /* set receive threhold to PCH_RX_THOLD */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + PCH_RX_THOLD << SPCR_TFIC_FIELD, + ~MASK_TFIC_SPCR_BITS); + /* enable FI and RFI interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + SPCR_RFIE_BIT | SPCR_TFIE_BIT, 0); + } else { + /* set receive threhold to maximum */ + pch_spi_setclr_reg(data->master, PCH_SPCR, + PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD, + ~MASK_TFIC_SPCR_BITS); + /* enable FI interrupt */ + pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0); + } + + dev_dbg(&data->master->dev, + "%s:invoking pch_spi_set_enable to enable SPI\n", __func__); + + /* SPI set enable */ + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0); + + /* Wait until the transfer completes; go to sleep after + initiating the transfer. 
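pch_spi_set_ir() above arms the interrupts and then sleeps on data->wait until the handler sets transfer_complete. A stripped-down sketch of that wait/wake pairing, with invented ex_* names:

#include <linux/wait.h>
#include <linux/sched.h>

struct ex_xfer {
	wait_queue_head_t wait;
	bool done;			/* set from interrupt context */
};

/* interrupt-handler side: flag completion and wake the submitter */
static void ex_xfer_complete(struct ex_xfer *x)
{
	x->done = true;
	wake_up(&x->wait);
}

/* submitting side: sleep until ex_xfer_complete() has run */
static int ex_xfer_wait(struct ex_xfer *x)
{
	return wait_event_interruptible(x->wait, x->done);
}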
*/ + dev_dbg(&data->master->dev, + "%s:waiting for transfer to get over\n", __func__); + + wait_event_interruptible(data->wait, data->transfer_complete); + + pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); + dev_dbg(&data->master->dev, + "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); + + data->transfer_active = false; + dev_dbg(&data->master->dev, + "%s set data->transfer_active = false\n", __func__); + + /* clear all interrupts */ + pch_spi_writereg(data->master, PCH_SPSR, + pch_spi_readreg(data->master, PCH_SPSR)); + /* disable interrupts */ + pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); +} + +static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw) +{ + int j; + u8 *rx_buf; + u16 *rx_sbuf; + + /* copy Rx Data */ + if (!data->cur_trans->rx_buf) + return; + + if (bpw == 8) { + rx_buf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_buf++ = data->pkt_rx_buff[j] & 0xFF; + } else { + rx_sbuf = data->cur_trans->rx_buf; + for (j = 0; j < data->bpw_len; j++) + *rx_sbuf++ = data->pkt_rx_buff[j]; + } +} + + +static void pch_spi_process_messages(struct work_struct *pwork) +{ + struct spi_message *pmsg; + struct pch_spi_data *data; + int bpw; + + data = container_of(pwork, struct pch_spi_data, work); + dev_dbg(&data->master->dev, "%s data initialized\n", __func__); + + spin_lock(&data->lock); + + /* check if suspend has been initiated;if yes flush queue */ + if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { + dev_dbg(&data->master->dev, + "%s suspend/remove initiated,flushing queue\n", + __func__); + + list_for_each_entry(pmsg, data->queue.next, queue) { + pmsg->status = -EIO; + + if (pmsg->complete != 0) { + spin_unlock(&data->lock); + pmsg->complete(pmsg->context); + spin_lock(&data->lock); + } + + /* delete from queue */ + list_del_init(&pmsg->queue); + } + + spin_unlock(&data->lock); + return; + } + + data->bcurrent_msg_processing = true; + dev_dbg(&data->master->dev, + "%s Set data->bcurrent_msg_processing= true\n", __func__); + + /* Get the message from the queue and delete it from there. */ + data->current_msg = list_entry(data->queue.next, struct spi_message, + queue); + + list_del_init(&data->current_msg->queue); + + data->current_msg->status = 0; + + pch_spi_select_chip(data, data->current_msg->spi); + + spin_unlock(&data->lock); + + do { + /* If we are already processing a message get the next + transfer structure from the message otherwise retrieve + the 1st transfer request from the message. */ + spin_lock(&data->lock); + + if (data->cur_trans == NULL) { + data->cur_trans = + list_entry(data->current_msg->transfers. 
+ next, struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting 1st transfer message\n", __func__); + } else { + data->cur_trans = + list_entry(data->cur_trans->transfer_list.next, + struct spi_transfer, + transfer_list); + dev_dbg(&data->master->dev, + "%s :Getting next transfer message\n", + __func__); + } + + spin_unlock(&data->lock); + + pch_spi_set_tx(data, &bpw, &pmsg); + + /* Control interrupt*/ + pch_spi_set_ir(data); + + /* Disable SPI transfer */ + pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0, + SPCR_SPE_BIT); + + /* clear FIFO */ + pch_spi_clear_fifo(data->master); + + /* copy Rx Data */ + pch_spi_copy_rx_data(data, bpw); + + /* free memory */ + kfree(data->pkt_rx_buff); + data->pkt_rx_buff = NULL; + + kfree(data->pkt_tx_buff); + data->pkt_tx_buff = NULL; + + /* increment message count */ + data->current_msg->actual_length += data->cur_trans->len; + + dev_dbg(&data->master->dev, + "%s:data->current_msg->actual_length=%d\n", + __func__, data->current_msg->actual_length); + + /* check for delay */ + if (data->cur_trans->delay_usecs) { + dev_dbg(&data->master->dev, "%s:" + "delay in usec=%d\n", __func__, + data->cur_trans->delay_usecs); + udelay(data->cur_trans->delay_usecs); + } + + spin_lock(&data->lock); + + /* No more transfer in this message. */ + if ((data->cur_trans->transfer_list.next) == + &(data->current_msg->transfers)) { + pch_spi_nomore_transfer(data, pmsg); + } + + spin_unlock(&data->lock); + + } while (data->cur_trans != NULL); +} + +static void pch_spi_free_resources(struct pch_spi_board_data *board_dat) +{ + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* free workqueue */ + if (board_dat->data->wk != NULL) { + destroy_workqueue(board_dat->data->wk); + board_dat->data->wk = NULL; + dev_dbg(&board_dat->pdev->dev, + "%s destroy_workqueue invoked successfully\n", + __func__); + } + + /* disable interrupts & free IRQ */ + if (board_dat->irq_reg_sts) { + /* disable interrupts */ + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, + PCH_ALL); + + /* free IRQ */ + free_irq(board_dat->pdev->irq, board_dat); + + dev_dbg(&board_dat->pdev->dev, + "%s free_irq invoked successfully\n", __func__); + + board_dat->irq_reg_sts = false; + } + + /* unmap PCI base address */ + if (board_dat->data->io_remap_addr != 0) { + pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr); + + board_dat->data->io_remap_addr = 0; + + dev_dbg(&board_dat->pdev->dev, + "%s pci_iounmap invoked successfully\n", __func__); + } + + /* release PCI region */ + if (board_dat->pci_req_sts) { + pci_release_regions(board_dat->pdev); + dev_dbg(&board_dat->pdev->dev, + "%s pci_release_regions invoked successfully\n", + __func__); + board_dat->pci_req_sts = false; + } +} + +static int pch_spi_get_resources(struct pch_spi_board_data *board_dat) +{ + void __iomem *io_remap_addr; + int retval; + dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__); + + /* create workqueue */ + board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME); + if (!board_dat->data->wk) { + dev_err(&board_dat->pdev->dev, + "%s create_singlet hread_workqueue failed\n", __func__); + retval = -EBUSY; + goto err_return; + } + + dev_dbg(&board_dat->pdev->dev, + "%s create_singlethread_workqueue success\n", __func__); + + retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME); + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s request_region failed\n", __func__); + goto err_return; + } + + board_dat->pci_req_sts = true; + + io_remap_addr = 
pci_iomap(board_dat->pdev, 1, 0); + if (io_remap_addr == 0) { + dev_err(&board_dat->pdev->dev, + "%s pci_iomap failed\n", __func__); + retval = -ENOMEM; + goto err_return; + } + + /* calculate base address for all channels */ + board_dat->data->io_remap_addr = io_remap_addr; + + /* reset PCH SPI h/w */ + pch_spi_reset(board_dat->data->master); + dev_dbg(&board_dat->pdev->dev, + "%s pch_spi_reset invoked successfully\n", __func__); + + /* register IRQ */ + retval = request_irq(board_dat->pdev->irq, pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, board_dat); + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s request_irq failed\n", __func__); + goto err_return; + } + + dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n", + __func__, retval); + + board_dat->irq_reg_sts = true; + dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__); + +err_return: + if (retval != 0) { + dev_err(&board_dat->pdev->dev, + "%s FAIL:invoking pch_spi_free_resources\n", __func__); + pch_spi_free_resources(board_dat); + } + + dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval); + + return retval; +} + +static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + + struct spi_master *master; + + struct pch_spi_board_data *board_dat; + int retval; + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + /* allocate memory for private data */ + board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); + if (board_dat == NULL) { + dev_err(&pdev->dev, + " %s memory allocation for private data failed\n", + __func__); + retval = -ENOMEM; + goto err_kmalloc; + } + + dev_dbg(&pdev->dev, + "%s memory allocation for private data success\n", __func__); + + /* enable PCI device */ + retval = pci_enable_device(pdev); + if (retval != 0) { + dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__); + + goto err_pci_en_device; + } + + dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n", + __func__, retval); + + board_dat->pdev = pdev; + + /* alllocate memory for SPI master */ + master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data)); + if (master == NULL) { + retval = -ENOMEM; + dev_err(&pdev->dev, "%s Fail.\n", __func__); + goto err_spi_alloc_master; + } + + dev_dbg(&pdev->dev, + "%s spi_alloc_master returned non NULL\n", __func__); + + /* initialize members of SPI master */ + master->bus_num = -1; + master->num_chipselect = PCH_MAX_CS; + master->setup = pch_spi_setup; + master->transfer = pch_spi_transfer; + dev_dbg(&pdev->dev, + "%s transfer member of SPI master initialized\n", __func__); + + board_dat->data = spi_master_get_devdata(master); + + board_dat->data->master = master; + board_dat->data->n_curnt_chip = 255; + board_dat->data->board_dat = board_dat; + board_dat->data->status = STATUS_RUNNING; + + INIT_LIST_HEAD(&board_dat->data->queue); + spin_lock_init(&board_dat->data->lock); + INIT_WORK(&board_dat->data->work, pch_spi_process_messages); + init_waitqueue_head(&board_dat->data->wait); + + /* allocate resources for PCH SPI */ + retval = pch_spi_get_resources(board_dat); + if (retval) { + dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval); + goto err_spi_get_resources; + } + + dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n", + __func__, retval); + + /* save private data in dev */ + pci_set_drvdata(pdev, board_dat); + dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__); + + /* set master mode */ + pch_spi_set_master_mode(master); + dev_dbg(&pdev->dev, + "%s invoked 
pch_spi_set_master_mode\n", __func__); + + /* Register the controller with the SPI core. */ + retval = spi_register_master(master); + if (retval != 0) { + dev_err(&pdev->dev, + "%s spi_register_master FAILED\n", __func__); + goto err_spi_reg_master; + } + + dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n", + __func__, retval); + + + return 0; + +err_spi_reg_master: + spi_unregister_master(master); +err_spi_get_resources: +err_spi_alloc_master: + spi_master_put(master); + pci_disable_device(pdev); +err_pci_en_device: + kfree(board_dat); +err_kmalloc: + return retval; +} + +static void pch_spi_remove(struct pci_dev *pdev) +{ + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); + int count; + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board_dat) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return; + } + + /* check for any pending messages; no action is taken if the queue + * is still full; but at least we tried. Unload anyway */ + count = 500; + spin_lock(&board_dat->data->lock); + board_dat->data->status = STATUS_EXITING; + while ((list_empty(&board_dat->data->queue) == 0) && --count) { + dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n", + __func__); + spin_unlock(&board_dat->data->lock); + msleep(PCH_SLEEP_TIME); + spin_lock(&board_dat->data->lock); + } + spin_unlock(&board_dat->data->lock); + + /* Free resources allocated for PCH SPI */ + pch_spi_free_resources(board_dat); + + spi_unregister_master(board_dat->data->master); + + /* free memory for private data */ + kfree(board_dat); + + pci_set_drvdata(pdev, NULL); + + /* disable PCI device */ + pci_disable_device(pdev); + + dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__); +} + +#ifdef CONFIG_PM +static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + u8 count; + int retval; + + struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board_dat) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return -EFAULT; + } + + retval = 0; + board_dat->suspend_sts = true; + + /* check if the current message is processed: + Only after thats done the transfer will be suspended */ + count = 255; + while ((--count) > 0) { + if (!(board_dat->data->bcurrent_msg_processing)) { + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_" + "msg_processing = false\n", __func__); + break; + } else { + dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_" + "processing = true\n", __func__); + } + msleep(PCH_SLEEP_TIME); + } + + /* Free IRQ */ + if (board_dat->irq_reg_sts) { + /* disable all interrupts */ + pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0, + PCH_ALL); + pch_spi_reset(board_dat->data->master); + + free_irq(board_dat->pdev->irq, board_dat); + + board_dat->irq_reg_sts = false; + dev_dbg(&pdev->dev, + "%s free_irq invoked successfully.\n", __func__); + } + + /* save config space */ + retval = pci_save_state(pdev); + + if (retval == 0) { + dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n", + __func__, retval); + /* disable PM notifications */ + pci_enable_wake(pdev, PCI_D3hot, 0); + dev_dbg(&pdev->dev, + "%s pci_enable_wake invoked successfully\n", __func__); + /* disable PCI device */ + pci_disable_device(pdev); + dev_dbg(&pdev->dev, + "%s pci_disable_device invoked successfully\n", + __func__); + /* move device to D3hot state */ + pci_set_power_state(pdev, PCI_D3hot); + dev_dbg(&pdev->dev, + "%s pci_set_power_state invoked successfully\n", + 
__func__); + } else { + dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__); + } + + dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval); + + return retval; +} + +static int pch_spi_resume(struct pci_dev *pdev) +{ + int retval; + + struct pch_spi_board_data *board = pci_get_drvdata(pdev); + dev_dbg(&pdev->dev, "%s ENTRY\n", __func__); + + if (!board) { + dev_err(&pdev->dev, + "%s pci_get_drvdata returned NULL\n", __func__); + return -EFAULT; + } + + /* move device to DO power state */ + pci_set_power_state(pdev, PCI_D0); + + /* restore state */ + pci_restore_state(pdev); + + retval = pci_enable_device(pdev); + if (retval < 0) { + dev_err(&pdev->dev, + "%s pci_enable_device failed\n", __func__); + } else { + /* disable PM notifications */ + pci_enable_wake(pdev, PCI_D3hot, 0); + + /* register IRQ handler */ + if (!board->irq_reg_sts) { + /* register IRQ */ + retval = request_irq(board->pdev->irq, pch_spi_handler, + IRQF_SHARED, KBUILD_MODNAME, + board); + if (retval < 0) { + dev_err(&pdev->dev, + "%s request_irq failed\n", __func__); + return retval; + } + board->irq_reg_sts = true; + + /* reset PCH SPI h/w */ + pch_spi_reset(board->data->master); + pch_spi_set_master_mode(board->data->master); + + /* set suspend status to false */ + board->suspend_sts = false; + + } + } + + dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval); + + return retval; +} +#else +#define pch_spi_suspend NULL +#define pch_spi_resume NULL + +#endif + +static struct pci_driver pch_spi_pcidev = { + .name = "pch_spi", + .id_table = pch_spi_pcidev_id, + .probe = pch_spi_probe, + .remove = pch_spi_remove, + .suspend = pch_spi_suspend, + .resume = pch_spi_resume, +}; + +static int __init pch_spi_init(void) +{ + return pci_register_driver(&pch_spi_pcidev); +} +module_init(pch_spi_init); + +static void __exit pch_spi_exit(void) +{ + pci_unregister_driver(&pch_spi_pcidev); +} +module_exit(pch_spi_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver"); diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index ea1bec3c9a1..4e6245e6799 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -545,6 +545,7 @@ static const struct file_operations spidev_fops = { .unlocked_ioctl = spidev_ioctl, .open = spidev_open, .release = spidev_release, + .llseek = no_llseek, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index 7892ac16352..c68b3dc19e1 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -20,7 +20,6 @@ #include <linux/mmc/sdio_func.h> #include <linux/slab.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c index 526682d68de..c7345dbf43f 100644 --- a/drivers/ssb/pcmcia.c +++ b/drivers/ssb/pcmcia.c @@ -13,7 +13,6 @@ #include <linux/io.h> #include <linux/etherdevice.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c index 9738cad4ba1..ee079ab9fb2 100644 --- a/drivers/ssb/scan.c +++ b/drivers/ssb/scan.c @@ -17,7 +17,6 @@ #include <linux/pci.h> #include <linux/io.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index baa8b05b9e8..6e973a79aa2 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@ -30,7 +30,6 @@ 
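The suspend/resume handlers above follow the legacy PCI power-management sequence: quiesce the device and free its IRQ, save config space, disable wakeups, drop to D3hot, then reverse the steps on resume. A compact sketch of that sequence (ex_* names are invented, most error handling trimmed):

#include <linux/pci.h>

static int ex_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int ret = pci_save_state(pdev);

	if (ret)
		return ret;

	pci_enable_wake(pdev, PCI_D3hot, 0);	/* no wake-up events wanted */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int ex_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}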
#include "hash.h" #include <linux/if_arp.h> -#include <linux/netfilter_bridge.h> #define MIN(x, y) ((x) < (y) ? (x) : (y)) @@ -431,11 +430,6 @@ out: return NOTIFY_DONE; } -static int batman_skb_recv_finish(struct sk_buff *skb) -{ - return NF_ACCEPT; -} - /* receive a packet with the batman ethertype coming on a hard * interface */ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, @@ -456,13 +450,6 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, if (atomic_read(&module_state) != MODULE_ACTIVE) goto err_free; - /* if netfilter/ebtables wants to block incoming batman - * packets then give them a chance to do so here */ - ret = NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, dev, NULL, - batman_skb_recv_finish); - if (ret != 1) - goto err_out; - /* packet should hold at least type and version */ if (unlikely(skb_headlen(skb) < 2)) goto err_free; diff --git a/drivers/staging/batman-adv/send.c b/drivers/staging/batman-adv/send.c index 055edee7b4e..da3c82e47bb 100644 --- a/drivers/staging/batman-adv/send.c +++ b/drivers/staging/batman-adv/send.c @@ -29,7 +29,6 @@ #include "vis.h" #include "aggregation.h" -#include <linux/netfilter_bridge.h> static void send_outstanding_bcast_packet(struct work_struct *work); @@ -92,12 +91,9 @@ int send_skb_packet(struct sk_buff *skb, /* dev_queue_xmit() returns a negative result on error. However on * congestion and traffic shaping, it drops and returns NET_XMIT_DROP - * (which is > 0). This will not be treated as an error. - * Also, if netfilter/ebtables wants to block outgoing batman - * packets then giving them a chance to do so here */ + * (which is > 0). This will not be treated as an error. */ - return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, - dev_queue_xmit); + return dev_queue_xmit(skb); send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index 14091313ceb..fecb89e8c66 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -1922,6 +1922,7 @@ const struct file_operations comedi_fops = { .mmap = comedi_mmap, .poll = comedi_poll, .fasync = comedi_fasync, + .llseek = noop_llseek, }; struct class *comedi_class; diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c index f8ede1182cc..0345b4caba7 100644 --- a/drivers/staging/comedi/drivers/cb_das16_cs.c +++ b/drivers/staging/comedi/drivers/cb_das16_cs.c @@ -37,7 +37,6 @@ Status: experimental #include <linux/delay.h> #include <linux/pci.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -692,10 +691,6 @@ static int das16cs_pcmcia_attach(struct pcmcia_device *link) local->link = link; link->priv = local; - /* Initialize the pcmcia_device structure */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - cur_dev = link; das16cs_pcmcia_config(link); @@ -715,37 +710,12 @@ static void das16cs_pcmcia_detach(struct pcmcia_device *link) static int das16cs_pcmcia_config_loop(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { - if (cfg->index == 0) + if (p_dev->config_index == 0) return -EINVAL; - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - /* This reserves IO space but doesn't actually enable it */ - return pcmcia_request_io(p_dev); - } - - return 0; + return pcmcia_request_io(p_dev); } static void das16cs_pcmcia_config(struct pcmcia_device *link) @@ -754,6 +724,9 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link) dev_dbg(&link->dev, "das16cs_pcmcia_config\n"); + /* Do we need to allocate an interrupt? */ + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, das16cs_pcmcia_config_loop, NULL); if (ret) { dev_warn(&link->dev, "no configuration found\n"); @@ -763,25 +736,10 @@ static void das16cs_pcmcia_config(struct pcmcia_device *link) if (!link->irq) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %u", link->irq); - if (link->resource[0]) - printk(", io %pR", link->resource[0]); - if (link->resource[1]) - printk(", io %pR", link->resource[1]); - printk("\n"); - return; failed: @@ -832,9 +790,7 @@ struct pcmcia_driver das16cs_driver = { .resume = das16cs_pcmcia_resume, .id_table = das16cs_id_table, .owner = THIS_MODULE, - .drv = { - .name = "cb_das16_cs", - }, + .name = "cb_das16_cs", }; static int __init init_das16cs_pcmcia_cs(void) diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c index 48d9fb1227d..0b32a2df776 100644 --- a/drivers/staging/comedi/drivers/das08_cs.c +++ b/drivers/staging/comedi/drivers/das08_cs.c @@ -48,7 +48,6 @@ Command support does not exist, but could be added for this board. #include "das08.h" /* pcmcia includes */ -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -115,40 +114,15 @@ static void das08_pcmcia_release(struct pcmcia_device *link); static int das08_pcmcia_suspend(struct pcmcia_device *p_dev); static int das08_pcmcia_resume(struct pcmcia_device *p_dev); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static int das08_pcmcia_attach(struct pcmcia_device *); static void das08_pcmcia_detach(struct pcmcia_device *); -/* - You'll also need to prototype all the functions that will actually - be used to talk to your device. See 'memory_cs' for a good example - of a fully self-sufficient driver; the other drivers rely more or - less on other parts of the kernel. -*/ - struct local_info_t { struct pcmcia_device *link; int stop; struct bus_operations *bus; }; -/*====================================================================== - - das08_pcmcia_attach() creates an "instance" of the driver, allocating - local data structures for one device. 
The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int das08_pcmcia_attach(struct pcmcia_device *link) { struct local_info_t *local; @@ -162,16 +136,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link) local->link = link; link->priv = local; - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - cur_dev = link; das08_pcmcia_config(link); @@ -179,15 +143,6 @@ static int das08_pcmcia_attach(struct pcmcia_device *link) return 0; } /* das08_pcmcia_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void das08_pcmcia_detach(struct pcmcia_device *link) { @@ -203,46 +158,13 @@ static void das08_pcmcia_detach(struct pcmcia_device *link) static int das08_pcmcia_config_loop(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - /* This reserves IO space but doesn't actually enable it */ - return pcmcia_request_io(p_dev); - } - return 0; -} - - -/*====================================================================== - - das08_pcmcia_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. 
+ if (p_dev->config_index == 0) + return -EINVAL; -======================================================================*/ + return pcmcia_request_io(p_dev); +} static void das08_pcmcia_config(struct pcmcia_device *link) { @@ -250,6 +172,8 @@ static void das08_pcmcia_config(struct pcmcia_device *link) dev_dbg(&link->dev, "das08_pcmcia_config\n"); + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, das08_pcmcia_config_loop, NULL); if (ret) { dev_warn(&link->dev, "no configuration found\n"); @@ -259,25 +183,10 @@ static void das08_pcmcia_config(struct pcmcia_device *link) if (!link->irq) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %u", link->irq); - if (link->resource[0]) - printk(", io %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - return; failed: @@ -285,32 +194,12 @@ failed: } /* das08_pcmcia_config */ -/*====================================================================== - - After a card is removed, das08_pcmcia_release() will unregister the - device, and release the PCMCIA configuration. If the device is - still open, this will be postponed until it is closed. - -======================================================================*/ - static void das08_pcmcia_release(struct pcmcia_device *link) { dev_dbg(&link->dev, "das08_pcmcia_release\n"); pcmcia_disable_device(link); } /* das08_pcmcia_release */ -/*====================================================================== - - The card status event handler. Mostly, this schedules other - stuff to run after an event is received. - - When a CARD_REMOVAL event is received, we immediately set a - private flag to block future accesses to this device. All the - functions that actually access the device should check this flag - to make sure the card is still present. - -======================================================================*/ - static int das08_pcmcia_suspend(struct pcmcia_device *link) { struct local_info_t *local = link->priv; @@ -348,9 +237,7 @@ struct pcmcia_driver das08_cs_driver = { .resume = das08_pcmcia_resume, .id_table = das08_cs_id_table, .owner = THIS_MODULE, - .drv = { - .name = "pcm-das08", - }, + .name = "pcm-das08", }; static int __init init_das08_pcmcia_cs(void) diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c index cc15666e5cc..6b7372eed90 100644 --- a/drivers/staging/comedi/drivers/ni_daq_700.c +++ b/drivers/staging/comedi/drivers/ni_daq_700.c @@ -47,7 +47,6 @@ IRQ is assigned but not used. #include <linux/ioport.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -435,47 +434,20 @@ static int dio700_detach(struct comedi_device *dev) return 0; }; -/* PCMCIA crap -- watch your words, please! 
*/ - static void dio700_config(struct pcmcia_device *link); static void dio700_release(struct pcmcia_device *link); static int dio700_cs_suspend(struct pcmcia_device *p_dev); static int dio700_cs_resume(struct pcmcia_device *p_dev); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static int dio700_cs_attach(struct pcmcia_device *); static void dio700_cs_detach(struct pcmcia_device *); -/* - You'll also need to prototype all the functions that will actually - be used to talk to your device. See 'memory_cs' for a good example - of a fully self-sufficient driver; the other drivers rely more or - less on other parts of the kernel. -*/ - struct local_info_t { struct pcmcia_device *link; int stop; struct bus_operations *bus; }; -/*====================================================================== - - dio700_cs_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int dio700_cs_attach(struct pcmcia_device *link) { struct local_info_t *local; @@ -491,16 +463,6 @@ static int dio700_cs_attach(struct pcmcia_device *link) local->link = link; link->priv = local; - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - pcmcia_cur_dev = link; dio700_config(link); @@ -508,15 +470,6 @@ static int dio700_cs_attach(struct pcmcia_device *link) return 0; } /* dio700_cs_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void dio700_cs_detach(struct pcmcia_device *link) { @@ -532,65 +485,26 @@ static void dio700_cs_detach(struct pcmcia_device *link) } /* dio700_cs_detach */ -/*====================================================================== - - dio700_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. - -======================================================================*/ - static int dio700_pcmcia_config_loop(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Does this card need audio output? */ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } - - /* Do we need to allocate an interrupt? 
*/ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - /* This reserves IO space but doesn't actually enable it */ - if (pcmcia_request_io(p_dev) != 0) - return -ENODEV; - } + if (p_dev->config_index == 0) + return -EINVAL; - /* If we got this far, we're cool! */ - return 0; + return pcmcia_request_io(p_dev); } static void dio700_config(struct pcmcia_device *link) { - win_req_t req; int ret; printk(KERN_INFO "ni_daq_700: cs-config\n"); dev_dbg(&link->dev, "dio700_config\n"); + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_AUDIO | + CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, dio700_pcmcia_config_loop, NULL); if (ret) { dev_warn(&link->dev, "no configuration found\n"); @@ -600,25 +514,10 @@ static void dio700_config(struct pcmcia_device *link) if (!link->irq) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret != 0) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(", io %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - return; failed: @@ -634,18 +533,6 @@ static void dio700_release(struct pcmcia_device *link) pcmcia_disable_device(link); } /* dio700_release */ -/*====================================================================== - - The card status event handler. Mostly, this schedules other - stuff to run after an event is received. - - When a CARD_REMOVAL event is received, we immediately set a - private flag to block future accesses to this device. All the - functions that actually access the device should check this flag - to make sure the card is still present. - -======================================================================*/ - static int dio700_cs_suspend(struct pcmcia_device *link) { struct local_info_t *local = link->priv; @@ -685,9 +572,7 @@ struct pcmcia_driver dio700_cs_driver = { .resume = dio700_cs_resume, .id_table = dio700_cs_ids, .owner = THIS_MODULE, - .drv = { - .name = "ni_daq_700", - }, + .name = "ni_daq_700", }; static int __init init_dio700_cs(void) diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c index 773ae2044e0..c9c28584db6 100644 --- a/drivers/staging/comedi/drivers/ni_daq_dio24.c +++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c @@ -48,7 +48,6 @@ the PCMCIA interface. 
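Aside from the socket-configuration rework, every driver touched here gets the same registration cleanup: the nested .drv = { .name = ... } member of struct pcmcia_driver is flattened into a top-level .name. A minimal sketch of a converted declaration, using hypothetical callback names and a placeholder ID table (only the field layout is taken from the hunks above):

#include <linux/module.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/* Hypothetical probe/remove callbacks; a real driver supplies its own. */
static int example_cs_attach(struct pcmcia_device *link);
static void example_cs_detach(struct pcmcia_device *link);

static struct pcmcia_device_id example_cs_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x0000, 0x0000),	/* placeholder ID */
	PCMCIA_DEVICE_NULL,
};

static struct pcmcia_driver example_cs_driver = {
	.owner		= THIS_MODULE,
	.name		= "example_cs",	/* was .drv = { .name = "example_cs" } */
	.probe		= example_cs_attach,
	.remove		= example_cs_detach,
	.id_table	= example_cs_ids,
};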
#include "8255.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -187,47 +186,20 @@ static int dio24_detach(struct comedi_device *dev) return 0; }; -/* PCMCIA crap -- watch your words! */ - static void dio24_config(struct pcmcia_device *link); static void dio24_release(struct pcmcia_device *link); static int dio24_cs_suspend(struct pcmcia_device *p_dev); static int dio24_cs_resume(struct pcmcia_device *p_dev); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static int dio24_cs_attach(struct pcmcia_device *); static void dio24_cs_detach(struct pcmcia_device *); -/* - You'll also need to prototype all the functions that will actually - be used to talk to your device. See 'memory_cs' for a good example - of a fully self-sufficient driver; the other drivers rely more or - less on other parts of the kernel. -*/ - struct local_info_t { struct pcmcia_device *link; int stop; struct bus_operations *bus; }; -/*====================================================================== - - dio24_cs_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int dio24_cs_attach(struct pcmcia_device *link) { struct local_info_t *local; @@ -243,16 +215,6 @@ static int dio24_cs_attach(struct pcmcia_device *link) local->link = link; link->priv = local; - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - pcmcia_cur_dev = link; dio24_config(link); @@ -260,15 +222,6 @@ static int dio24_cs_attach(struct pcmcia_device *link) return 0; } /* dio24_cs_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void dio24_cs_detach(struct pcmcia_device *link) { @@ -284,54 +237,13 @@ static void dio24_cs_detach(struct pcmcia_device *link) } /* dio24_cs_detach */ -/*====================================================================== - - dio24_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. - -======================================================================*/ - static int dio24_pcmcia_config_loop(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Does this card need audio output? 
*/ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - /* This reserves IO space but doesn't actually enable it */ - if (pcmcia_request_io(p_dev) != 0) - return -ENODEV; - } + if (p_dev->config_index == 0) + return -EINVAL; - /* If we got this far, we're cool! */ - return 0; + return pcmcia_request_io(p_dev); } static void dio24_config(struct pcmcia_device *link) @@ -342,6 +254,9 @@ static void dio24_config(struct pcmcia_device *link) dev_dbg(&link->dev, "dio24_config\n"); + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_AUDIO | + CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, dio24_pcmcia_config_loop, NULL); if (ret) { dev_warn(&link->dev, "no configuration found\n"); @@ -351,25 +266,10 @@ static void dio24_config(struct pcmcia_device *link) if (!link->irq) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - return; failed: @@ -385,18 +285,6 @@ static void dio24_release(struct pcmcia_device *link) pcmcia_disable_device(link); } /* dio24_release */ -/*====================================================================== - - The card status event handler. Mostly, this schedules other - stuff to run after an event is received. - - When a CARD_REMOVAL event is received, we immediately set a - private flag to block future accesses to this device. All the - functions that actually access the device should check this flag - to make sure the card is still present. 
- -======================================================================*/ - static int dio24_cs_suspend(struct pcmcia_device *link) { struct local_info_t *local = link->priv; @@ -435,9 +323,7 @@ struct pcmcia_driver dio24_cs_driver = { .resume = dio24_cs_resume, .id_table = dio24_cs_ids, .owner = THIS_MODULE, - .drv = { - .name = "ni_daq_dio24", - }, + .name = "ni_daq_dio24", }; static int __init init_dio24_cs(void) diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c index 68c4ecbd93a..6facbc8bf77 100644 --- a/drivers/staging/comedi/drivers/ni_labpc_cs.c +++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c @@ -71,7 +71,6 @@ NI manuals: #include "comedi_fc.h" #include "ni_labpc.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -153,59 +152,20 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it) return labpc_common_attach(dev, iobase, irq, 0); } -/*====================================================================*/ - -/* - The event() function is this driver's Card Services event handler. - It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card - insertion and ejection events. They are invoked from the dummy - event handler. - - Kernel version 2.6.16 upwards uses suspend() and resume() functions - instead of an event() function. -*/ - static void labpc_config(struct pcmcia_device *link); static void labpc_release(struct pcmcia_device *link); static int labpc_cs_suspend(struct pcmcia_device *p_dev); static int labpc_cs_resume(struct pcmcia_device *p_dev); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. -*/ - static int labpc_cs_attach(struct pcmcia_device *); static void labpc_cs_detach(struct pcmcia_device *); -/* - You'll also need to prototype all the functions that will actually - be used to talk to your device. See 'memory_cs' for a good example - of a fully self-sufficient driver; the other drivers rely more or - less on other parts of the kernel. -*/ - struct local_info_t { struct pcmcia_device *link; int stop; struct bus_operations *bus; }; -/*====================================================================== - - labpc_cs_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int labpc_cs_attach(struct pcmcia_device *link) { struct local_info_t *local; @@ -219,16 +179,6 @@ static int labpc_cs_attach(struct pcmcia_device *link) local->link = link; link->priv = local; - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. 
- */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - pcmcia_cur_dev = link; labpc_config(link); @@ -236,15 +186,6 @@ static int labpc_cs_attach(struct pcmcia_device *link) return 0; } /* labpc_cs_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void labpc_cs_detach(struct pcmcia_device *link) { dev_dbg(&link->dev, "labpc_cs_detach\n"); @@ -263,54 +204,13 @@ static void labpc_cs_detach(struct pcmcia_device *link) } /* labpc_cs_detach */ -/*====================================================================== - - labpc_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. - -======================================================================*/ - static int labpc_pcmcia_config_loop(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Does this card need audio output? */ - if (cfg->flags & CISTPL_CFTABLE_AUDIO) { - p_dev->conf.Attributes |= CONF_ENABLE_SPKR; - p_dev->conf.Status = CCSR_AUDIO_ENA; - } - - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - /* This reserves IO space but doesn't actually enable it */ - if (pcmcia_request_io(p_dev) != 0) - return -ENODEV; - } + if (p_dev->config_index == 0) + return -EINVAL; - /* If we got this far, we're cool! */ - return 0; + return pcmcia_request_io(p_dev); } @@ -320,6 +220,9 @@ static void labpc_config(struct pcmcia_device *link) dev_dbg(&link->dev, "labpc_config\n"); + link->config_flags |= CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ | + CONF_AUTO_AUDIO | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, labpc_pcmcia_config_loop, NULL); if (ret) { dev_warn(&link->dev, "no configuration found\n"); @@ -329,25 +232,10 @@ static void labpc_config(struct pcmcia_device *link) if (!link->irq) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. 
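By this point the same before/after shape has repeated for cb_das16_cs, das08_cs, ni_daq_700, ni_daq_dio24 and ni_labpc_cs, so it may be easier to read the post-conversion control flow in one place. The sketch below is assembled from the hunks above; the names are hypothetical, CONF_AUTO_AUDIO replaces the old CONF_ENABLE_SPKR/CCSR_AUDIO_ENA handling only on the cards that had it, and CONF_ENABLE_PULSE_IRQ is specific to the labpc conversion:

#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

/*
 * With CONF_AUTO_SET_IO the PCMCIA core fills in the I/O windows from the
 * CIS entry, so the per-entry callback only rejects unusable entries and
 * reserves the I/O range.
 */
static int example_pcmcia_config_loop(struct pcmcia_device *p_dev,
				      void *priv_data)
{
	if (p_dev->config_index == 0)
		return -EINVAL;

	return pcmcia_request_io(p_dev);
}

static void example_pcmcia_config(struct pcmcia_device *link)
{
	int ret;

	/* Replaces the old link->conf.Attributes / link->conf.IntType setup. */
	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

	ret = pcmcia_loop_config(link, example_pcmcia_config_loop, NULL);
	if (ret) {
		dev_warn(&link->dev, "no configuration found\n");
		goto failed;
	}

	if (!link->irq)
		goto failed;

	/* Replaces pcmcia_request_configuration(link, &link->conf). */
	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	return;

failed:
	pcmcia_disable_device(link);
}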
- */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %d", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - return; failed: @@ -362,18 +250,6 @@ static void labpc_release(struct pcmcia_device *link) pcmcia_disable_device(link); } /* labpc_release */ -/*====================================================================== - - The card status event handler. Mostly, this schedules other - stuff to run after an event is received. - - When a CARD_REMOVAL event is received, we immediately set a - private flag to block future accesses to this device. All the - functions that actually access the device should check this flag - to make sure the card is still present. - -======================================================================*/ - static int labpc_cs_suspend(struct pcmcia_device *link) { struct local_info_t *local = link->priv; @@ -391,8 +267,6 @@ static int labpc_cs_resume(struct pcmcia_device *link) return 0; } /* labpc_cs_resume */ -/*====================================================================*/ - static struct pcmcia_device_id labpc_cs_ids[] = { /* N.B. These IDs should match those in labpc_cs_boards (ni_labpc.c) */ PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0103), /* daqcard-1200 */ @@ -411,9 +285,7 @@ struct pcmcia_driver labpc_cs_driver = { .resume = labpc_cs_resume, .id_table = labpc_cs_ids, .owner = THIS_MODULE, - .drv = { - .name = "daqcard-1200", - }, + .name = "daqcard-1200", }; static int __init init_labpc_cs(void) diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c index 1f2426352eb..49563273f60 100644 --- a/drivers/staging/comedi/drivers/ni_mio_cs.c +++ b/drivers/staging/comedi/drivers/ni_mio_cs.c @@ -48,7 +48,6 @@ See the notes in the ni_atmio.o driver. 
#include "ni_stc.h" #include "8255.h" -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -263,11 +262,6 @@ static struct pcmcia_device *cur_dev = NULL; static int cs_attach(struct pcmcia_device *link) { - link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; - link->resource[0]->end = 16; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - cur_dev = link; mio_cs_config(link); @@ -301,16 +295,12 @@ static int mio_cs_resume(struct pcmcia_device *link) } -static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int mio_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data) { int base, ret; - p_dev->resource[0]->end = cfg->io.win[0].len; - p_dev->io_lines = cfg->io.flags & CISTPL_IO_LINES_MASK; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_16; for (base = 0x000; base < 0x400; base += 0x20) { p_dev->resource[0]->start = base; @@ -327,6 +317,7 @@ static void mio_cs_config(struct pcmcia_device *link) int ret; DPRINTK("mio_cs_config(link=%p)\n", link); + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, mio_pcmcia_config_loop, NULL); if (ret) { @@ -337,7 +328,7 @@ static void mio_cs_config(struct pcmcia_device *link) if (!link->irq) dev_info(&link->dev, "no IRQ available\n"); - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); } static int mio_cs_attach(struct comedi_device *dev, struct comedi_devconfig *it) @@ -446,9 +437,7 @@ struct pcmcia_driver ni_mio_cs_driver = { .resume = &mio_cs_resume, .id_table = ni_mio_cs_ids, .owner = THIS_MODULE, - .drv = { - .name = "ni_mio_cs", - }, + .name = "ni_mio_cs", }; int init_module(void) diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c index bf489d7f499..ebba9bb4777 100644 --- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c +++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c @@ -50,7 +50,6 @@ Devices: [Quatech] DAQP-208 (daqp), DAQP-308 #include "../comedidev.h" #include <linux/semaphore.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -969,43 +968,14 @@ static int daqp_detach(struct comedi_device *dev) ======================================================================*/ -/* - The event() function is this driver's Card Services event handler. - It will be called by Card Services when an appropriate card status - event is received. The config() and release() entry points are - used to configure or release a socket, in response to card - insertion and ejection events. - - Kernel version 2.6.16 upwards uses suspend() and resume() functions - instead of an event() function. -*/ - static void daqp_cs_config(struct pcmcia_device *link); static void daqp_cs_release(struct pcmcia_device *link); static int daqp_cs_suspend(struct pcmcia_device *p_dev); static int daqp_cs_resume(struct pcmcia_device *p_dev); -/* - The attach() and detach() entry points are used to create and destroy - "instances" of the driver, where each instance represents everything - needed to manage one actual PCMCIA card. 
-*/ - static int daqp_cs_attach(struct pcmcia_device *); static void daqp_cs_detach(struct pcmcia_device *); -/*====================================================================== - - daqp_cs_attach() creates an "instance" of the driver, allocating - local data structures for one device. The device is registered - with Card Services. - - The dev_link structure is initialized, but we don't actually - configure the card at this point -- we wait until we receive a - card insertion event. - -======================================================================*/ - static int daqp_cs_attach(struct pcmcia_device *link) { struct local_info_t *local; @@ -1031,30 +1001,11 @@ static int daqp_cs_attach(struct pcmcia_device *link) local->link = link; link->priv = local; - /* - General socket configuration defaults can go here. In this - client, we assume very little, and rely on the CIS for almost - everything. In most clients, many details (i.e., number, sizes, - and attributes of IO windows) are fixed by the nature of the - device, and can be hard-wired here. - */ - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - daqp_cs_config(link); return 0; } /* daqp_cs_attach */ -/*====================================================================== - - This deletes a driver "instance". The device is de-registered - with Card Services. If it has been released, all local data - structures are freed. Otherwise, the structures will be freed - when the device is released. - -======================================================================*/ - static void daqp_cs_detach(struct pcmcia_device *link) { struct local_info_t *dev = link->priv; @@ -1070,45 +1021,11 @@ static void daqp_cs_detach(struct pcmcia_device *link) } /* daqp_cs_detach */ -/*====================================================================== - - daqp_cs_config() is scheduled to run after a CARD_INSERTION event - is received, to configure the PCMCIA socket, and to make the - device available to the system. - -======================================================================*/ - - -static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int daqp_pcmcia_config_loop(struct pcmcia_device *p_dev, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; + if (p_dev->config_index == 0) + return -EINVAL; - /* Do we need to allocate an interrupt? */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; - p_dev->resource[0]->flags |= - pcmcia_io_cfg_data_width(io->flags); - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - if (io->nwin > 1) { - p_dev->resource[1]->flags = p_dev->resource[0]->flags; - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - } - - /* This reserves IO space but doesn't actually enable it */ return pcmcia_request_io(p_dev); } @@ -1118,6 +1035,8 @@ static void daqp_cs_config(struct pcmcia_device *link) dev_dbg(&link->dev, "daqp_cs_config\n"); + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; + ret = pcmcia_loop_config(link, daqp_pcmcia_config_loop, NULL); if (ret) { dev_warn(&link->dev, "no configuration found\n"); @@ -1128,25 +1047,10 @@ static void daqp_cs_config(struct pcmcia_device *link) if (ret) goto failed; - /* - This actually configures the PCMCIA socket -- setting up - the I/O windows and the interrupt mapping, and putting the - card and host interface into "Memory and IO" mode. - */ - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; - /* Finally, report what we've done */ - dev_info(&link->dev, "index 0x%02x", link->conf.ConfigIndex); - if (link->conf.Attributes & CONF_ENABLE_IRQ) - printk(", irq %u", link->irq); - if (link->resource[0]) - printk(" & %pR", link->resource[0]); - if (link->resource[1]) - printk(" & %pR", link->resource[1]); - printk("\n"); - return; failed: @@ -1161,18 +1065,6 @@ static void daqp_cs_release(struct pcmcia_device *link) pcmcia_disable_device(link); } /* daqp_cs_release */ -/*====================================================================== - - The card status event handler. Mostly, this schedules other - stuff to run after an event is received. - - When a CARD_REMOVAL event is received, we immediately set a - private flag to block future accesses to this device. All the - functions that actually access the device should check this flag - to make sure the card is still present. 
- -======================================================================*/ - static int daqp_cs_suspend(struct pcmcia_device *link) { struct local_info_t *local = link->priv; @@ -1212,9 +1104,7 @@ static struct pcmcia_driver daqp_cs_driver = { .resume = daqp_cs_resume, .id_table = daqp_cs_id_table, .owner = THIS_MODULE, - .drv = { - .name = "quatech_daqp_cs", - }, + .name = "quatech_daqp_cs", }; int __init init_module(void) diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c index fbb80f09a3d..af258991fe7 100644 --- a/drivers/staging/crystalhd/crystalhd_lnx.c +++ b/drivers/staging/crystalhd/crystalhd_lnx.c @@ -351,6 +351,7 @@ static const struct file_operations chd_dec_fops = { .unlocked_ioctl = chd_dec_ioctl, .open = chd_dec_open, .release = chd_dec_close, + .llseek = noop_llseek, }; static int __devinit chd_dec_init_chdev(struct crystalhd_adp *adp) diff --git a/drivers/staging/cx25821/Kconfig b/drivers/staging/cx25821/Kconfig index df7756a95fa..813cb355ac0 100644 --- a/drivers/staging/cx25821/Kconfig +++ b/drivers/staging/cx25821/Kconfig @@ -1,6 +1,7 @@ config VIDEO_CX25821 tristate "Conexant cx25821 support" depends on DVB_CORE && VIDEO_DEV && PCI && I2C && INPUT + depends on BKL # please fix select I2C_ALGOBIT select VIDEO_BTCX select VIDEO_TVEEPROM diff --git a/drivers/staging/dream/camera/msm_camera.c b/drivers/staging/dream/camera/msm_camera.c index 81bd71fd816..de4ab61efd4 100644 --- a/drivers/staging/dream/camera/msm_camera.c +++ b/drivers/staging/dream/camera/msm_camera.c @@ -1941,6 +1941,7 @@ static const struct file_operations msm_fops_config = { .open = msm_open, .unlocked_ioctl = msm_ioctl_config, .release = msm_release_config, + .llseek = no_llseek, }; static const struct file_operations msm_fops_control = { @@ -1948,6 +1949,7 @@ static const struct file_operations msm_fops_control = { .open = msm_open_control, .unlocked_ioctl = msm_ioctl_control, .release = msm_release_control, + .llseek = no_llseek, }; static const struct file_operations msm_fops_frame = { @@ -1956,6 +1958,7 @@ static const struct file_operations msm_fops_frame = { .unlocked_ioctl = msm_ioctl_frame, .release = msm_release_frame, .poll = msm_poll_frame, + .llseek = no_llseek, }; static int msm_setup_cdev(struct msm_device *msm, diff --git a/drivers/staging/dream/pmem.c b/drivers/staging/dream/pmem.c index 7d6bbadd7fc..3640d1f2376 100644 --- a/drivers/staging/dream/pmem.c +++ b/drivers/staging/dream/pmem.c @@ -180,6 +180,7 @@ const struct file_operations pmem_fops = { .mmap = pmem_mmap, .open = pmem_open, .unlocked_ioctl = pmem_ioctl, + .llseek = noop_llseek, }; static int get_id(struct file *file) @@ -1204,6 +1205,7 @@ static ssize_t debug_read(struct file *file, char __user *buf, size_t count, static struct file_operations debug_fops = { .read = debug_read, .open = debug_open, + .llseek = default_llseek, }; #endif diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c index 8197765aae1..28a6f8da947 100644 --- a/drivers/staging/dream/qdsp5/adsp_driver.c +++ b/drivers/staging/dream/qdsp5/adsp_driver.c @@ -582,6 +582,7 @@ static struct file_operations adsp_fops = { .open = adsp_open, .unlocked_ioctl = adsp_ioctl, .release = adsp_release, + .llseek = no_llseek, }; static void adsp_create(struct adsp_device *adev, const char *name, diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c index a373f352238..45f4c78ab6e 100644 --- a/drivers/staging/dream/qdsp5/audio_aac.c +++ 
b/drivers/staging/dream/qdsp5/audio_aac.c @@ -1030,6 +1030,7 @@ static struct file_operations audio_aac_fops = { .read = audio_read, .write = audio_write, .unlocked_ioctl = audio_ioctl, + .llseek = noop_llseek, }; struct miscdevice audio_aac_misc = { diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c index 07b79d5836e..402bbc13281 100644 --- a/drivers/staging/dream/qdsp5/audio_amrnb.c +++ b/drivers/staging/dream/qdsp5/audio_amrnb.c @@ -841,6 +841,7 @@ static struct file_operations audio_amrnb_fops = { .read = audamrnb_read, .write = audamrnb_write, .unlocked_ioctl = audamrnb_ioctl, + .llseek = noop_llseek, }; struct miscdevice audio_amrnb_misc = { diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c index ad989ee8769..24a89264737 100644 --- a/drivers/staging/dream/qdsp5/audio_evrc.c +++ b/drivers/staging/dream/qdsp5/audio_evrc.c @@ -813,6 +813,7 @@ static struct file_operations audio_evrc_fops = { .read = audevrc_read, .write = audevrc_write, .unlocked_ioctl = audevrc_ioctl, + .llseek = noop_llseek, }; struct miscdevice audio_evrc_misc = { diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c index 6ae48e72d14..b51fa096074 100644 --- a/drivers/staging/dream/qdsp5/audio_in.c +++ b/drivers/staging/dream/qdsp5/audio_in.c @@ -921,12 +921,14 @@ static struct file_operations audio_fops = { .read = audio_in_read, .write = audio_in_write, .unlocked_ioctl = audio_in_ioctl, + .llseek = noop_llseek, }; static struct file_operations audpre_fops = { .owner = THIS_MODULE, .open = audpre_open, .unlocked_ioctl = audpre_ioctl, + .llseek = noop_llseek, }; struct miscdevice audio_in_misc = { diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c index 530e1f35eed..409a19ce603 100644 --- a/drivers/staging/dream/qdsp5/audio_mp3.c +++ b/drivers/staging/dream/qdsp5/audio_mp3.c @@ -948,6 +948,7 @@ static struct file_operations audio_mp3_fops = { .read = audio_read, .write = audio_write, .unlocked_ioctl = audio_ioctl, + .llseek = noop_llseek, }; struct miscdevice audio_mp3_misc = { diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c index 76d7fa5667d..d20e8954156 100644 --- a/drivers/staging/dream/qdsp5/audio_out.c +++ b/drivers/staging/dream/qdsp5/audio_out.c @@ -807,12 +807,14 @@ static struct file_operations audio_fops = { .read = audio_read, .write = audio_write, .unlocked_ioctl = audio_ioctl, + .llseek = noop_llseek, }; static struct file_operations audpp_fops = { .owner = THIS_MODULE, .open = audpp_open, .unlocked_ioctl = audpp_ioctl, + .llseek = noop_llseek, }; struct miscdevice audio_misc = { diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c index effa96f34fd..911bab416b8 100644 --- a/drivers/staging/dream/qdsp5/audio_qcelp.c +++ b/drivers/staging/dream/qdsp5/audio_qcelp.c @@ -824,6 +824,7 @@ static struct file_operations audio_qcelp_fops = { .read = audqcelp_read, .write = audqcelp_write, .unlocked_ioctl = audqcelp_ioctl, + .llseek = noop_llseek, }; struct miscdevice audio_qcelp_misc = { diff --git a/drivers/staging/dream/qdsp5/evlog.h b/drivers/staging/dream/qdsp5/evlog.h index 922ce670a32..e5ab86b9dd7 100644 --- a/drivers/staging/dream/qdsp5/evlog.h +++ b/drivers/staging/dream/qdsp5/evlog.h @@ -123,6 +123,7 @@ static int ev_log_open(struct inode *inode, struct file *file) static const struct file_operations ev_log_ops = { .read = ev_log_read, 
.open = ev_log_open, + .llseek = default_llseek, }; static int ev_log_init(struct ev_log *log) diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c index 037d7ffb7e6..e0f2f7bca29 100644 --- a/drivers/staging/dream/qdsp5/snd.c +++ b/drivers/staging/dream/qdsp5/snd.c @@ -247,6 +247,7 @@ static struct file_operations snd_fops = { .open = snd_open, .release = snd_release, .unlocked_ioctl = snd_ioctl, + .llseek = noop_llseek, }; struct miscdevice snd_misc = { diff --git a/drivers/staging/easycap/Kconfig b/drivers/staging/easycap/Kconfig index bd96f39f273..9d5fe4ddc30 100644 --- a/drivers/staging/easycap/Kconfig +++ b/drivers/staging/easycap/Kconfig @@ -1,6 +1,7 @@ config EASYCAP tristate "EasyCAP USB ID 05e1:0408 support" depends on USB && VIDEO_DEV + depends on BKL # please fix ---help--- This is an integrated audio/video driver for EasyCAP cards with diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c index 4e52105e607..689099b57fd 100644 --- a/drivers/staging/frontier/alphatrack.c +++ b/drivers/staging/frontier/alphatrack.c @@ -641,6 +641,7 @@ static const struct file_operations usb_alphatrack_fops = { .open = usb_alphatrack_open, .release = usb_alphatrack_release, .poll = usb_alphatrack_poll, + .llseek = no_llseek, }; /* diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c index eed74f0fe0b..3d12c1737ed 100644 --- a/drivers/staging/frontier/tranzport.c +++ b/drivers/staging/frontier/tranzport.c @@ -767,6 +767,7 @@ static const struct file_operations usb_tranzport_fops = { .open = usb_tranzport_open, .release = usb_tranzport_release, .poll = usb_tranzport_poll, + .llseek = no_llseek, }; /* diff --git a/drivers/staging/go7007/Kconfig b/drivers/staging/go7007/Kconfig index e47f683a323..75fa4680552 100644 --- a/drivers/staging/go7007/Kconfig +++ b/drivers/staging/go7007/Kconfig @@ -1,6 +1,7 @@ config VIDEO_GO7007 tristate "WIS GO7007 MPEG encoder support" depends on VIDEO_DEV && PCI && I2C && INPUT + depends on BKL # please fix depends on SND select VIDEOBUF_DMA_SG select VIDEO_IR diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index ff1d24720f1..8284297b30e 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c @@ -25,7 +25,7 @@ #include <linux/major.h> #include <linux/delay.h> #include <linux/hdreg.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/slab.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> @@ -124,6 +124,7 @@ struct blkvsc_driver_context { }; /* Static decl */ +static DEFINE_MUTEX(blkvsc_mutex); static int blkvsc_probe(struct device *dev); static int blkvsc_remove(struct device *device); static void blkvsc_shutdown(struct device *device); @@ -1309,7 +1310,7 @@ static int blkvsc_open(struct block_device *bdev, fmode_t mode) DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name); - lock_kernel(); + mutex_lock(&blkvsc_mutex); spin_lock(&blkdev->lock); if (!blkdev->users && blkdev->device_type == DVD_TYPE) { @@ -1321,7 +1322,7 @@ static int blkvsc_open(struct block_device *bdev, fmode_t mode) blkdev->users++; spin_unlock(&blkdev->lock); - unlock_kernel(); + mutex_unlock(&blkvsc_mutex); return 0; } @@ -1332,7 +1333,7 @@ static int blkvsc_release(struct gendisk *disk, fmode_t mode) DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name); - lock_kernel(); + mutex_lock(&blkvsc_mutex); spin_lock(&blkdev->lock); if (blkdev->users == 1) { 
spin_unlock(&blkdev->lock); @@ -1343,7 +1344,7 @@ static int blkvsc_release(struct gendisk *disk, fmode_t mode) blkdev->users--; spin_unlock(&blkdev->lock); - unlock_kernel(); + mutex_unlock(&blkvsc_mutex); return 0; } diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c index dd4d87a8bca..92a212f064b 100644 --- a/drivers/staging/iio/industrialio-core.c +++ b/drivers/staging/iio/industrialio-core.c @@ -349,6 +349,7 @@ static const struct file_operations iio_event_chrdev_fileops = { .release = iio_event_chrdev_release, .open = iio_event_chrdev_open, .owner = THIS_MODULE, + .llseek = noop_llseek, }; static void iio_event_dev_release(struct device *dev) diff --git a/drivers/staging/iio/industrialio-ring.c b/drivers/staging/iio/industrialio-ring.c index 6ab578e4f5f..1c5f67253b8 100644 --- a/drivers/staging/iio/industrialio-ring.c +++ b/drivers/staging/iio/industrialio-ring.c @@ -133,6 +133,7 @@ static const struct file_operations iio_ring_fileops = { .release = iio_ring_release, .open = iio_ring_open, .owner = THIS_MODULE, + .llseek = noop_llseek, }; /** diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c index 66493253042..ed5c5fe022c 100644 --- a/drivers/staging/lirc/lirc_imon.c +++ b/drivers/staging/lirc/lirc_imon.c @@ -115,7 +115,8 @@ static const struct file_operations display_fops = { .owner = THIS_MODULE, .open = &display_open, .write = &vfd_write, - .release = &display_close + .release = &display_close, + .llseek = noop_llseek, }; /* diff --git a/drivers/staging/lirc/lirc_it87.c b/drivers/staging/lirc/lirc_it87.c index ec11c0e949a..543c5c3bf90 100644 --- a/drivers/staging/lirc/lirc_it87.c +++ b/drivers/staging/lirc/lirc_it87.c @@ -342,6 +342,7 @@ static const struct file_operations lirc_fops = { .unlocked_ioctl = lirc_ioctl, .open = lirc_open, .release = lirc_close, + .llseek = noop_llseek, }; static int set_use_inc(void *data) diff --git a/drivers/staging/lirc/lirc_sasem.c b/drivers/staging/lirc/lirc_sasem.c index 73166c3f581..8f72a84f34e 100644 --- a/drivers/staging/lirc/lirc_sasem.c +++ b/drivers/staging/lirc/lirc_sasem.c @@ -125,6 +125,7 @@ static const struct file_operations vfd_fops = { .write = &vfd_write, .unlocked_ioctl = &vfd_ioctl, .release = &vfd_close, + .llseek = noop_llseek, }; /* USB Device ID for Sasem USB Control Board */ diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c index 9456f8e3f9e..8da38249261 100644 --- a/drivers/staging/lirc/lirc_serial.c +++ b/drivers/staging/lirc/lirc_serial.c @@ -1058,6 +1058,7 @@ static const struct file_operations lirc_fops = { .poll = lirc_dev_fop_poll, .open = lirc_dev_fop_open, .release = lirc_dev_fop_close, + .llseek = no_llseek, }; static struct lirc_driver driver = { diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c index eb08fa7138b..2478871bd95 100644 --- a/drivers/staging/lirc/lirc_sir.c +++ b/drivers/staging/lirc/lirc_sir.c @@ -459,6 +459,7 @@ static const struct file_operations lirc_fops = { .unlocked_ioctl = lirc_ioctl, .open = lirc_dev_fop_open, .release = lirc_dev_fop_close, + .llseek = no_llseek, }; static int set_use_inc(void *data) diff --git a/drivers/staging/memrar/memrar_handler.c b/drivers/staging/memrar/memrar_handler.c index a98b3f1f11e..cfcaa8e5b8e 100644 --- a/drivers/staging/memrar/memrar_handler.c +++ b/drivers/staging/memrar/memrar_handler.c @@ -890,6 +890,7 @@ static const struct file_operations memrar_fops = { .mmap = memrar_mmap, .open = memrar_open, .release = 
memrar_release, + .llseek = no_llseek, }; static struct miscdevice memrar_miscdev = { diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index 3221814a856..6885f9a4660 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -1631,6 +1631,7 @@ static const struct file_operations keypad_fops = { .read = keypad_read, /* read */ .open = keypad_open, /* open */ .release = keypad_release, /* close */ + .llseek = default_llseek, }; static struct miscdevice keypad_dev = { diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c index fa21a0fd8e8..c7932da03c5 100644 --- a/drivers/staging/spectra/ffsport.c +++ b/drivers/staging/spectra/ffsport.c @@ -27,7 +27,6 @@ #include <linux/kthread.h> #include <linux/log2.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <linux/slab.h> /**** Helper functions used for Div, Remainder operation on u64 ****/ @@ -590,14 +589,16 @@ int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode, return -ENOTTY; } +static DEFINE_MUTEX(ffsport_mutex); + int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret; - lock_kernel(); + mutex_lock(&ffsport_mutex); ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg); - unlock_kernel(); + mutex_unlock(&ffsport_mutex); return ret; } diff --git a/drivers/staging/ti-st/st.h b/drivers/staging/ti-st/st.h index 9952579425b..1b3060eb292 100644 --- a/drivers/staging/ti-st/st.h +++ b/drivers/staging/ti-st/st.h @@ -80,5 +80,4 @@ struct st_proto_s { extern long st_register(struct st_proto_s *); extern long st_unregister(enum proto_type); -extern struct platform_device *st_get_plat_device(void); #endif /* ST_H */ diff --git a/drivers/staging/ti-st/st_core.c b/drivers/staging/ti-st/st_core.c index 063c9b1db1a..b85d8bfdf60 100644 --- a/drivers/staging/ti-st/st_core.c +++ b/drivers/staging/ti-st/st_core.c @@ -38,7 +38,6 @@ #include "st_ll.h" #include "st.h" -#define VERBOSE /* strings to be used for rfkill entries and by * ST Core to be used for sysfs debug entry */ @@ -581,7 +580,7 @@ long st_register(struct st_proto_s *new_proto) long err = 0; unsigned long flags = 0; - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); pr_info("%s(%d) ", __func__, new_proto->type); if (st_gdata == NULL || new_proto == NULL || new_proto->recv == NULL || new_proto->reg_complete_cb == NULL) { @@ -713,7 +712,7 @@ long st_unregister(enum proto_type type) pr_debug("%s: %d ", __func__, type); - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); if (type < ST_BT || type >= ST_MAX) { pr_err(" protocol %d not supported", type); return -EPROTONOSUPPORT; @@ -767,7 +766,7 @@ long st_write(struct sk_buff *skb) #endif long len; - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); if (unlikely(skb == NULL || st_gdata == NULL || st_gdata->tty == NULL)) { pr_err("data/tty unavailable to perform write"); @@ -818,7 +817,7 @@ static int st_tty_open(struct tty_struct *tty) struct st_data_s *st_gdata; pr_info("%s ", __func__); - st_kim_ref(&st_gdata); + st_kim_ref(&st_gdata, 0); st_gdata->tty = tty; tty->disc_data = st_gdata; diff --git a/drivers/staging/ti-st/st_core.h b/drivers/staging/ti-st/st_core.h index e0c32d149f5..8601320a679 100644 --- a/drivers/staging/ti-st/st_core.h +++ b/drivers/staging/ti-st/st_core.h @@ -117,7 +117,7 @@ int st_core_init(struct st_data_s **); void st_core_exit(struct st_data_s *); /* ask for reference from KIM */ -void st_kim_ref(struct st_data_s **); +void st_kim_ref(struct st_data_s **, int); #define GPS_STUB_TEST 
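A second recurring pattern in this stretch is the Big Kernel Lock removal: blkvsc_drv.c and ffsport.c both drop <linux/smp_lock.h>, add a file-scope mutex, and wrap the previously BKL-protected sections with mutex_lock()/mutex_unlock(). A condensed sketch of that shape with hypothetical names (example_ioctl() stands in for the driver's existing handler, which the patches leave unchanged):

#include <linux/fs.h>
#include <linux/mutex.h>	/* replaces <linux/smp_lock.h> */

static DEFINE_MUTEX(example_mutex);	/* one mutex per converted driver */

/* Stand-in for the driver's existing, unchanged ioctl implementation. */
static int example_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long arg);

static int example_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg)
{
	int ret;

	mutex_lock(&example_mutex);	/* was lock_kernel() */
	ret = example_ioctl(bdev, mode, cmd, arg);
	mutex_unlock(&example_mutex);	/* was unlock_kernel() */

	return ret;
}

The many one-line .llseek = noop_llseek / no_llseek / default_llseek additions in the surrounding hunks serve the same effort: each file_operations now states its seek behaviour explicitly instead of relying on the old implicit default.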
#ifdef GPS_STUB_TEST diff --git a/drivers/staging/ti-st/st_kim.c b/drivers/staging/ti-st/st_kim.c index b4a6c7fdc4e..9e99463f76e 100644 --- a/drivers/staging/ti-st/st_kim.c +++ b/drivers/staging/ti-st/st_kim.c @@ -72,11 +72,26 @@ const unsigned char *protocol_names[] = { PROTO_ENTRY(ST_GPS, "GPS"), }; +#define MAX_ST_DEVICES 3 /* Imagine 1 on each UART for now */ +struct platform_device *st_kim_devices[MAX_ST_DEVICES]; /**********************************************************************/ /* internal functions */ /** + * st_get_plat_device - + * function which returns the reference to the platform device + * requested by id. As of now only 1 such device exists (id=0) + * the context requesting for reference can get the id to be + * requested by a. The protocol driver which is registering or + * b. the tty device which is opened. + */ +static struct platform_device *st_get_plat_device(int id) +{ + return st_kim_devices[id]; +} + +/** * validate_firmware_response - * function to return whether the firmware response was proper * in case of error don't complete so that waiting for proper @@ -353,7 +368,7 @@ void st_kim_chip_toggle(enum proto_type type, enum kim_gpio_state state) struct kim_data_s *kim_gdata; pr_info(" %s ", __func__); - kim_pdev = st_get_plat_device(); + kim_pdev = st_get_plat_device(0); kim_gdata = dev_get_drvdata(&kim_pdev->dev); if (kim_gdata->gpios[type] == -1) { @@ -574,12 +589,12 @@ static int kim_toggle_radio(void *data, bool blocked) * This would enable multiple such platform devices to exist * on a given platform */ -void st_kim_ref(struct st_data_s **core_data) +void st_kim_ref(struct st_data_s **core_data, int id) { struct platform_device *pdev; struct kim_data_s *kim_gdata; /* get kim_gdata reference from platform device */ - pdev = st_get_plat_device(); + pdev = st_get_plat_device(id); kim_gdata = dev_get_drvdata(&pdev->dev); *core_data = kim_gdata->core_data; } @@ -623,6 +638,7 @@ static int kim_probe(struct platform_device *pdev) long *gpios = pdev->dev.platform_data; struct kim_data_s *kim_gdata; + st_kim_devices[pdev->id] = pdev; kim_gdata = kzalloc(sizeof(struct kim_data_s), GFP_ATOMIC); if (!kim_gdata) { pr_err("no mem to allocate"); diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c index 7ee89492a75..7b3a7d04a10 100644 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c @@ -144,6 +144,7 @@ static const struct file_operations bridge_fops = { .release = bridge_release, .unlocked_ioctl = bridge_ioctl, .mmap = bridge_mmap, + .llseek = noop_llseek, }; #ifdef CONFIG_PM diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig index c725356cc34..de7ebb99d8f 100644 --- a/drivers/staging/tm6000/Kconfig +++ b/drivers/staging/tm6000/Kconfig @@ -1,6 +1,6 @@ config VIDEO_TM6000 tristate "TV Master TM5600/6000/6010 driver" - depends on VIDEO_DEV && I2C && INPUT && USB && EXPERIMENTAL + depends on VIDEO_DEV && I2C && INPUT && IR_CORE && USB && EXPERIMENTAL select VIDEO_TUNER select MEDIA_TUNER_XC2028 select MEDIA_TUNER_XC5000 diff --git a/drivers/staging/tm6000/tm6000-input.c b/drivers/staging/tm6000/tm6000-input.c index 32f7a0af693..54f7667cc70 100644 --- a/drivers/staging/tm6000/tm6000-input.c +++ b/drivers/staging/tm6000/tm6000-input.c @@ -46,7 +46,7 @@ MODULE_PARM_DESC(enable_ir, "enable ir (default is enable"); } struct tm6000_ir_poll_result { - u8 rc_data[4]; + u16 rc_data; }; struct tm6000_IR { @@ -60,9 +60,9 @@ struct 
tm6000_IR { int polling; struct delayed_work work; u8 wait:1; + u8 key:1; struct urb *int_urb; u8 *urb_data; - u8 key:1; int (*get_key) (struct tm6000_IR *, struct tm6000_ir_poll_result *); @@ -122,13 +122,14 @@ static void tm6000_ir_urb_received(struct urb *urb) if (urb->status != 0) printk(KERN_INFO "not ready\n"); - else if (urb->actual_length > 0) + else if (urb->actual_length > 0) { memcpy(ir->urb_data, urb->transfer_buffer, urb->actual_length); - dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0], - ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]); + dprintk("data %02x %02x %02x %02x\n", ir->urb_data[0], + ir->urb_data[1], ir->urb_data[2], ir->urb_data[3]); - ir->key = 1; + ir->key = 1; + } rc = usb_submit_urb(urb, GFP_ATOMIC); } @@ -140,30 +141,47 @@ static int default_polling_getkey(struct tm6000_IR *ir, int rc; u8 buf[2]; - if (ir->wait && !&dev->int_in) { - poll_result->rc_data[0] = 0xff; + if (ir->wait && !&dev->int_in) return 0; - } if (&dev->int_in) { - poll_result->rc_data[0] = ir->urb_data[0]; - poll_result->rc_data[1] = ir->urb_data[1]; + if (ir->ir.ir_type == IR_TYPE_RC5) + poll_result->rc_data = ir->urb_data[0]; + else + poll_result->rc_data = ir->urb_data[0] | ir->urb_data[1] << 8; } else { tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 0); msleep(10); tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 2, 1); msleep(10); - rc = tm6000_read_write_usb(dev, USB_DIR_IN | USB_TYPE_VENDOR | - USB_RECIP_DEVICE, REQ_02_GET_IR_CODE, 0, 0, buf, 1); + if (ir->ir.ir_type == IR_TYPE_RC5) { + rc = tm6000_read_write_usb(dev, USB_DIR_IN | + USB_TYPE_VENDOR | USB_RECIP_DEVICE, + REQ_02_GET_IR_CODE, 0, 0, buf, 1); - msleep(10); + msleep(10); - dprintk("read data=%02x\n", buf[0]); - if (rc < 0) - return rc; + dprintk("read data=%02x\n", buf[0]); + if (rc < 0) + return rc; - poll_result->rc_data[0] = buf[0]; + poll_result->rc_data = buf[0]; + } else { + rc = tm6000_read_write_usb(dev, USB_DIR_IN | + USB_TYPE_VENDOR | USB_RECIP_DEVICE, + REQ_02_GET_IR_CODE, 0, 0, buf, 2); + + msleep(10); + + dprintk("read data=%04x\n", buf[0] | buf[1] << 8); + if (rc < 0) + return rc; + + poll_result->rc_data = buf[0] | buf[1] << 8; + } + if ((poll_result->rc_data & 0x00ff) != 0xff) + ir->key = 1; } return 0; } @@ -180,12 +198,11 @@ static void tm6000_ir_handle_key(struct tm6000_IR *ir) return; } - dprintk("ir->get_key result data=%02x %02x\n", - poll_result.rc_data[0], poll_result.rc_data[1]); + dprintk("ir->get_key result data=%04x\n", poll_result.rc_data); - if (poll_result.rc_data[0] != 0xff && ir->key == 1) { + if (ir->key) { ir_input_keydown(ir->input->input_dev, &ir->ir, - poll_result.rc_data[0] | poll_result.rc_data[1] << 8); + (u32)poll_result.rc_data); ir_input_nokey(ir->input->input_dev, &ir->ir); ir->key = 0; diff --git a/drivers/staging/usbip/Kconfig b/drivers/staging/usbip/Kconfig index 2c1d10acb8b..b11ec379b5c 100644 --- a/drivers/staging/usbip/Kconfig +++ b/drivers/staging/usbip/Kconfig @@ -1,6 +1,6 @@ config USB_IP_COMMON tristate "USB IP support (EXPERIMENTAL)" - depends on USB && NET && EXPERIMENTAL + depends on USB && NET && EXPERIMENTAL && BKL default N ---help--- This enables pushing USB packets over IP to allow remote diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c index 0142338bcaf..4bdb8362de8 100644 --- a/drivers/staging/vt6655/wpactl.c +++ b/drivers/staging/vt6655/wpactl.c @@ -766,9 +766,14 @@ static int wpa_set_associate(PSDevice pDevice, DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len); - if 
(param->u.wpa_associate.wpa_ie && - copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) - return -EINVAL; + if (param->u.wpa_associate.wpa_ie_len) { + if (!param->u.wpa_associate.wpa_ie) + return -EINVAL; + if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE)) + return -EINVAL; + if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie, param->u.wpa_associate.wpa_ie_len)) + return -EFAULT; + } if (param->u.wpa_associate.mode == 1) pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA; diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c index 19c33545865..6555891e149 100644 --- a/drivers/staging/wlags49_h2/wl_cs.c +++ b/drivers/staging/wlags49_h2/wl_cs.c @@ -83,7 +83,6 @@ #include <linux/if_arp.h> #include <linux/ioport.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> @@ -147,10 +146,9 @@ static int wl_adapter_attach(struct pcmcia_device *link) link->resource[0]->end = HCF_NUM_IO_PORTS; link->resource[0]->flags= IO_DATA_PATH_WIDTH_16; - link->conf.Attributes = CONF_ENABLE_IRQ; - link->conf.IntType = INT_MEMORY_AND_IO; - link->conf.ConfigIndex = 5; - link->conf.Present = PRESENT_OPTION; + link->config_flags |= CONF_ENABLE_IRQ; + link->config_index = 5; + link->config_regs = PRESENT_OPTION; link->priv = dev; lp = wl_priv(dev); @@ -165,27 +163,6 @@ static int wl_adapter_attach(struct pcmcia_device *link) -/******************************************************************************* - * wl_adapter_detach() - ******************************************************************************* - * - * DESCRIPTION: - * - * This deletes a driver "instance". The device is de-registered with Card - * Services. If it has been released, then the net device is unregistered, and - * all local data structures are freed. Otherwise, the structures will be - * freed when the device is released. - * - * PARAMETERS: - * - * link - pointer to the dev_link_t structure representing the device to - * detach - * - * RETURNS: - * - * N/A - * - ******************************************************************************/ static void wl_adapter_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; @@ -209,26 +186,6 @@ static void wl_adapter_detach(struct pcmcia_device *link) /*============================================================================*/ -/******************************************************************************* - * wl_adapter_release() - ******************************************************************************* - * - * DESCRIPTION: - * - * After a card is removed, this routine will release the PCMCIA - * configuration. If the device is still open, this will be postponed until it - * is closed. - * - * PARAMETERS: - * - * arg - a u_long representing a pointer to a dev_link_t structure for the - * device to be released. 
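The vt6655 wpa_set_associate() change above is the standard pattern for pulling a variable-length blob in from userspace: bail out early when the length is zero, reject a null pointer or a length larger than the kernel buffer with -EINVAL, and report a faulting copy as -EFAULT. A minimal sketch of that pattern with hypothetical names (not the driver's own code; needs <linux/uaccess.h>):

static int copy_blob_from_user(u8 *dst, size_t dst_len,
                               const void __user *src, size_t src_len)
{
        if (!src_len)
                return 0;               /* nothing to copy is not an error */
        if (!src || src_len > dst_len)
                return -EINVAL;         /* bad pointer or oversized blob */
        if (copy_from_user(dst, src, src_len))
                return -EFAULT;         /* the copy itself faulted */
        return 0;
}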
- * - * RETURNS: - * - * N/A - * - ******************************************************************************/ void wl_adapter_release(struct pcmcia_device *link) { DBG_FUNC("wl_adapter_release"); @@ -268,26 +225,6 @@ static int wl_adapter_resume(struct pcmcia_device *link) return 0; } /* wl_adapter_resume */ -/******************************************************************************* - * wl_adapter_insert() - ******************************************************************************* - * - * DESCRIPTION: - * - * wl_adapter_insert() is scheduled to run after a CARD_INSERTION event is - * received, to configure the PCMCIA socket, and to make the ethernet device - * available to the system. - * - * PARAMETERS: - * - * link - pointer to the dev_link_t structure representing the device to - * insert - * - * RETURNS: - * - * N/A - * - ******************************************************************************/ void wl_adapter_insert(struct pcmcia_device *link) { struct net_device *dev; @@ -302,7 +239,7 @@ void wl_adapter_insert(struct pcmcia_device *link) dev = link->priv; /* Do we need to allocate an interrupt? */ - link->conf.Attributes |= CONF_ENABLE_IRQ; + link->config_flags |= CONF_ENABLE_IRQ; link->io_lines = 6; ret = pcmcia_request_io(link); @@ -313,7 +250,7 @@ void wl_adapter_insert(struct pcmcia_device *link) if (ret != 0) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret != 0) goto failed; @@ -457,9 +394,7 @@ MODULE_DEVICE_TABLE(pcmcia, wl_adapter_ids); static struct pcmcia_driver wlags49_driver = { .owner = THIS_MODULE, - .drv = { - .name = DRIVER_NAME, - }, + .name = DRIVER_NAME, .probe = wl_adapter_attach, .remove = wl_adapter_detach, .id_table = wl_adapter_ids, diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h index 02f0a20e178..cd129b3ee6c 100644 --- a/drivers/staging/wlags49_h2/wl_internal.h +++ b/drivers/staging/wlags49_h2/wl_internal.h @@ -69,7 +69,6 @@ ******************************************************************************/ #include <linux/version.h> #ifdef BUS_PCMCIA -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ciscode.h> diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c index 88d0d472142..8e3536acbf4 100644 --- a/drivers/staging/wlags49_h2/wl_main.c +++ b/drivers/staging/wlags49_h2/wl_main.c @@ -414,25 +414,6 @@ extern memimage fw_image; // firmware image to be downloaded #endif /* HCF_STA */ -/******************************************************************************* - * wl_insert() - ******************************************************************************* - * - * DESCRIPTION: - * - * wl_insert() is scheduled to run after a CARD_INSERTION event is - * received, to configure the PCMCIA socket, and to make the ethernet device - * available to the system. - * - * PARAMETERS: - * - * dev - a pointer to the net_device struct of the wireless device - * - * RETURNS: - * - * TRUE or FALSE - * - ******************************************************************************/ int wl_insert( struct net_device *dev ) { int result = 0; diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c index b53deee25d7..0d236f4bb8c 100644 --- a/drivers/telephony/ixj.c +++ b/drivers/telephony/ixj.c @@ -257,7 +257,7 @@ #include <linux/fs.h> /* everything... 
*/ #include <linux/errno.h> /* error codes */ #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/interrupt.h> @@ -277,6 +277,7 @@ #define TYPE(inode) (iminor(inode) >> 4) #define NUM(inode) (iminor(inode) & 0xf) +static DEFINE_MUTEX(ixj_mutex); static int ixjdebug; static int hertz = HZ; static int samplerate = 100; @@ -6655,9 +6656,9 @@ static long do_ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long ar static long ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg) { long ret; - lock_kernel(); + mutex_lock(&ixj_mutex); ret = do_ixj_ioctl(file_p, cmd, arg); - unlock_kernel(); + mutex_unlock(&ixj_mutex); return ret; } @@ -6676,7 +6677,8 @@ static const struct file_operations ixj_fops = .poll = ixj_poll, .unlocked_ioctl = ixj_ioctl, .release = ixj_release, - .fasync = ixj_fasync + .fasync = ixj_fasync, + .llseek = default_llseek, }; static int ixj_linetest(IXJ *j) diff --git a/drivers/telephony/ixj_pcmcia.c b/drivers/telephony/ixj_pcmcia.c index a1900e50251..d005b9eeebb 100644 --- a/drivers/telephony/ixj_pcmcia.c +++ b/drivers/telephony/ixj_pcmcia.c @@ -8,7 +8,6 @@ #include <linux/errno.h> /* error codes */ #include <linux/slab.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> @@ -32,9 +31,6 @@ static int ixj_probe(struct pcmcia_device *p_dev) { dev_dbg(&p_dev->dev, "ixj_attach()\n"); /* Create new ixj device */ - p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; - p_dev->conf.IntType = INT_MEMORY_AND_IO; p_dev->priv = kzalloc(sizeof(struct ixj_info_t), GFP_KERNEL); if (!p_dev->priv) { return -ENOMEM; @@ -111,40 +107,31 @@ failed: return; } -static int ixj_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int ixj_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - p_dev->io_lines = 3; - if (io->nwin == 2) { - p_dev->resource[1]->start = io->win[1].base; - p_dev->resource[1]->end = io->win[1].len; - } - if (!pcmcia_request_io(p_dev)) - return 0; - } - return -ENODEV; + p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->resource[1]->flags &= ~IO_DATA_PATH_WIDTH; + p_dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_8; + p_dev->io_lines = 3; + + return pcmcia_request_io(p_dev); } static int ixj_config(struct pcmcia_device * link) { IXJ *j; ixj_info_t *info; - cistpl_cftable_entry_t dflt = { 0 }; info = link->priv; dev_dbg(&link->dev, "ixj_config\n"); - if (pcmcia_loop_config(link, ixj_config_check, &dflt)) + link->config_flags = CONF_AUTO_SET_IO; + + if (pcmcia_loop_config(link, ixj_config_check, NULL)) goto failed; - if (pcmcia_request_configuration(link, &link->conf)) + if (pcmcia_enable_device(link)) goto failed; /* @@ -178,9 +165,7 @@ MODULE_DEVICE_TABLE(pcmcia, ixj_ids); static struct pcmcia_driver ixj_driver = { .owner = THIS_MODULE, - .drv = { - .name = "ixj_cs", - }, + .name = "ixj_cs", .probe = ixj_probe, .remove = ixj_detach, .id_table = ixj_ids, diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c index f3873f650bb..1915af20117 100644 --- a/drivers/telephony/phonedev.c +++ b/drivers/telephony/phonedev.c @@ -130,6 +130,7 @@ static const struct file_operations phone_fops = { .owner = THIS_MODULE, .open = phone_open, + .llseek = noop_llseek, }; /* diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index bff1afbde5a..4d3a6fd1a15 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -740,6 +740,7 @@ static const struct file_operations uio_fops = { .mmap = uio_mmap, .poll = uio_poll, .fasync = uio_fasync, + .llseek = noop_llseek, }; static int uio_major_init(void) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 094c76b5de1..6ee4451bfe2 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -584,7 +584,8 @@ static const struct file_operations wdm_fops = { .open = wdm_open, .flush = wdm_flush, .release = wdm_release, - .poll = wdm_poll + .poll = wdm_poll, + .llseek = noop_llseek, }; static struct usb_class_driver wdm_class = { diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index e325162859b..9eca4053312 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -1043,6 +1043,7 @@ static const struct file_operations usblp_fops = { .compat_ioctl = usblp_ioctl, .open = usblp_open, .release = usblp_release, + .llseek = noop_llseek, }; static char *usblp_devnode(struct device *dev, mode_t *mode) diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 3e7c1b800eb..6a54634ab82 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -987,6 +987,7 @@ static const struct file_operations fops = { .open = usbtmc_open, .release = usbtmc_release, .unlocked_ioctl = usbtmc_ioctl, + .llseek = default_llseek, }; static struct usb_class_driver usbtmc_class = { diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 7e594449600..9eed5b52d9d 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -91,12 +91,12 @@ config USB_DYNAMIC_MINORS If you are unsure about this, say N here. 
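The wlags49 wl_cs.c and ixj_cs conversions above, and the sl811_cs one further down, all follow the same modernised PCMCIA flow: drop the hand-rolled cistpl walking, set CONF_AUTO_* flags, let pcmcia_loop_config() apply each CIS entry, and finish with pcmcia_enable_device() instead of pcmcia_request_configuration(). A hedged sketch of that flow for a hypothetical foo_cs driver (per-driver details such as io_lines or the data path width differ):

static int foo_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
        /* the core has already applied the CIS entry; add local tweaks here */
        return pcmcia_request_io(p_dev);
}

static int foo_config(struct pcmcia_device *link)
{
        link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;

        if (pcmcia_loop_config(link, foo_config_check, NULL))
                return -ENODEV;

        /* replaces the old pcmcia_request_configuration(link, &link->conf) */
        return pcmcia_enable_device(link);
}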
config USB_SUSPEND - bool "USB runtime power management (suspend/resume and wakeup)" + bool "USB runtime power management (autosuspend) and wakeup" depends on USB && PM_RUNTIME help If you say Y here, you can use driver calls or the sysfs - "power/level" file to suspend or resume individual USB - peripherals and to enable or disable autosuspend (see + "power/control" file to enable or disable autosuspend for + individual USB peripherals (see Documentation/usb/power-management.txt for more details). Also, USB "remote wakeup" signaling is supported, whereby some diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index f06f5dbc8cd..9fe34fb78ef 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -59,6 +59,7 @@ static int usb_open(struct inode * inode, struct file * file) static const struct file_operations usb_fops = { .owner = THIS_MODULE, .open = usb_open, + .llseek = noop_llseek, }; static struct usb_class { @@ -159,9 +160,9 @@ void usb_major_cleanup(void) int usb_register_dev(struct usb_interface *intf, struct usb_class_driver *class_driver) { - int retval = -EINVAL; + int retval; int minor_base = class_driver->minor_base; - int minor = 0; + int minor; char name[20]; char *temp; @@ -173,12 +174,17 @@ int usb_register_dev(struct usb_interface *intf, */ minor_base = 0; #endif - intf->minor = -1; - - dbg ("looking for a minor, starting at %d", minor_base); if (class_driver->fops == NULL) - goto exit; + return -EINVAL; + if (intf->minor >= 0) + return -EADDRINUSE; + + retval = init_usb_class(); + if (retval) + return retval; + + dev_dbg(&intf->dev, "looking for a minor, starting at %d", minor_base); down_write(&minor_rwsem); for (minor = minor_base; minor < MAX_USB_MINORS; ++minor) { @@ -186,20 +192,12 @@ int usb_register_dev(struct usb_interface *intf, continue; usb_minors[minor] = class_driver->fops; - - retval = 0; + intf->minor = minor; break; } up_write(&minor_rwsem); - - if (retval) - goto exit; - - retval = init_usb_class(); - if (retval) - goto exit; - - intf->minor = minor; + if (intf->minor < 0) + return -EXFULL; /* create a usb class device for this usb interface */ snprintf(name, sizeof(name), class_driver->name, minor - minor_base); @@ -213,11 +211,11 @@ int usb_register_dev(struct usb_interface *intf, "%s", temp); if (IS_ERR(intf->usb_dev)) { down_write(&minor_rwsem); - usb_minors[intf->minor] = NULL; + usb_minors[minor] = NULL; + intf->minor = -1; up_write(&minor_rwsem); retval = PTR_ERR(intf->usb_dev); } -exit: return retval; } EXPORT_SYMBOL_GPL(usb_register_dev); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 844683e5038..9f0ce7de0e3 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1802,6 +1802,7 @@ free_interfaces: intf->dev.groups = usb_interface_groups; intf->dev.dma_mask = dev->dev.dma_mask; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); + intf->minor = -1; device_initialize(&intf->dev); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c index 53e120208e9..2b98bd26364 100644 --- a/drivers/usb/gadget/f_hid.c +++ b/drivers/usb/gadget/f_hid.c @@ -451,6 +451,7 @@ const struct file_operations f_hidg_fops = { .write = f_hidg_write, .read = f_hidg_read, .poll = f_hidg_poll, + .llseek = noop_llseek, }; static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f) diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index fc35406fc80..3f1d771c8be 100644 --- 
a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -33,7 +33,6 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> -#include <linux/smp_lock.h> #include <linux/device.h> #include <linux/moduleparam.h> diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index cf241c371a7..327a92a137b 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -884,7 +884,8 @@ static const struct file_operations printer_io_operations = { .fsync = printer_fsync, .poll = printer_poll, .unlocked_ioctl = printer_ioctl, - .release = printer_close + .release = printer_close, + .llseek = noop_llseek, }; /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index 76b7fd2d838..86afdc73322 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -369,18 +369,21 @@ static const struct file_operations debug_async_fops = { .open = debug_async_open, .read = debug_output, .release = debug_close, + .llseek = default_llseek, }; static const struct file_operations debug_periodic_fops = { .owner = THIS_MODULE, .open = debug_periodic_open, .read = debug_output, .release = debug_close, + .llseek = default_llseek, }; static const struct file_operations debug_registers_fops = { .owner = THIS_MODULE, .open = debug_registers_open, .read = debug_output, .release = debug_close, + .llseek = default_llseek, }; static const struct file_operations debug_lpm_fops = { .owner = THIS_MODULE, @@ -388,6 +391,7 @@ static const struct file_operations debug_lpm_fops = { .read = debug_lpm_read, .write = debug_lpm_write, .release = debug_lpm_close, + .llseek = noop_llseek, }; static struct dentry *ehci_debug_root; diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 58b72d741d9..a1e8d273103 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -119,6 +119,11 @@ static int ehci_pci_setup(struct usb_hcd *hcd) ehci->broken_periodic = 1; ehci_info(ehci, "using broken periodic workaround\n"); } + if (pdev->device == 0x0806 || pdev->device == 0x0811 + || pdev->device == 0x0829) { + ehci_info(ehci, "disable lpm for langwell/penwell\n"); + ehci->has_lpm = 0; + } break; case PCI_VENDOR_ID_TDI: if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c index 36abd2baa3e..d7d34492934 100644 --- a/drivers/usb/host/ohci-dbg.c +++ b/drivers/usb/host/ohci-dbg.c @@ -413,18 +413,21 @@ static const struct file_operations debug_async_fops = { .open = debug_async_open, .read = debug_output, .release = debug_close, + .llseek = default_llseek, }; static const struct file_operations debug_periodic_fops = { .owner = THIS_MODULE, .open = debug_periodic_open, .read = debug_output, .release = debug_close, + .llseek = default_llseek, }; static const struct file_operations debug_registers_fops = { .owner = THIS_MODULE, .open = debug_registers_open, .read = debug_output, .release = debug_close, + .llseek = default_llseek, }; static struct dentry *ohci_debug_root; diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c index 41816389477..afef7b0a419 100644 --- a/drivers/usb/host/ohci-pxa27x.c +++ b/drivers/usb/host/ohci-pxa27x.c @@ -24,6 +24,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <mach/ohci.h> +#include <mach/pxa3xx-u2d.h> /* * UHC: USB Host Controller (OHCI-like) register definitions @@ -235,6 +236,9 @@ static int 
pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev) if (retval < 0) return retval; + if (cpu_is_pxa3xx()) + pxa3xx_u2d_start_hc(&ohci_to_hcd(&ohci->ohci)->self); + uhchr = __raw_readl(ohci->mmio_base + UHCHR) & ~UHCHR_SSE; __raw_writel(uhchr, ohci->mmio_base + UHCHR); __raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, ohci->mmio_base + UHCHIE); @@ -251,6 +255,9 @@ static void pxa27x_stop_hc(struct pxa27x_ohci *ohci, struct device *dev) inf = dev->platform_data; + if (cpu_is_pxa3xx()) + pxa3xx_u2d_stop_hc(&ohci_to_hcd(&ohci->ohci)->self); + if (inf->exit) inf->exit(dev); diff --git a/drivers/usb/host/sl811_cs.c b/drivers/usb/host/sl811_cs.c index 0e13a00eb2e..3775c035a6c 100644 --- a/drivers/usb/host/sl811_cs.c +++ b/drivers/usb/host/sl811_cs.c @@ -20,7 +20,6 @@ #include <linux/ioport.h> #include <linux/platform_device.h> -#include <pcmcia/cs.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> @@ -132,49 +131,12 @@ static void sl811_cs_release(struct pcmcia_device * link) platform_device_unregister(&platform_dev); } -static int sl811_cs_config_check(struct pcmcia_device *p_dev, - cistpl_cftable_entry_t *cfg, - cistpl_cftable_entry_t *dflt, - unsigned int vcc, - void *priv_data) +static int sl811_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { - if (cfg->index == 0) - return -ENODEV; - - /* Use power settings for Vcc and Vpp if present */ - /* Note that the CIS values need to be rescaled */ - if (cfg->vcc.present & (1<<CISTPL_POWER_VNOM)) { - if (cfg->vcc.param[CISTPL_POWER_VNOM]/10000 != vcc) - return -ENODEV; - } else if (dflt->vcc.present & (1<<CISTPL_POWER_VNOM)) { - if (dflt->vcc.param[CISTPL_POWER_VNOM]/10000 != vcc) - return -ENODEV; - } - - if (cfg->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = - cfg->vpp1.param[CISTPL_POWER_VNOM]/10000; - else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) - p_dev->conf.Vpp = - dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; - - /* we need an interrupt */ - p_dev->conf.Attributes |= CONF_ENABLE_IRQ; - - /* IO window settings */ - p_dev->resource[0]->end = p_dev->resource[1]->end = 0; - if ((cfg->io.nwin > 0) || (dflt->io.nwin > 0)) { - cistpl_io_t *io = (cfg->io.nwin) ? 
&cfg->io : &dflt->io; - p_dev->io_lines = io->flags & CISTPL_IO_LINES_MASK; - - p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; - p_dev->resource[0]->start = io->win[0].base; - p_dev->resource[0]->end = io->win[0].len; - - return pcmcia_request_io(p_dev); - } - pcmcia_disable_device(p_dev); - return -ENODEV; + if (p_dev->config_index == 0) + return -EINVAL; + + return pcmcia_request_io(p_dev); } @@ -185,6 +147,9 @@ static int sl811_cs_config(struct pcmcia_device *link) dev_dbg(&link->dev, "sl811_cs_config\n"); + link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP | + CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO; + if (pcmcia_loop_config(link, sl811_cs_config_check, NULL)) goto failed; @@ -195,18 +160,10 @@ static int sl811_cs_config(struct pcmcia_device *link) if (!link->irq) goto failed; - ret = pcmcia_request_configuration(link, &link->conf); + ret = pcmcia_enable_device(link); if (ret) goto failed; - dev_info(&link->dev, "index 0x%02x: ", - link->conf.ConfigIndex); - if (link->conf.Vpp) - printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); - printk(", irq %d", link->irq); - printk(", io %pR", link->resource[0]); - printk("\n"); - if (sl811_hc_init(parent, link->resource[0]->start, link->irq) < 0) { failed: @@ -227,9 +184,6 @@ static int sl811_cs_probe(struct pcmcia_device *link) local->p_dev = link; link->priv = local; - link->conf.Attributes = 0; - link->conf.IntType = INT_MEMORY_AND_IO; - return sl811_cs_config(link); } @@ -241,9 +195,7 @@ MODULE_DEVICE_TABLE(pcmcia, sl811_ids); static struct pcmcia_driver sl811_cs_driver = { .owner = THIS_MODULE, - .drv = { - .name = "sl811_cs", - }, + .name = "sl811_cs", .probe = sl811_cs_probe, .remove = sl811_cs_detach, .id_table = sl811_ids, diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index e192e8f7c56..575b56c79e9 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c @@ -963,6 +963,7 @@ static const struct file_operations mdc800_device_ops = .write = mdc800_device_write, .open = mdc800_device_open, .release = mdc800_device_release, + .llseek = noop_llseek, }; diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 801324af947..44f8b922505 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -679,6 +679,7 @@ static const struct file_operations adu_fops = { .write = adu_write, .open = adu_open, .release = adu_release, + .llseek = noop_llseek, }; /* diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index a54c3cb804c..c6184b4d169 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -105,6 +105,7 @@ static const struct file_operations idmouse_fops = { .read = idmouse_read, .open = idmouse_open, .release = idmouse_release, + .llseek = default_llseek, }; /* class driver information */ diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index bc88c79875a..9b50db25701 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -730,6 +730,7 @@ static const struct file_operations iowarrior_fops = { .open = iowarrior_open, .release = iowarrior_release, .poll = iowarrior_poll, + .llseek = noop_llseek, }; static char *iowarrior_devnode(struct device *dev, mode_t *mode) diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index dd41d871004..edffef64233 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -613,6 +613,7 @@ static const struct file_operations ld_usb_fops = { .open = ld_usb_open, .release = ld_usb_release, .poll = ld_usb_poll, + .llseek = 
no_llseek, }; /* diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index cc13ae61712..4e23d3841b4 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -439,6 +439,7 @@ static const struct file_operations usb_rio_fops = { .unlocked_ioctl = ioctl_rio, .open = open_rio, .release = close_rio, + .llseek = noop_llseek, }; static struct usb_class_driver usb_rio_class = { diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index d00dde19194..51648154bb4 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c @@ -282,6 +282,7 @@ static const struct file_operations lcd_fops = { .open = lcd_open, .unlocked_ioctl = lcd_ioctl, .release = lcd_release, + .llseek = noop_llseek, }; /* diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index 59dc3d351b6..5ab5bb89bae 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -322,6 +322,7 @@ cppi_channel_allocate(struct dma_controller *c, index, transmit ? 'T' : 'R', cppi_ch); cppi_ch->hw_ep = ep; cppi_ch->channel.status = MUSB_DMA_STATUS_FREE; + cppi_ch->channel.max_len = 0x7fffffff; DBG(4, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R'); return &cppi_ch->channel; diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index c79a5e30d43..9e8639d4e86 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -195,15 +195,14 @@ static const struct file_operations musb_regdump_fops = { static int musb_test_mode_open(struct inode *inode, struct file *file) { - file->private_data = inode->i_private; - return single_open(file, musb_test_mode_show, inode->i_private); } static ssize_t musb_test_mode_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { - struct musb *musb = file->private_data; + struct seq_file *s = file->private_data; + struct musb *musb = s->private; u8 test = 0; char buf[18]; diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 6fca870e957..d065e23f123 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -300,6 +300,11 @@ static void txstate(struct musb *musb, struct musb_request *req) #ifndef CONFIG_MUSB_PIO_ONLY if (is_dma_capable() && musb_ep->dma) { struct dma_controller *c = musb->dma_controller; + size_t request_size; + + /* setup DMA, then program endpoint CSR */ + request_size = min_t(size_t, request->length - request->actual, + musb_ep->dma->max_len); use_dma = (request->dma != DMA_ADDR_INVALID); @@ -307,11 +312,6 @@ static void txstate(struct musb *musb, struct musb_request *req) #ifdef CONFIG_USB_INVENTRA_DMA { - size_t request_size; - - /* setup DMA, then program endpoint CSR */ - request_size = min_t(size_t, request->length, - musb_ep->dma->max_len); if (request_size < musb_ep->packet_sz) musb_ep->dma->desired_mode = 0; else @@ -373,8 +373,8 @@ static void txstate(struct musb *musb, struct musb_request *req) use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, 0, - request->dma, - request->length); + request->dma + request->actual, + request_size); if (!use_dma) { c->channel_release(musb_ep->dma); musb_ep->dma = NULL; @@ -386,8 +386,8 @@ static void txstate(struct musb *musb, struct musb_request *req) use_dma = use_dma && c->channel_program( musb_ep->dma, musb_ep->packet_sz, request->zero, - request->dma, - request->length); + request->dma + request->actual, + request_size); #endif } #endif @@ -501,26 +501,14 @@ void musb_g_tx(struct musb *musb, u8 epnum) 
request->zero = 0; } - /* ... or if not, then complete it. */ - musb_g_giveback(musb_ep, request, 0); - - /* - * Kickstart next transfer if appropriate; - * the packet that just completed might not - * be transmitted for hours or days. - * REVISIT for double buffering... - * FIXME revisit for stalls too... - */ - musb_ep_select(mbase, epnum); - csr = musb_readw(epio, MUSB_TXCSR); - if (csr & MUSB_TXCSR_FIFONOTEMPTY) - return; - - request = musb_ep->desc ? next_request(musb_ep) : NULL; - if (!request) { - DBG(4, "%s idle now\n", - musb_ep->end_point.name); - return; + if (request->actual == request->length) { + musb_g_giveback(musb_ep, request, 0); + request = musb_ep->desc ? next_request(musb_ep) : NULL; + if (!request) { + DBG(4, "%s idle now\n", + musb_ep->end_point.name); + return; + } } } @@ -568,11 +556,19 @@ static void rxstate(struct musb *musb, struct musb_request *req) { const u8 epnum = req->epnum; struct usb_request *request = &req->request; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; unsigned fifo_count = 0; - u16 len = musb_ep->packet_sz; + u16 len; u16 csr = musb_readw(epio, MUSB_RXCSR); + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; + + len = musb_ep->packet_sz; /* We shouldn't get here while DMA is active, but we do... */ if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { @@ -647,8 +643,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) */ csr |= MUSB_RXCSR_DMAENAB; -#ifdef USE_MODE1 csr |= MUSB_RXCSR_AUTOCLEAR; +#ifdef USE_MODE1 /* csr |= MUSB_RXCSR_DMAMODE; */ /* this special sequence (enabling and then @@ -663,10 +659,11 @@ static void rxstate(struct musb *musb, struct musb_request *req) if (request->actual < request->length) { int transfer_size = 0; #ifdef USE_MODE1 - transfer_size = min(request->length, + transfer_size = min(request->length - request->actual, channel->max_len); #else - transfer_size = len; + transfer_size = min(request->length - request->actual, + (unsigned)len); #endif if (transfer_size <= musb_ep->packet_sz) musb_ep->dma->desired_mode = 0; @@ -740,9 +737,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) u16 csr; struct usb_request *request; void __iomem *mbase = musb->mregs; - struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_out; + struct musb_ep *musb_ep; void __iomem *epio = musb->endpoints[epnum].regs; struct dma_channel *dma; + struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; + + if (hw_ep->is_shared_fifo) + musb_ep = &hw_ep->ep_in; + else + musb_ep = &hw_ep->ep_out; musb_ep_select(mbase, epnum); @@ -1081,7 +1084,7 @@ struct free_record { /* * Context: controller locked, IRQs blocked. */ -static void musb_ep_restart(struct musb *musb, struct musb_request *req) +void musb_ep_restart(struct musb *musb, struct musb_request *req) { DBG(3, "<== %s request %p len %u on hw_ep%d\n", req->tx ? 
"TX/IN" : "RX/OUT", diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h index c8b140325d8..572b1da7f2d 100644 --- a/drivers/usb/musb/musb_gadget.h +++ b/drivers/usb/musb/musb_gadget.h @@ -105,4 +105,6 @@ extern void musb_gadget_cleanup(struct musb *); extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); +extern void musb_ep_restart(struct musb *, struct musb_request *); + #endif /* __MUSB_GADGET_H */ diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c index 59bef8f3a35..6dd03f4c5f4 100644 --- a/drivers/usb/musb/musb_gadget_ep0.c +++ b/drivers/usb/musb/musb_gadget_ep0.c @@ -261,6 +261,7 @@ __acquires(musb->lock) ctrlrequest->wIndex & 0x0f; struct musb_ep *musb_ep; struct musb_hw_ep *ep; + struct musb_request *request; void __iomem *regs; int is_in; u16 csr; @@ -302,6 +303,14 @@ __acquires(musb->lock) musb_writew(regs, MUSB_RXCSR, csr); } + /* Maybe start the first request in the queue */ + request = to_musb_request( + next_request(musb_ep)); + if (!musb_ep->busy && request) { + DBG(3, "restarting the request\n"); + musb_ep_restart(musb, request); + } + /* select ep0 again */ musb_ep_select(mbase, 0); } break; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 877d20b1dff..9e65c47cc98 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -660,6 +660,12 @@ static bool musb_tx_dma_program(struct dma_controller *dma, qh->segsize = length; + /* + * Ensure the data reaches to main memory before starting + * DMA transfer + */ + wmb(); + if (!dma->channel_program(channel, pkt_size, mode, urb->transfer_dma + offset, length)) { dma->channel_release(channel); diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c index 05aaac1c386..0bc97698af1 100644 --- a/drivers/usb/otg/twl4030-usb.c +++ b/drivers/usb/otg/twl4030-usb.c @@ -347,11 +347,20 @@ static void twl4030_i2c_access(struct twl4030_usb *twl, int on) } } -static void twl4030_phy_power(struct twl4030_usb *twl, int on) +static void __twl4030_phy_power(struct twl4030_usb *twl, int on) { - u8 pwr; + u8 pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); + + if (on) + pwr &= ~PHY_PWR_PHYPWD; + else + pwr |= PHY_PWR_PHYPWD; - pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); + WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); +} + +static void twl4030_phy_power(struct twl4030_usb *twl, int on) +{ if (on) { regulator_enable(twl->usb3v1); regulator_enable(twl->usb1v8); @@ -365,15 +374,13 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on) twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); regulator_enable(twl->usb1v5); - pwr &= ~PHY_PWR_PHYPWD; - WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); + __twl4030_phy_power(twl, 1); twl4030_usb_write(twl, PHY_CLK_CTRL, twl4030_usb_read(twl, PHY_CLK_CTRL) | (PHY_CLK_CTRL_CLOCKGATING_EN | PHY_CLK_CTRL_CLK32K_EN)); - } else { - pwr |= PHY_PWR_PHYPWD; - WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0); + } else { + __twl4030_phy_power(twl, 0); regulator_disable(twl->usb1v5); regulator_disable(twl->usb1v8); regulator_disable(twl->usb3v1); @@ -387,19 +394,25 @@ static void twl4030_phy_suspend(struct twl4030_usb *twl, int controller_off) twl4030_phy_power(twl, 0); twl->asleep = 1; + dev_dbg(twl->dev, "%s\n", __func__); } -static void twl4030_phy_resume(struct twl4030_usb *twl) +static void __twl4030_phy_resume(struct twl4030_usb *twl) { - if (!twl->asleep) - return; - twl4030_phy_power(twl, 1); 
twl4030_i2c_access(twl, 1); twl4030_usb_set_mode(twl, twl->usb_mode); if (twl->usb_mode == T2_USB_MODE_ULPI) twl4030_i2c_access(twl, 0); +} + +static void twl4030_phy_resume(struct twl4030_usb *twl) +{ + if (!twl->asleep) + return; + __twl4030_phy_resume(twl); twl->asleep = 0; + dev_dbg(twl->dev, "%s\n", __func__); } static int twl4030_usb_ldo_init(struct twl4030_usb *twl) @@ -408,8 +421,8 @@ static int twl4030_usb_ldo_init(struct twl4030_usb *twl) twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0xC0, PROTECT_KEY); twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0x0C, PROTECT_KEY); - /* put VUSB3V1 LDO in active state */ - twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2); + /* Keep VUSB3V1 LDO in sleep state until VBUS/ID change detected*/ + /*twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0, VUSB_DEDICATED2);*/ /* input to VUSB3V1 LDO is from VBAT, not VBUS */ twl_i2c_write_u8(TWL4030_MODULE_PM_RECEIVER, 0x14, VUSB_DEDICATED1); @@ -502,6 +515,26 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) return IRQ_HANDLED; } +static void twl4030_usb_phy_init(struct twl4030_usb *twl) +{ + int status; + + status = twl4030_usb_linkstat(twl); + if (status >= 0) { + if (status == USB_EVENT_NONE) { + __twl4030_phy_power(twl, 0); + twl->asleep = 1; + } else { + __twl4030_phy_resume(twl); + twl->asleep = 0; + } + + blocking_notifier_call_chain(&twl->otg.notifier, status, + twl->otg.gadget); + } + sysfs_notify(&twl->dev->kobj, NULL, "vbus"); +} + static int twl4030_set_suspend(struct otg_transceiver *x, int suspend) { struct twl4030_usb *twl = xceiv_to_twl(x); @@ -550,7 +583,6 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) struct twl4030_usb_data *pdata = pdev->dev.platform_data; struct twl4030_usb *twl; int status, err; - u8 pwr; if (!pdata) { dev_dbg(&pdev->dev, "platform_data not available\n"); @@ -569,10 +601,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) twl->otg.set_peripheral = twl4030_set_peripheral; twl->otg.set_suspend = twl4030_set_suspend; twl->usb_mode = pdata->usb_mode; - - pwr = twl4030_usb_read(twl, PHY_PWR_CTRL); - - twl->asleep = (pwr & PHY_PWR_PHYPWD); + twl->asleep = 1; /* init spinlock for workqueue */ spin_lock_init(&twl->lock); @@ -610,15 +639,10 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) return status; } - /* The IRQ handler just handles changes from the previous states - * of the ID and VBUS pins ... in probe() we must initialize that - * previous state. The easy way: fake an IRQ. - * - * REVISIT: a real IRQ might have happened already, if PREEMPT is - * enabled. Else the IRQ may not yet be configured or enabled, - * because of scheduling delays. + /* Power down phy or make it work according to + * current link state. 
*/ - twl4030_usb_irq(twl->irq, twl); + twl4030_usb_phy_init(twl); dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); return 0; diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 30922a7e334..aa665817a27 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -2024,6 +2024,9 @@ static int mos7720_ioctl(struct tty_struct *tty, struct file *file, case TIOCGICOUNT: cnow = mos7720_port->icount; + + memset(&icount, 0, sizeof(struct serial_icounter_struct)); + icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 1c9b6e9b238..1a42bc21379 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -2285,6 +2285,9 @@ static int mos7840_ioctl(struct tty_struct *tty, struct file *file, case TIOCGICOUNT: cnow = mos7840_port->icount; smp_rmb(); + + memset(&icount, 0, sizeof(struct serial_icounter_struct)); + icount.cts = cnow.cts; icount.dsr = cnow.dsr; icount.rng = cnow.rng; diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index 552679b8dbd..e24ce312307 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -507,6 +507,7 @@ static const struct file_operations skel_fops = { .open = skel_open, .release = skel_release, .flush = skel_flush, + .llseek = noop_llseek, }; /* diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 29e850a7a2f..861af4a8b79 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -127,7 +127,10 @@ static void handle_tx(struct vhost_net *net) size_t len, total_len = 0; int err, wmem; size_t hdr_size; - struct socket *sock = rcu_dereference(vq->private_data); + struct socket *sock; + + sock = rcu_dereference_check(vq->private_data, + lockdep_is_held(&vq->mutex)); if (!sock) return; @@ -243,7 +246,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, int r, nlogs = 0; while (datalen > 0) { - if (unlikely(headcount >= VHOST_NET_MAX_SG)) { + if (unlikely(seg >= VHOST_NET_MAX_SG)) { r = -ENOBUFS; goto err; } @@ -582,7 +585,10 @@ static void vhost_net_disable_vq(struct vhost_net *n, static void vhost_net_enable_vq(struct vhost_net *n, struct vhost_virtqueue *vq) { - struct socket *sock = vq->private_data; + struct socket *sock; + + sock = rcu_dereference_protected(vq->private_data, + lockdep_is_held(&vq->mutex)); if (!sock) return; if (vq == n->vqs + VHOST_NET_VQ_TX) { @@ -598,7 +604,8 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n, struct socket *sock; mutex_lock(&vq->mutex); - sock = vq->private_data; + sock = rcu_dereference_protected(vq->private_data, + lockdep_is_held(&vq->mutex)); vhost_net_disable_vq(n, vq); rcu_assign_pointer(vq->private_data, NULL); mutex_unlock(&vq->mutex); @@ -736,7 +743,8 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd) } /* start polling new socket */ - oldsock = vq->private_data; + oldsock = rcu_dereference_protected(vq->private_data, + lockdep_is_held(&vq->mutex)); if (sock != oldsock) { vhost_net_disable_vq(n, vq); rcu_assign_pointer(vq->private_data, sock); @@ -869,6 +877,7 @@ static const struct file_operations vhost_net_fops = { .compat_ioctl = vhost_net_compat_ioctl, #endif .open = vhost_net_open, + .llseek = noop_llseek, }; static struct miscdevice vhost_net_misc = { diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index c579dcc9200..8b5a1b33d0f 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -320,7 +320,7 @@ long vhost_dev_reset_owner(struct 
vhost_dev *dev) vhost_dev_cleanup(dev); memory->nregions = 0; - dev->memory = memory; + RCU_INIT_POINTER(dev->memory, memory); return 0; } @@ -352,8 +352,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev) fput(dev->log_file); dev->log_file = NULL; /* No one will access memory at this point */ - kfree(dev->memory); - dev->memory = NULL; + kfree(rcu_dereference_protected(dev->memory, + lockdep_is_held(&dev->mutex))); + RCU_INIT_POINTER(dev->memory, NULL); if (dev->mm) mmput(dev->mm); dev->mm = NULL; @@ -440,14 +441,22 @@ static int vq_access_ok(unsigned int num, /* Caller should have device mutex but not vq mutex */ int vhost_log_access_ok(struct vhost_dev *dev) { - return memory_access_ok(dev, dev->memory, 1); + struct vhost_memory *mp; + + mp = rcu_dereference_protected(dev->memory, + lockdep_is_held(&dev->mutex)); + return memory_access_ok(dev, mp, 1); } /* Verify access for write logging. */ /* Caller should have vq mutex and device mutex */ static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base) { - return vq_memory_access_ok(log_base, vq->dev->memory, + struct vhost_memory *mp; + + mp = rcu_dereference_protected(vq->dev->memory, + lockdep_is_held(&vq->mutex)); + return vq_memory_access_ok(log_base, mp, vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) && (!vq->log_used || log_access_ok(log_base, vq->log_addr, sizeof *vq->used + @@ -487,7 +496,8 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) kfree(newmem); return -EFAULT; } - oldmem = d->memory; + oldmem = rcu_dereference_protected(d->memory, + lockdep_is_held(&d->mutex)); rcu_assign_pointer(d->memory, newmem); synchronize_rcu(); kfree(oldmem); @@ -858,11 +868,12 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, if (r < 0) return r; len -= l; - if (!len) + if (!len) { + if (vq->log_ctx) + eventfd_signal(vq->log_ctx, 1); return 0; + } } - if (vq->log_ctx) - eventfd_signal(vq->log_ctx, 1); /* Length written exceeds what we have stored. This is a bug. */ BUG(); return 0; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index afd77295971..af3c11ded5f 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -106,7 +106,7 @@ struct vhost_virtqueue { * vhost_work execution acts instead of rcu_read_lock() and the end of * vhost_work execution acts instead of rcu_read_lock(). * Writers use virtqueue mutex. */ - void *private_data; + void __rcu *private_data; /* Log write descriptors */ void __user *log_base; struct vhost_log log[VHOST_NET_MAX_SG]; @@ -116,7 +116,7 @@ struct vhost_dev { /* Readers use RCU to access memory table pointer * log base pointer and features. 
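The vhost hunks around this point all apply one pattern: pointers published to RCU readers gain a __rcu annotation, and code paths that read them while holding the guarding mutex switch from a plain load to rcu_dereference_protected() (or rcu_dereference_check()) with lockdep_is_held(), so sparse and lockdep can verify the locking claim instead of taking it on faith. A minimal sketch with illustrative names, not vhost's own structures:

struct foo {
        struct mutex    mutex;
        void __rcu      *cfg;
};

/* writer side: the mutex is held, so the "protected" form is legitimate */
static void foo_replace_cfg(struct foo *f, void *new_cfg)
{
        void *old;

        mutex_lock(&f->mutex);
        old = rcu_dereference_protected(f->cfg, lockdep_is_held(&f->mutex));
        rcu_assign_pointer(f->cfg, new_cfg);
        mutex_unlock(&f->mutex);

        synchronize_rcu();      /* let readers drain before freeing the old copy */
        kfree(old);
}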
* Writers use mutex below.*/ - struct vhost_memory *memory; + struct vhost_memory __rcu *memory; struct mm_struct *mm; struct mutex mutex; unsigned acked_features; @@ -173,7 +173,11 @@ enum { static inline int vhost_has_feature(struct vhost_dev *dev, int bit) { - unsigned acked_features = rcu_dereference(dev->acked_features); + unsigned acked_features; + + acked_features = + rcu_dereference_index_check(dev->acked_features, + lockdep_is_held(&dev->mutex)); return acked_features & (1 << bit); } diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 84f842331df..7ccc967831f 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -3508,7 +3508,7 @@ static void fbcon_exit(void) softback_buf = 0UL; for (i = 0; i < FB_MAX; i++) { - int pending; + int pending = 0; mapped = 0; info = registered_fb[i]; @@ -3516,7 +3516,8 @@ static void fbcon_exit(void) if (info == NULL) continue; - pending = cancel_work_sync(&info->queue); + if (info->queue.func) + pending = cancel_work_sync(&info->queue); DPRINTK("fbcon: %s pending work\n", (pending ? "canceled" : "no")); diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 815f84b0793..70477c2e4b6 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c @@ -13,7 +13,7 @@ #include <linux/platform_device.h> #include <linux/screen_info.h> #include <linux/dmi.h> - +#include <linux/pci.h> #include <video/vga.h> static struct fb_var_screeninfo efifb_defined __devinitdata = { @@ -39,17 +39,31 @@ enum { M_I20, /* 20-Inch iMac */ M_I20_SR, /* 20-Inch iMac (Santa Rosa) */ M_I24, /* 24-Inch iMac */ + M_I24_8_1, /* 24-Inch iMac, 8,1th gen */ + M_I24_10_1, /* 24-Inch iMac, 10,1th gen */ + M_I27_11_1, /* 27-Inch iMac, 11,1th gen */ M_MINI, /* Mac Mini */ + M_MINI_3_1, /* Mac Mini, 3,1th gen */ + M_MINI_4_1, /* Mac Mini, 4,1th gen */ M_MB, /* MacBook */ M_MB_2, /* MacBook, 2nd rev. */ M_MB_3, /* MacBook, 3rd rev. */ + M_MB_5_1, /* MacBook, 5th rev. */ + M_MB_6_1, /* MacBook, 6th rev. */ + M_MB_7_1, /* MacBook, 7th rev. 
*/ M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */ M_MBA, /* MacBook Air */ M_MBP, /* MacBook Pro */ M_MBP_2, /* MacBook Pro 2nd gen */ + M_MBP_2_2, /* MacBook Pro 2,2nd gen */ M_MBP_SR, /* MacBook Pro (Santa Rosa) */ M_MBP_4, /* MacBook Pro, 4th gen */ M_MBP_5_1, /* MacBook Pro, 5,1th gen */ + M_MBP_5_2, /* MacBook Pro, 5,2th gen */ + M_MBP_5_3, /* MacBook Pro, 5,3rd gen */ + M_MBP_6_1, /* MacBook Pro, 6,1th gen */ + M_MBP_6_2, /* MacBook Pro, 6,2th gen */ + M_MBP_7_1, /* MacBook Pro, 7,1th gen */ M_UNKNOWN /* placeholder */ }; @@ -64,14 +78,28 @@ static struct efifb_dmi_info { [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050 }, /* guess */ [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050 }, [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200 }, /* guess */ + [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200 }, + [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080 }, + [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440 }, [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768 }, + [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768 }, + [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200 }, [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800 }, + [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800 }, + [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800 }, + [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800 }, [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800 }, [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900 }, [M_MBP_2] = { "mbp2", 0, 0, 0, 0 }, /* placeholder */ + [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900 }, [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900 }, [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200 }, [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900 }, + [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200 }, + [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900 }, + [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200 }, + [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050 }, + [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800 }, [M_UNKNOWN] = { NULL, 0, 0, 0, 0 } }; @@ -92,7 +120,12 @@ static const struct dmi_system_id dmi_system_table[] __initconst = { EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1), EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1), EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB), /* At least one of these two will be right; maybe both? 
*/ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB), @@ -101,14 +134,23 @@ static const struct dmi_system_id dmi_system_table[] __initconst = { EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA), EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP), EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2), + EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2), EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4), EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2), + EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1), {}, }; @@ -116,7 +158,7 @@ static int set_system(const struct dmi_system_id *id) { struct efifb_dmi_info *info = id->driver_data; if (info->base == 0) - return -ENODEV; + return 0; printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " "(%dx%d, stride %d)\n", id->ident, @@ -124,18 +166,55 @@ static int set_system(const struct dmi_system_id *id) info->stride); /* Trust the bootloader over the DMI tables */ - if (screen_info.lfb_base == 0) + if (screen_info.lfb_base == 0) { +#if defined(CONFIG_PCI) + struct pci_dev *dev = NULL; + int found_bar = 0; +#endif screen_info.lfb_base = info->base; - if (screen_info.lfb_linelength == 0) - screen_info.lfb_linelength = info->stride; - if (screen_info.lfb_width == 0) - screen_info.lfb_width = info->width; - if (screen_info.lfb_height == 0) - screen_info.lfb_height = info->height; - if (screen_info.orig_video_isVGA == 0) - screen_info.orig_video_isVGA = VIDEO_TYPE_EFI; - return 0; +#if defined(CONFIG_PCI) + /* make sure that the address in the table is actually on a + * VGA device's PCI BAR */ + + for_each_pci_dev(dev) { + int i; + if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) + continue; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + resource_size_t start, end; + + start = pci_resource_start(dev, i); + if (start == 0) + break; + end = pci_resource_end(dev, i); + if (screen_info.lfb_base >= start && + screen_info.lfb_base < end) { + found_bar = 1; + } + } + } + if (!found_bar) + screen_info.lfb_base = 0; +#endif + } + if (screen_info.lfb_base) { + if (screen_info.lfb_linelength == 0) + screen_info.lfb_linelength = info->stride; + if (screen_info.lfb_width == 0) + screen_info.lfb_width = info->width; + if (screen_info.lfb_height == 0) + screen_info.lfb_height = info->height; + if (screen_info.orig_video_isVGA == 0) + screen_info.orig_video_isVGA = VIDEO_TYPE_EFI; + } else { + screen_info.lfb_linelength = 0; + screen_info.lfb_width = 0; + screen_info.lfb_height = 0; + screen_info.orig_video_isVGA = 0; + return 0; + } + return 1; } static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green, diff --git 
a/drivers/video/fbmem.c b/drivers/video/fbmem.c index b06647517c0..42e303ff862 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1439,6 +1439,7 @@ static const struct file_operations fb_fops = { #ifdef CONFIG_FB_DEFERRED_IO .fsync = fb_deferred_io_fsync, #endif + .llseek = default_llseek, }; struct class *fb_class; diff --git a/drivers/video/mbx/mbxdebugfs.c b/drivers/video/mbx/mbxdebugfs.c index ecad9652457..12dec7634c5 100644 --- a/drivers/video/mbx/mbxdebugfs.c +++ b/drivers/video/mbx/mbxdebugfs.c @@ -175,36 +175,42 @@ static const struct file_operations sysconf_fops = { .read = sysconf_read_file, .write = write_file_dummy, .open = open_file_generic, + .llseek = default_llseek, }; static const struct file_operations clock_fops = { .read = clock_read_file, .write = write_file_dummy, .open = open_file_generic, + .llseek = default_llseek, }; static const struct file_operations display_fops = { .read = display_read_file, .write = write_file_dummy, .open = open_file_generic, + .llseek = default_llseek, }; static const struct file_operations gsctl_fops = { .read = gsctl_read_file, .write = write_file_dummy, .open = open_file_generic, + .llseek = default_llseek, }; static const struct file_operations sdram_fops = { .read = sdram_read_file, .write = write_file_dummy, .open = open_file_generic, + .llseek = default_llseek, }; static const struct file_operations misc_fops = { .read = misc_read_file, .write = write_file_dummy, .open = open_file_generic, + .llseek = default_llseek, }; static void __devinit mbxfb_debugfs_init(struct fb_info *fbi) diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c index f6fdc2085f3..fed2a72bc6b 100644 --- a/drivers/video/omap2/vram.c +++ b/drivers/video/omap2/vram.c @@ -554,12 +554,8 @@ void __init omap_vram_reserve_sdram_memblock(void) size = PAGE_ALIGN(size); if (paddr) { - struct memblock_property res; - - res.base = paddr; - res.size = size; - if ((paddr & ~PAGE_MASK) || memblock_find(&res) || - res.base != paddr || res.size != size) { + if ((paddr & ~PAGE_MASK) || + !memblock_is_region_memory(paddr, size)) { pr_err("Illegal SDRAM region for VRAM\n"); return; } diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c index 5d786bd3e30..cea6403ae71 100644 --- a/drivers/video/pxa168fb.c +++ b/drivers/video/pxa168fb.c @@ -298,8 +298,8 @@ static void set_dma_control0(struct pxa168fb_info *fbi) * Set bit to enable graphics DMA. */ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); - x |= fbi->active ? 0x00000100 : 0; - fbi->active = 0; + x &= ~CFG_GRA_ENA_MASK; + x |= fbi->active ? 
CFG_GRA_ENA(1) : CFG_GRA_ENA(0); /* * If we are in a pseudo-color mode, we need to enable @@ -784,12 +784,53 @@ failed: return ret; } +static int __devexit pxa168fb_remove(struct platform_device *pdev) +{ + struct pxa168fb_info *fbi = platform_get_drvdata(pdev); + struct fb_info *info; + int irq; + unsigned int data; + + if (!fbi) + return 0; + + /* disable DMA transfer */ + data = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); + data &= ~CFG_GRA_ENA_MASK; + writel(data, fbi->reg_base + LCD_SPU_DMA_CTRL0); + + info = fbi->info; + + unregister_framebuffer(info); + + writel(GRA_FRAME_IRQ0_ENA(0x0), fbi->reg_base + SPU_IRQ_ENA); + + if (info->cmap.len) + fb_dealloc_cmap(&info->cmap); + + irq = platform_get_irq(pdev, 0); + free_irq(irq, fbi); + + dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len), + info->screen_base, info->fix.smem_start); + + iounmap(fbi->reg_base); + + clk_disable(fbi->clk); + clk_put(fbi->clk); + + framebuffer_release(info); + + return 0; +} + static struct platform_driver pxa168fb_driver = { .driver = { .name = "pxa168-fb", .owner = THIS_MODULE, }, .probe = pxa168fb_probe, + .remove = __devexit_p(pxa168fb_remove), }; static int __init pxa168fb_init(void) @@ -798,6 +839,12 @@ static int __init pxa168fb_init(void) } module_init(pxa168fb_init); +static void __exit pxa168fb_exit(void) +{ + platform_driver_unregister(&pxa168fb_driver); +} +module_exit(pxa168fb_exit); + MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> " "Green Wan <gwan@marvell.com>"); MODULE_DESCRIPTION("Framebuffer driver for PXA168/910"); diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 559bf1727a2..b52f8e4ef1f 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -1701,6 +1701,9 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, break; case FBIOGET_VBLANK: + + memset(&sisvbblank, 0, sizeof(struct fb_vblank)); + sisvbblank.count = 0; sisvbblank.flags = sisfb_setupvbblankflags(ivideo, &sisvbblank.vcount, &sisvbblank.hcount); diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c index f2d9e667972..f885c868a04 100644 --- a/drivers/vlynq/vlynq.c +++ b/drivers/vlynq/vlynq.c @@ -31,6 +31,7 @@ #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/irq.h> #include <linux/vlynq.h> diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 24efd8ea41b..c356146bd71 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -957,12 +957,32 @@ config PIKA_WDT the Warp platform. config BOOKE_WDT - bool "PowerPC Book-E Watchdog Timer" + tristate "PowerPC Book-E Watchdog Timer" depends on BOOKE || 4xx ---help--- + Watchdog driver for PowerPC Book-E chips, such as the Freescale + MPC85xx SOCs and the IBM PowerPC 440. + Please see Documentation/watchdog/watchdog-api.txt for more information. +config BOOKE_WDT_DEFAULT_TIMEOUT + int "PowerPC Book-E Watchdog Timer Default Timeout" + depends on BOOKE_WDT + default 38 if FSL_BOOKE + range 0 63 if FSL_BOOKE + default 3 if !FSL_BOOKE + range 0 3 if !FSL_BOOKE + help + Select the default watchdog timer period to be used by the PowerPC + Book-E watchdog driver. A watchdog "event" occurs when the bit + position represented by this number transitions from zero to one. + + For Freescale Book-E processors, this is a number between 0 and 63. + For other Book-E processors, this is a number between 0 and 3. + + The value can be overidden by the wdt_period command-line parameter. 
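For reference, the period value selected here ends up in the WDTP field of the Timer Control Register; a hedged sketch of the FSL Book-E case, reusing the WDTP()/WDTP_MASK/SPRN_TCR definitions visible in the booke_wdt.c hunk that follows (illustrative only, and it omits enable-side details such as the watchdog reset control bits):

static void example_set_wdt_period(u32 period)  /* 0..63 on FSL Book-E */
{
        u32 val = mfspr(SPRN_TCR);

        val &= ~WDTP_MASK;              /* drop the previously programmed period */
        val |= TCR_WIE | WDTP(period);  /* watchdog interrupt enable + new period */
        mtspr(SPRN_TCR, val);
}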
+ # PPC64 Architecture config WATCHDOG_RTAS diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index c764c52412e..b2922178359 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c @@ -267,6 +267,7 @@ static const struct file_operations ar7_wdt_fops = { .unlocked_ioctl = ar7_wdt_ioctl, .open = ar7_wdt_open, .release = ar7_wdt_release, + .llseek = no_llseek, }; static struct miscdevice ar7_wdt_miscdev = { diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 3d49671cdf5..d11ffb091b0 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -4,7 +4,7 @@ * Author: Matthew McClintock * Maintainer: Kumar Gala <galak@kernel.crashing.org> * - * Copyright 2005, 2008 Freescale Semiconductor Inc. + * Copyright 2005, 2008, 2010 Freescale Semiconductor Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -33,14 +33,8 @@ * occur, and the final time the board will reset. */ -#ifdef CONFIG_FSL_BOOKE -#define WDT_PERIOD_DEFAULT 38 /* Ex. wdt_period=28 bus=333Mhz,reset=~40sec */ -#else -#define WDT_PERIOD_DEFAULT 3 /* Refer to the PPC40x and PPC4xx manuals */ -#endif /* for timing information */ - u32 booke_wdt_enabled; -u32 booke_wdt_period = WDT_PERIOD_DEFAULT; +u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT; #ifdef CONFIG_FSL_BOOKE #define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) @@ -114,6 +108,27 @@ static void __booke_wdt_enable(void *data) mtspr(SPRN_TCR, val); } +/** + * booke_wdt_disable - disable the watchdog on the given CPU + * + * This function is called on each CPU. It disables the watchdog on that CPU. + * + * TCR[WRC] cannot be changed once it has been set to non-zero, but we can + * effectively disable the watchdog by setting its period to the maximum value. 
+ */ +static void __booke_wdt_disable(void *data) +{ + u32 val; + + val = mfspr(SPRN_TCR); + val &= ~(TCR_WIE | WDTP_MASK); + mtspr(SPRN_TCR, val); + + /* clear status to make sure nothing is pending */ + __booke_wdt_ping(NULL); + +} + static ssize_t booke_wdt_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { @@ -193,12 +208,21 @@ static int booke_wdt_open(struct inode *inode, struct file *file) return nonseekable_open(inode, file); } +static int booke_wdt_release(struct inode *inode, struct file *file) +{ + on_each_cpu(__booke_wdt_disable, NULL, 0); + booke_wdt_enabled = 0; + + return 0; +} + static const struct file_operations booke_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = booke_wdt_write, .unlocked_ioctl = booke_wdt_ioctl, .open = booke_wdt_open, + .release = booke_wdt_release, }; static struct miscdevice booke_wdt_miscdev = { @@ -237,4 +261,9 @@ static int __init booke_wdt_init(void) return ret; } -device_initcall(booke_wdt_init); + +module_init(booke_wdt_init); +module_exit(booke_wdt_exit); + +MODULE_DESCRIPTION("PowerPC Book-E watchdog driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index 566343b3c13..eca855a55c0 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -25,7 +25,7 @@ #include <linux/ioport.h> #include <linux/timer.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> @@ -89,6 +89,7 @@ struct cpwd { } devs[WD_NUMDEVS]; }; +static DEFINE_MUTEX(cpwd_mutex); static struct cpwd *cpwd_device; /* Sun uses Altera PLD EPF8820ATC144-4 @@ -368,7 +369,7 @@ static int cpwd_open(struct inode *inode, struct file *f) { struct cpwd *p = cpwd_device; - lock_kernel(); + mutex_lock(&cpwd_mutex); switch (iminor(inode)) { case WD0_MINOR: case WD1_MINOR: @@ -376,7 +377,7 @@ static int cpwd_open(struct inode *inode, struct file *f) break; default: - unlock_kernel(); + mutex_unlock(&cpwd_mutex); return -ENODEV; } @@ -386,13 +387,13 @@ static int cpwd_open(struct inode *inode, struct file *f) IRQF_SHARED, DRIVER_NAME, p)) { printk(KERN_ERR PFX "Cannot register IRQ %d\n", p->irq); - unlock_kernel(); + mutex_unlock(&cpwd_mutex); return -EBUSY; } p->initialized = true; } - unlock_kernel(); + mutex_unlock(&cpwd_mutex); return nonseekable_open(inode, f); } @@ -482,9 +483,9 @@ static long cpwd_compat_ioctl(struct file *file, unsigned int cmd, case WIOCSTART: case WIOCSTOP: case WIOCGSTAT: - lock_kernel(); + mutex_lock(&cpwd_mutex); rval = cpwd_ioctl(file, cmd, arg); - unlock_kernel(); + mutex_unlock(&cpwd_mutex); break; /* everything else is handled by the generic compat layer */ @@ -524,6 +525,7 @@ static const struct file_operations cpwd_fops = { .write = cpwd_write, .read = cpwd_read, .release = cpwd_release, + .llseek = no_llseek, }; static int __devinit cpwd_probe(struct platform_device *op, diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c index 59359c9a5e0..726b7df61fd 100644 --- a/drivers/watchdog/ep93xx_wdt.c +++ b/drivers/watchdog/ep93xx_wdt.c @@ -188,6 +188,7 @@ static const struct file_operations ep93xx_wdt_fops = { .unlocked_ioctl = ep93xx_wdt_ioctl, .open = ep93xx_wdt_open, .release = ep93xx_wdt_release, + .llseek = no_llseek, }; static struct miscdevice ep93xx_wdt_miscdev = { diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c index 2a410170eca..909923800a0 100644 --- a/drivers/watchdog/octeon-wdt-main.c +++ 
b/drivers/watchdog/octeon-wdt-main.c @@ -64,6 +64,7 @@ #include <linux/cpu.h> #include <linux/smp.h> #include <linux/fs.h> +#include <linux/irq.h> #include <asm/mipsregs.h> #include <asm/uasm.h> diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index 76b58abf445..81e3d610089 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -258,6 +258,7 @@ static const struct file_operations omap_wdt_fops = { .unlocked_ioctl = omap_wdt_ioctl, .open = omap_wdt_open, .release = omap_wdt_release, + .llseek = no_llseek, }; static int __devinit omap_wdt_probe(struct platform_device *pdev) diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 13365ba3521..7d24b0d94ed 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -338,30 +338,29 @@ static void unmask_evtchn(int port) static int find_unbound_irq(void) { - int irq; - struct irq_desc *desc; + struct irq_data *data; + int irq, res; for (irq = 0; irq < nr_irqs; irq++) { - desc = irq_to_desc(irq); + data = irq_get_irq_data(irq); /* only 0->15 have init'd desc; handle irq > 16 */ - if (desc == NULL) + if (!data) break; - if (desc->chip == &no_irq_chip) + if (data->chip == &no_irq_chip) break; - if (desc->chip != &xen_dynamic_chip) + if (data->chip != &xen_dynamic_chip) continue; if (irq_info[irq].type == IRQT_UNBOUND) - break; + return irq; } if (irq == nr_irqs) panic("No available IRQ to bind to: increase nr_irqs!\n"); - desc = irq_to_desc_alloc_node(irq, 0); - if (WARN_ON(desc == NULL)) - return -1; + res = irq_alloc_desc_at(irq, 0); - dynamic_irq_init_keep_chip_data(irq); + if (WARN_ON(res != irq)) + return -1; return irq; } @@ -495,7 +494,7 @@ static void unbind_from_irq(unsigned int irq) if (irq_info[irq].type != IRQT_UNBOUND) { irq_info[irq] = mk_unbound_info(); - dynamic_irq_cleanup(irq); + irq_free_desc(irq); } spin_unlock(&irq_mapping_update_lock); diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index 66e185cfe92..fec6ba3c08a 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -467,6 +467,7 @@ static const struct file_operations evtchn_fops = { .fasync = evtchn_fasync, .open = evtchn_open, .release = evtchn_release, + .llseek = noop_llseek, }; static struct miscdevice evtchn_miscdev = { diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 29bac511887..d409495876f 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -755,7 +755,10 @@ int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; - blocking_notifier_chain_register(&xenstore_chain, nb); + if (xenstored_ready > 0) + ret = nb->notifier_call(nb, 0, NULL); + else + blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } @@ -769,7 +772,7 @@ EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); void xenbus_probe(struct work_struct *unused) { - BUG_ON((xenstored_ready <= 0)); + xenstored_ready = 1; /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); @@ -835,8 +838,8 @@ static int __init xenbus_init(void) xen_store_evtchn = xen_start_info->store_evtchn; xen_store_mfn = xen_start_info->store_mfn; xen_store_interface = mfn_to_virt(xen_store_mfn); + xenstored_ready = 1; } - xenstored_ready = 1; } /* Initialize the interface to xenstore. 
*/ diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index 78bfab0700b..bd96340063c 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c @@ -35,6 +35,7 @@ static ssize_t capabilities_read(struct file *file, char __user *buf, static const struct file_operations capabilities_file_ops = { .read = capabilities_read, + .llseek = default_llseek, }; static int xenfs_fill_super(struct super_block *sb, void *data, int silent) diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c index 3b39c3752e2..1c1236087f7 100644 --- a/drivers/xen/xenfs/xenbus.c +++ b/drivers/xen/xenfs/xenbus.c @@ -594,4 +594,5 @@ const struct file_operations xenbus_file_ops = { .open = xenbus_file_open, .release = xenbus_file_release, .poll = xenbus_file_poll, + .llseek = no_llseek, };
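
(Editorial note, not part of the patch: the change that recurs throughout the hunks above — fbmem, mbxdebugfs, the watchdog drivers, evtchn, xenfs — is the addition of an explicit .llseek method to each file_operations, as part of the effort to stop relying on the implicit seek default while the BKL is being removed. The sketch below is illustrative only and the demo_* names are invented; it shows the two common choices: no_llseek paired with nonseekable_open() where seeking is meaningless, and default_llseek where ordinary f_pos semantics should be kept. noop_llseek, used for evtchn above, is the third option: it accepts the seek but leaves the file position untouched.)

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/types.h>

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;	/* placeholder body, always reports EOF */
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* Marks the file non-seekable so lseek()/pread() fail cleanly. */
	return nonseekable_open(inode, file);
}

/* Character-device style: seeking makes no sense, so reject it. */
static const struct file_operations demo_noseek_fops = {
	.owner	= THIS_MODULE,
	.read	= demo_read,
	.open	= demo_open,
	.llseek	= no_llseek,
};

/* debugfs style: keep ordinary SEEK_SET/SEEK_CUR/SEEK_END handling. */
static const struct file_operations demo_seekable_fops = {
	.owner	= THIS_MODULE,
	.read	= demo_read,
	.llseek	= default_llseek,
};
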