Diffstat (limited to 'drivers')
636 files changed, 28347 insertions, 9820 deletions
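A reading aid before the per-file hunks: the largest change below reverts drivers/acpi/ac.c from a platform driver back to a classic struct acpi_driver. The following is a minimal sketch of that registration pattern, assuming nothing beyond what the hunk itself shows; every identifier prefixed example_ is invented for illustration and is not part of the patch.

/*
 * Minimal sketch (not from the patch): the acpi_driver registration
 * pattern that drivers/acpi/ac.c returns to below. All example_*
 * names are illustrative.
 */
#include <linux/module.h>
#include <linux/acpi.h>

static int example_add(struct acpi_device *device)
{
	/* allocate per-device state, stash it via device->driver_data */
	return 0;
}

static int example_remove(struct acpi_device *device)
{
	/* free what example_add() allocated */
	return 0;
}

static const struct acpi_device_id example_ids[] = {
	{"ACPI0003", 0},	/* AC adapter HID, as in the ac.c hunk */
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, example_ids);

static struct acpi_driver example_driver = {
	.name = "example",
	.class = "ac_adapter",
	.ids = example_ids,
	.ops = {
		.add = example_add,
		.remove = example_remove,
	},
};

static int __init example_init(void)
{
	return acpi_bus_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	acpi_bus_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");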
diff --git a/drivers/Kconfig b/drivers/Kconfig index 0a0a90f52d2..0e87a34b647 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -132,6 +132,8 @@ source "drivers/staging/Kconfig" source "drivers/platform/Kconfig" +source "drivers/soc/Kconfig" + source "drivers/clk/Kconfig" source "drivers/hwspinlock/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index da72e328f77..f98b50d8251 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -33,6 +33,9 @@ obj-y += amba/ # really early. obj-$(CONFIG_DMADEVICES) += dma/ +# SOC specific infrastructure drivers. +obj-y += soc/ + obj-$(CONFIG_VIRTIO) += virtio/ obj-$(CONFIG_XEN) += xen/ @@ -118,7 +121,7 @@ obj-$(CONFIG_SGI_SN) += sn/ obj-y += firmware/ obj-$(CONFIG_CRYPTO) += crypto/ obj-$(CONFIG_SUPERH) += sh/ -obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += sh/ +obj-$(CONFIG_ARCH_SHMOBILE) += sh/ ifndef CONFIG_ARCH_USES_GETTIMEOFFSET obj-y += clocksource/ endif diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ab686b31010..a34a2284100 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -47,6 +47,23 @@ config ACPI_SLEEP depends on SUSPEND || HIBERNATION default y +config ACPI_PROCFS_POWER + bool "Deprecated power /proc/acpi directories" + depends on PROC_FS + help + For backwards compatibility, this option allows + deprecated power /proc/acpi/ directories to exist, even when + they have been replaced by functions in /sys. + The deprecated directories (and their replacements) include: + /proc/acpi/battery/* (/sys/class/power_supply/*) + /proc/acpi/ac_adapter/* (sys/class/power_supply/*) + This option has no effect on /proc/acpi/ directories + and functions, which do not yet exist in /sys + This option, together with the proc directories, will be + deleted in the future. + + Say N to delete power /proc/acpi/ directories that have moved to /sys/ + config ACPI_EC_DEBUGFS tristate "EC read/write access through /sys/kernel/debug/ec" default n diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index 0331f91d56e..bce34afadcd 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -47,6 +47,7 @@ acpi-y += sysfs.o acpi-$(CONFIG_X86) += acpi_cmos_rtc.o acpi-$(CONFIG_DEBUG_FS) += debugfs.o acpi-$(CONFIG_ACPI_NUMA) += numa.o +acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o ifdef CONFIG_ACPI_VIDEO acpi-y += video_detect.o endif diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 2c01c1da29c..c67f6f5ad61 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -52,11 +52,39 @@ MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); +static int acpi_ac_add(struct acpi_device *device); +static int acpi_ac_remove(struct acpi_device *device); +static void acpi_ac_notify(struct acpi_device *device, u32 event); + +static const struct acpi_device_id ac_device_ids[] = { + {"ACPI0003", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, ac_device_ids); + +#ifdef CONFIG_PM_SLEEP +static int acpi_ac_resume(struct device *dev); +#endif +static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); + static int ac_sleep_before_get_state_ms; +static struct acpi_driver acpi_ac_driver = { + .name = "ac", + .class = ACPI_AC_CLASS, + .ids = ac_device_ids, + .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, + .ops = { + .add = acpi_ac_add, + .remove = acpi_ac_remove, + .notify = acpi_ac_notify, + }, + .drv.pm = &acpi_ac_pm, +}; + struct acpi_ac { struct power_supply charger; - struct platform_device *pdev; + struct acpi_device * device; unsigned long long state; struct notifier_block battery_nb; }; @@ -69,10 
+97,12 @@ struct acpi_ac { static int acpi_ac_get_state(struct acpi_ac *ac) { - acpi_status status; - acpi_handle handle = ACPI_HANDLE(&ac->pdev->dev); + acpi_status status = AE_OK; + + if (!ac) + return -EINVAL; - status = acpi_evaluate_integer(handle, "_PSR", NULL, + status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL, &ac->state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, @@ -117,10 +147,9 @@ static enum power_supply_property ac_props[] = { Driver Model -------------------------------------------------------------------------- */ -static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) +static void acpi_ac_notify(struct acpi_device *device, u32 event) { - struct acpi_ac *ac = data; - struct acpi_device *adev; + struct acpi_ac *ac = acpi_driver_data(device); if (!ac) return; @@ -143,11 +172,10 @@ static void acpi_ac_notify_handler(acpi_handle handle, u32 event, void *data) msleep(ac_sleep_before_get_state_ms); acpi_ac_get_state(ac); - adev = ACPI_COMPANION(&ac->pdev->dev); - acpi_bus_generate_netlink_event(adev->pnp.device_class, - dev_name(&ac->pdev->dev), - event, (u32) ac->state); - acpi_notifier_call_chain(adev, event, (u32) ac->state); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, + (u32) ac->state); + acpi_notifier_call_chain(device, event, (u32) ac->state); kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); } @@ -192,49 +220,39 @@ static struct dmi_system_id ac_dmi_table[] = { {}, }; -static int acpi_ac_probe(struct platform_device *pdev) +static int acpi_ac_add(struct acpi_device *device) { int result = 0; struct acpi_ac *ac = NULL; - struct acpi_device *adev; - if (!pdev) - return -EINVAL; - adev = ACPI_COMPANION(&pdev->dev); - if (!adev) - return -ENODEV; + if (!device) + return -EINVAL; ac = kzalloc(sizeof(struct acpi_ac), GFP_KERNEL); if (!ac) return -ENOMEM; - strcpy(acpi_device_name(adev), ACPI_AC_DEVICE_NAME); - strcpy(acpi_device_class(adev), ACPI_AC_CLASS); - ac->pdev = pdev; - platform_set_drvdata(pdev, ac); + ac->device = device; + strcpy(acpi_device_name(device), ACPI_AC_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_AC_CLASS); + device->driver_data = ac; result = acpi_ac_get_state(ac); if (result) goto end; - ac->charger.name = acpi_device_bid(adev); + ac->charger.name = acpi_device_bid(device); ac->charger.type = POWER_SUPPLY_TYPE_MAINS; ac->charger.properties = ac_props; ac->charger.num_properties = ARRAY_SIZE(ac_props); ac->charger.get_property = get_ac_property; - result = power_supply_register(&pdev->dev, &ac->charger); + result = power_supply_register(&ac->device->dev, &ac->charger); if (result) goto end; - result = acpi_install_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_ALL_NOTIFY, acpi_ac_notify_handler, ac); - if (result) { - power_supply_unregister(&ac->charger); - goto end; - } printk(KERN_INFO PREFIX "%s [%s] (%s)\n", - acpi_device_name(adev), acpi_device_bid(adev), + acpi_device_name(device), acpi_device_bid(device), ac->state ? 
"on-line" : "off-line"); ac->battery_nb.notifier_call = acpi_ac_battery_notify; @@ -256,7 +274,7 @@ static int acpi_ac_resume(struct device *dev) if (!dev) return -EINVAL; - ac = platform_get_drvdata(to_platform_device(dev)); + ac = acpi_driver_data(to_acpi_device(dev)); if (!ac) return -EINVAL; @@ -270,19 +288,17 @@ static int acpi_ac_resume(struct device *dev) #else #define acpi_ac_resume NULL #endif -static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume); -static int acpi_ac_remove(struct platform_device *pdev) +static int acpi_ac_remove(struct acpi_device *device) { - struct acpi_ac *ac; + struct acpi_ac *ac = NULL; + - if (!pdev) + if (!device || !acpi_driver_data(device)) return -EINVAL; - acpi_remove_notify_handler(ACPI_HANDLE(&pdev->dev), - ACPI_ALL_NOTIFY, acpi_ac_notify_handler); + ac = acpi_driver_data(device); - ac = platform_get_drvdata(pdev); if (ac->charger.dev) power_supply_unregister(&ac->charger); unregister_acpi_notifier(&ac->battery_nb); @@ -292,23 +308,6 @@ static int acpi_ac_remove(struct platform_device *pdev) return 0; } -static const struct acpi_device_id acpi_ac_match[] = { - { "ACPI0003", 0 }, - { } -}; -MODULE_DEVICE_TABLE(acpi, acpi_ac_match); - -static struct platform_driver acpi_ac_driver = { - .probe = acpi_ac_probe, - .remove = acpi_ac_remove, - .driver = { - .name = "acpi-ac", - .owner = THIS_MODULE, - .pm = &acpi_ac_pm_ops, - .acpi_match_table = ACPI_PTR(acpi_ac_match), - }, -}; - static int __init acpi_ac_init(void) { int result; @@ -316,7 +315,7 @@ static int __init acpi_ac_init(void) if (acpi_disabled) return -ENODEV; - result = platform_driver_register(&acpi_ac_driver); + result = acpi_bus_register_driver(&acpi_ac_driver); if (result < 0) return -ENODEV; @@ -325,7 +324,7 @@ static int __init acpi_ac_init(void) static void __exit acpi_ac_exit(void) { - platform_driver_unregister(&acpi_ac_driver); + acpi_bus_unregister_driver(&acpi_ac_driver); } module_init(acpi_ac_init); module_exit(acpi_ac_exit); diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index dbfe49e5fd6..1d4950388fa 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c @@ -29,7 +29,6 @@ ACPI_MODULE_NAME("platform"); static const struct acpi_device_id acpi_platform_device_ids[] = { { "PNP0D40" }, - { "ACPI0003" }, { "VPC2004" }, { "BCM4752" }, diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index c29c2c3ec0a..52c81c49cc7 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) acpi_status status; int ret; + if (pr->apic_id == -1) + return -ENODEV; + status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT)) return -ENODEV; @@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device) } apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); - if (apic_id < 0) { + if (apic_id < 0) acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); - return -ENODEV; - } pr->apic_id = apic_id; cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); @@ -404,7 +405,6 @@ static int acpi_processor_add(struct acpi_device *device, goto err; pr->dev = dev; - dev->offline = pr->flags.need_hotplug_init; /* Trigger the processor driver's .probe() if present. 
*/ if (device_attach(dev) >= 0) diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 49bbc71fad5..a08a448068d 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -141,9 +141,9 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE); * address. Although ACPICA adheres to the ACPI specification which * requires the use of the corresponding 64-bit address if it is non-zero, * some machines have been found to have a corrupted non-zero 64-bit - * address. Default is FALSE, do not favor the 32-bit addresses. + * address. Default is TRUE, favor the 32-bit addresses. */ -ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, FALSE); +ACPI_INIT_GLOBAL(u8, acpi_gbl_use32_bit_fadt_addresses, TRUE); /* * Optionally truncate I/O addresses to 16 bits. Provides compatibility diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index a4702eee91a..9fb85f38de9 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c @@ -461,6 +461,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) u32 table_count; struct acpi_table_header *table; acpi_physical_address address; + acpi_physical_address rsdt_address; u32 length; u8 *table_entry; acpi_status status; @@ -488,11 +489,14 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) * as per the ACPI specification. */ address = (acpi_physical_address) rsdp->xsdt_physical_address; + rsdt_address = + (acpi_physical_address) rsdp->rsdt_physical_address; table_entry_size = ACPI_XSDT_ENTRY_SIZE; } else { /* Root table is an RSDT (32-bit physical addresses) */ address = (acpi_physical_address) rsdp->rsdt_physical_address; + rsdt_address = address; table_entry_size = ACPI_RSDT_ENTRY_SIZE; } @@ -515,8 +519,7 @@ acpi_status __init acpi_tb_parse_root_table(acpi_physical_address rsdp_address) /* Fall back to the RSDT */ - address = - (acpi_physical_address) rsdp->rsdt_physical_address; + address = rsdt_address; table_entry_size = ACPI_RSDT_ENTRY_SIZE; } } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 9a2c63b2005..6e7b2a12860 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -36,6 +36,12 @@ #include <linux/suspend.h> #include <asm/unaligned.h> +#ifdef CONFIG_ACPI_PROCFS_POWER +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <asm/uaccess.h> +#endif + #include <linux/acpi.h> #include <linux/power_supply.h> @@ -64,6 +70,19 @@ static unsigned int cache_time = 1000; module_param(cache_time, uint, 0644); MODULE_PARM_DESC(cache_time, "cache time in milliseconds"); +#ifdef CONFIG_ACPI_PROCFS_POWER +extern struct proc_dir_entry *acpi_lock_battery_dir(void); +extern void *acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir); + +enum acpi_battery_files { + info_tag = 0, + state_tag, + alarm_tag, + ACPI_BATTERY_NUMFILES, +}; + +#endif + static const struct acpi_device_id battery_device_ids[] = { {"PNP0C0A", 0}, {"", 0}, @@ -299,6 +318,14 @@ static enum power_supply_property energy_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, }; +#ifdef CONFIG_ACPI_PROCFS_POWER +inline char *acpi_battery_units(struct acpi_battery *battery) +{ + return (battery->power_unit == ACPI_BATTERY_POWER_UNIT_MA) ? 
+ "mA" : "mW"; +} +#endif + /* -------------------------------------------------------------------------- Battery Management -------------------------------------------------------------------------- */ @@ -717,6 +744,279 @@ static void acpi_battery_refresh(struct acpi_battery *battery) } /* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +#ifdef CONFIG_ACPI_PROCFS_POWER +static struct proc_dir_entry *acpi_battery_dir; + +static int acpi_battery_print_info(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + seq_printf(seq, "present: %s\n", + acpi_battery_present(battery) ? "yes" : "no"); + if (!acpi_battery_present(battery)) + goto end; + if (battery->design_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "design capacity: unknown\n"); + else + seq_printf(seq, "design capacity: %d %sh\n", + battery->design_capacity, + acpi_battery_units(battery)); + + if (battery->full_charge_capacity == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "last full capacity: unknown\n"); + else + seq_printf(seq, "last full capacity: %d %sh\n", + battery->full_charge_capacity, + acpi_battery_units(battery)); + + seq_printf(seq, "battery technology: %srechargeable\n", + (!battery->technology)?"non-":""); + + if (battery->design_voltage == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "design voltage: unknown\n"); + else + seq_printf(seq, "design voltage: %d mV\n", + battery->design_voltage); + seq_printf(seq, "design capacity warning: %d %sh\n", + battery->design_capacity_warning, + acpi_battery_units(battery)); + seq_printf(seq, "design capacity low: %d %sh\n", + battery->design_capacity_low, + acpi_battery_units(battery)); + seq_printf(seq, "cycle count: %i\n", battery->cycle_count); + seq_printf(seq, "capacity granularity 1: %d %sh\n", + battery->capacity_granularity_1, + acpi_battery_units(battery)); + seq_printf(seq, "capacity granularity 2: %d %sh\n", + battery->capacity_granularity_2, + acpi_battery_units(battery)); + seq_printf(seq, "model number: %s\n", battery->model_number); + seq_printf(seq, "serial number: %s\n", battery->serial_number); + seq_printf(seq, "battery type: %s\n", battery->type); + seq_printf(seq, "OEM info: %s\n", battery->oem_info); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery info\n"); + return result; +} + +static int acpi_battery_print_state(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + seq_printf(seq, "present: %s\n", + acpi_battery_present(battery) ? "yes" : "no"); + if (!acpi_battery_present(battery)) + goto end; + + seq_printf(seq, "capacity state: %s\n", + (battery->state & 0x04) ? 
"critical" : "ok"); + if ((battery->state & 0x01) && (battery->state & 0x02)) + seq_printf(seq, + "charging state: charging/discharging\n"); + else if (battery->state & 0x01) + seq_printf(seq, "charging state: discharging\n"); + else if (battery->state & 0x02) + seq_printf(seq, "charging state: charging\n"); + else + seq_printf(seq, "charging state: charged\n"); + + if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "present rate: unknown\n"); + else + seq_printf(seq, "present rate: %d %s\n", + battery->rate_now, acpi_battery_units(battery)); + + if (battery->capacity_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "remaining capacity: unknown\n"); + else + seq_printf(seq, "remaining capacity: %d %sh\n", + battery->capacity_now, acpi_battery_units(battery)); + if (battery->voltage_now == ACPI_BATTERY_VALUE_UNKNOWN) + seq_printf(seq, "present voltage: unknown\n"); + else + seq_printf(seq, "present voltage: %d mV\n", + battery->voltage_now); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery state\n"); + + return result; +} + +static int acpi_battery_print_alarm(struct seq_file *seq, int result) +{ + struct acpi_battery *battery = seq->private; + + if (result) + goto end; + + if (!acpi_battery_present(battery)) { + seq_printf(seq, "present: no\n"); + goto end; + } + seq_printf(seq, "alarm: "); + if (!battery->alarm) + seq_printf(seq, "unsupported\n"); + else + seq_printf(seq, "%u %sh\n", battery->alarm, + acpi_battery_units(battery)); + end: + if (result) + seq_printf(seq, "ERROR: Unable to read battery alarm\n"); + return result; +} + +static ssize_t acpi_battery_write_alarm(struct file *file, + const char __user * buffer, + size_t count, loff_t * ppos) +{ + int result = 0; + char alarm_string[12] = { '\0' }; + struct seq_file *m = file->private_data; + struct acpi_battery *battery = m->private; + + if (!battery || (count > sizeof(alarm_string) - 1)) + return -EINVAL; + if (!acpi_battery_present(battery)) { + result = -ENODEV; + goto end; + } + if (copy_from_user(alarm_string, buffer, count)) { + result = -EFAULT; + goto end; + } + alarm_string[count] = '\0'; + battery->alarm = simple_strtol(alarm_string, NULL, 0); + result = acpi_battery_set_alarm(battery); + end: + if (!result) + return count; + return result; +} + +typedef int(*print_func)(struct seq_file *seq, int result); + +static print_func acpi_print_funcs[ACPI_BATTERY_NUMFILES] = { + acpi_battery_print_info, + acpi_battery_print_state, + acpi_battery_print_alarm, +}; + +static int acpi_battery_read(int fid, struct seq_file *seq) +{ + struct acpi_battery *battery = seq->private; + int result = acpi_battery_update(battery); + return acpi_print_funcs[fid](seq, result); +} + +#define DECLARE_FILE_FUNCTIONS(_name) \ +static int acpi_battery_read_##_name(struct seq_file *seq, void *offset) \ +{ \ + return acpi_battery_read(_name##_tag, seq); \ +} \ +static int acpi_battery_##_name##_open_fs(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, acpi_battery_read_##_name, PDE_DATA(inode)); \ +} + +DECLARE_FILE_FUNCTIONS(info); +DECLARE_FILE_FUNCTIONS(state); +DECLARE_FILE_FUNCTIONS(alarm); + +#undef DECLARE_FILE_FUNCTIONS + +#define FILE_DESCRIPTION_RO(_name) \ + { \ + .name = __stringify(_name), \ + .mode = S_IRUGO, \ + .ops = { \ + .open = acpi_battery_##_name##_open_fs, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + .owner = THIS_MODULE, \ + }, \ + } + +#define FILE_DESCRIPTION_RW(_name) \ + { \ + .name = __stringify(_name), \ + .mode = 
S_IFREG | S_IRUGO | S_IWUSR, \ + .ops = { \ + .open = acpi_battery_##_name##_open_fs, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .write = acpi_battery_write_##_name, \ + .release = single_release, \ + .owner = THIS_MODULE, \ + }, \ + } + +static const struct battery_file { + struct file_operations ops; + umode_t mode; + const char *name; +} acpi_battery_file[] = { + FILE_DESCRIPTION_RO(info), + FILE_DESCRIPTION_RO(state), + FILE_DESCRIPTION_RW(alarm), +}; + +#undef FILE_DESCRIPTION_RO +#undef FILE_DESCRIPTION_RW + +static int acpi_battery_add_fs(struct acpi_device *device) +{ + struct proc_dir_entry *entry = NULL; + int i; + + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); + if (!acpi_device_dir(device)) { + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), + acpi_battery_dir); + if (!acpi_device_dir(device)) + return -ENODEV; + } + + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { + entry = proc_create_data(acpi_battery_file[i].name, + acpi_battery_file[i].mode, + acpi_device_dir(device), + &acpi_battery_file[i].ops, + acpi_driver_data(device)); + if (!entry) + return -ENODEV; + } + return 0; +} + +static void acpi_battery_remove_fs(struct acpi_device *device) +{ + int i; + if (!acpi_device_dir(device)) + return; + for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) + remove_proc_entry(acpi_battery_file[i].name, + acpi_device_dir(device)); + + remove_proc_entry(acpi_device_bid(device), acpi_battery_dir); + acpi_device_dir(device) = NULL; +} + +#endif + +/* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ @@ -790,6 +1090,15 @@ static int acpi_battery_add(struct acpi_device *device) result = acpi_battery_update(battery); if (result) goto fail; +#ifdef CONFIG_ACPI_PROCFS_POWER + result = acpi_battery_add_fs(device); +#endif + if (result) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_remove_fs(device); +#endif + goto fail; + } printk(KERN_INFO PREFIX "%s Slot [%s] (battery %s)\n", ACPI_BATTERY_DEVICE_NAME, acpi_device_bid(device), @@ -816,6 +1125,9 @@ static int acpi_battery_remove(struct acpi_device *device) return -EINVAL; battery = acpi_driver_data(device); unregister_pm_notifier(&battery->pm_nb); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_remove_fs(device); +#endif sysfs_remove_battery(battery); mutex_destroy(&battery->lock); mutex_destroy(&battery->sysfs_lock); @@ -866,7 +1178,19 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie) if (dmi_check_system(bat_dmi_table)) battery_bix_broken_package = 1; - acpi_bus_register_driver(&acpi_battery_driver); + +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_battery_dir = acpi_lock_battery_dir(); + if (!acpi_battery_dir) + return; +#endif + if (acpi_bus_register_driver(&acpi_battery_driver) < 0) { +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_battery_dir(acpi_battery_dir); +#endif + return; + } + return; } static int __init acpi_battery_init(void) @@ -878,6 +1202,9 @@ static int __init acpi_battery_init(void) static void __exit acpi_battery_exit(void) { acpi_bus_unregister_driver(&acpi_battery_driver); +#ifdef CONFIG_ACPI_PROCFS_POWER + acpi_unlock_battery_dir(acpi_battery_dir); +#endif } module_init(acpi_battery_init); diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c index afec4526c48..3d8413d02a9 100644 --- a/drivers/acpi/blacklist.c +++ b/drivers/acpi/blacklist.c @@ -314,6 +314,14 @@ 
static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"), }, }, + { + .callback = dmi_disable_osi_win8, + .ident = "Dell Inspiron 7737", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7737"), + }, + }, /* * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. @@ -374,6 +382,19 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"), }, }, + /* + * Without this this EEEpc exports a non working WMI interface, with + * this it exports a working "good old" eeepc_laptop interface, fixing + * both brightness control, and rfkill not working. + */ + { + .callback = dmi_enable_osi_linux, + .ident = "Asus EEE PC 1015PX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"), + }, + }, {} }; diff --git a/drivers/acpi/cm_sbs.c b/drivers/acpi/cm_sbs.c new file mode 100644 index 00000000000..6c9ee68e46f --- /dev/null +++ b/drivers/acpi/cm_sbs.c @@ -0,0 +1,105 @@ +/* + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/acpi.h> +#include <linux/types.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <acpi/acpi_bus.h> +#include <acpi/acpi_drivers.h> + +#define PREFIX "ACPI: " + +ACPI_MODULE_NAME("cm_sbs"); +#define ACPI_AC_CLASS "ac_adapter" +#define ACPI_BATTERY_CLASS "battery" +#define _COMPONENT ACPI_SBS_COMPONENT +static struct proc_dir_entry *acpi_ac_dir; +static struct proc_dir_entry *acpi_battery_dir; + +static DEFINE_MUTEX(cm_sbs_mutex); + +static int lock_ac_dir_cnt; +static int lock_battery_dir_cnt; + +struct proc_dir_entry *acpi_lock_ac_dir(void) +{ + mutex_lock(&cm_sbs_mutex); + if (!acpi_ac_dir) + acpi_ac_dir = proc_mkdir(ACPI_AC_CLASS, acpi_root_dir); + if (acpi_ac_dir) { + lock_ac_dir_cnt++; + } else { + printk(KERN_ERR PREFIX + "Cannot create %s\n", ACPI_AC_CLASS); + } + mutex_unlock(&cm_sbs_mutex); + return acpi_ac_dir; +} +EXPORT_SYMBOL(acpi_lock_ac_dir); + +void acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir_param) +{ + mutex_lock(&cm_sbs_mutex); + if (acpi_ac_dir_param) + lock_ac_dir_cnt--; + if (lock_ac_dir_cnt == 0 && acpi_ac_dir_param && acpi_ac_dir) { + remove_proc_entry(ACPI_AC_CLASS, acpi_root_dir); + acpi_ac_dir = NULL; + } + mutex_unlock(&cm_sbs_mutex); +} +EXPORT_SYMBOL(acpi_unlock_ac_dir); + +struct proc_dir_entry *acpi_lock_battery_dir(void) +{ + mutex_lock(&cm_sbs_mutex); + if (!acpi_battery_dir) { + acpi_battery_dir = + proc_mkdir(ACPI_BATTERY_CLASS, acpi_root_dir); + } + if (acpi_battery_dir) { + lock_battery_dir_cnt++; + } else { + printk(KERN_ERR PREFIX + "Cannot create %s\n", ACPI_BATTERY_CLASS); + } + mutex_unlock(&cm_sbs_mutex); + return acpi_battery_dir; +} +EXPORT_SYMBOL(acpi_lock_battery_dir); + +void acpi_unlock_battery_dir(struct proc_dir_entry *acpi_battery_dir_param) +{ + mutex_lock(&cm_sbs_mutex); + if (acpi_battery_dir_param) + lock_battery_dir_cnt--; + if (lock_battery_dir_cnt == 0 && acpi_battery_dir_param + && acpi_battery_dir) { + remove_proc_entry(ACPI_BATTERY_CLASS, acpi_root_dir); + acpi_battery_dir = NULL; + } + mutex_unlock(&cm_sbs_mutex); + return; +} +EXPORT_SYMBOL(acpi_unlock_battery_dir); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d7d32c28829..ad11ba4a412 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -206,13 +206,13 @@ unlock: spin_unlock_irqrestore(&ec->lock, flags); } -static int acpi_ec_sync_query(struct acpi_ec *ec); +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) { if (state & ACPI_EC_FLAG_SCI) { if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - return acpi_ec_sync_query(ec); + return acpi_ec_sync_query(ec, NULL); } return 0; } @@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void) EXPORT_SYMBOL(ec_get_handle); -static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); - /* - * Clears stale _Q events that might have accumulated in the EC. + * Process _Q events that might have accumulated in the EC. * Run with locked ec mutex. 
*/ static void acpi_ec_clear(struct acpi_ec *ec) @@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec) u8 value = 0; for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { - status = acpi_ec_query_unlocked(ec, &value); + status = acpi_ec_sync_query(ec, &value); if (status || !value) break; } @@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt) kfree(handler); } -static int acpi_ec_sync_query(struct acpi_ec *ec) +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) { u8 value = 0; int status; struct acpi_ec_query_handler *handler, *copy; - if ((status = acpi_ec_query_unlocked(ec, &value))) + + status = acpi_ec_query_unlocked(ec, &value); + if (data) + *data = value; + if (status) return status; + list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { /* have custom handler for this bit */ @@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt) if (!ec) return; mutex_lock(&ec->mutex); - acpi_ec_sync_query(ec); + acpi_ec_sync_query(ec, NULL); mutex_unlock(&ec->mutex); } diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index c1e31a41f94..25bbc55dca8 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -1278,8 +1278,8 @@ static int __init acpi_thermal_init(void) static void __exit acpi_thermal_exit(void) { - destroy_workqueue(acpi_thermal_pm_queue); acpi_bus_unregister_driver(&acpi_thermal_driver); + destroy_workqueue(acpi_thermal_pm_queue); return; } diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 8b6990e417e..f8bc5a755dd 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -457,10 +457,10 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, { .callback = video_set_use_native_backlight, - .ident = "ThinkPad T430s", + .ident = "ThinkPad T430 and T430s", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430"), }, }, { @@ -472,7 +472,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, }, { - .callback = video_set_use_native_backlight, + .callback = video_set_use_native_backlight, .ident = "ThinkPad X1 Carbon", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), @@ -500,7 +500,7 @@ static struct dmi_system_id video_dmi_table[] __initdata = { .ident = "Dell Inspiron 7520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), }, }, { @@ -513,6 +513,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { }, { .callback = video_set_use_native_backlight, + .ident = "Acer Aspire 5742G", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5742G"), + }, + }, + { + .callback = video_set_use_native_backlight, .ident = "Acer Aspire V5-431", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index c2706047337..0033fafc470 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -815,7 +815,7 @@ config PATA_AT32 config PATA_AT91 tristate "PATA support for AT91SAM9260" - depends on ARM && ARCH_AT91 + depends on ARM && SOC_AT91SAM9 help This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. 
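Several hunks above (drivers/acpi/blacklist.c and drivers/acpi/video.c) extend DMI quirk tables. Below is a minimal sketch of that mechanism with an invented vendor and board name; only the dmi_system_id layout, DMI_MATCH(), and dmi_check_system() are taken as given.

/*
 * Sketch only: the dmi_system_id quirk pattern used in the
 * blacklist.c and video.c hunks above. Vendor/product strings
 * are invented.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>

static int example_quirk(const struct dmi_system_id *d)
{
	pr_info("applying quirk for %s\n", d->ident);
	return 0;
}

static const struct dmi_system_id example_dmi_table[] __initconst = {
	{
		.callback = example_quirk,
		.ident = "Example Laptop 123",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example 123"),
		},
	},
	{}	/* terminating entry */
};

/* run once at init time; returns the number of matching entries,
 * invoking .callback for each match:
 *
 *	dmi_check_system(example_dmi_table);
 */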
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 71e15b73513..60707814a84 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -1115,6 +1115,17 @@ static bool ahci_broken_online(struct pci_dev *pdev) return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff); } +static bool ahci_broken_devslp(struct pci_dev *pdev) +{ + /* device with broken DEVSLP but still showing SDS capability */ + static const struct pci_device_id ids[] = { + { PCI_VDEVICE(INTEL, 0x0f23)}, /* Valleyview SoC */ + {} + }; + + return pci_match_id(ids, pdev); +} + #ifdef CONFIG_ATA_ACPI static void ahci_gtf_filter_workaround(struct ata_host *host) { @@ -1364,6 +1375,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar]; + /* must set flag prior to save config in order to take effect */ + if (ahci_broken_devslp(pdev)) + hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; + /* save initial config */ ahci_pci_save_initial_config(pdev, hpriv); diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index b5eb886da22..af63c75c200 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -236,6 +236,7 @@ enum { port start (wait until error-handling stage) */ AHCI_HFLAG_MULTI_MSI = (1 << 16), /* multiple PCI MSIs */ + AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ /* ap->flags bits */ diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c index 497c7abe1c7..8befeb69eeb 100644 --- a/drivers/ata/ahci_imx.c +++ b/drivers/ata/ahci_imx.c @@ -29,9 +29,25 @@ #include "ahci.h" enum { - PORT_PHY_CTL = 0x178, /* Port0 PHY Control */ - PORT_PHY_CTL_PDDQ_LOC = 0x100000, /* PORT_PHY_CTL bits */ - HOST_TIMER1MS = 0xe0, /* Timer 1-ms */ + /* Timer 1-ms Register */ + IMX_TIMER1MS = 0x00e0, + /* Port0 PHY Control Register */ + IMX_P0PHYCR = 0x0178, + IMX_P0PHYCR_TEST_PDDQ = 1 << 20, + IMX_P0PHYCR_CR_READ = 1 << 19, + IMX_P0PHYCR_CR_WRITE = 1 << 18, + IMX_P0PHYCR_CR_CAP_DATA = 1 << 17, + IMX_P0PHYCR_CR_CAP_ADDR = 1 << 16, + /* Port0 PHY Status Register */ + IMX_P0PHYSR = 0x017c, + IMX_P0PHYSR_CR_ACK = 1 << 18, + IMX_P0PHYSR_CR_DATA_OUT = 0xffff << 0, + /* Lane0 Output Status Register */ + IMX_LANE0_OUT_STAT = 0x2003, + IMX_LANE0_OUT_STAT_RX_PLL_STATE = 1 << 1, + /* Clock Reset Register */ + IMX_CLOCK_RESET = 0x7f3f, + IMX_CLOCK_RESET_RESET = 1 << 0, }; enum ahci_imx_type { @@ -54,9 +70,149 @@ MODULE_PARM_DESC(hotplug, "AHCI IMX hot-plug support (0=Don't support, 1=support static void ahci_imx_host_stop(struct ata_host *host); +static int imx_phy_crbit_assert(void __iomem *mmio, u32 bit, bool assert) +{ + int timeout = 10; + u32 crval; + u32 srval; + + /* Assert or deassert the bit */ + crval = readl(mmio + IMX_P0PHYCR); + if (assert) + crval |= bit; + else + crval &= ~bit; + writel(crval, mmio + IMX_P0PHYCR); + + /* Wait for the cr_ack signal */ + do { + srval = readl(mmio + IMX_P0PHYSR); + if ((assert ? srval : ~srval) & IMX_P0PHYSR_CR_ACK) + break; + usleep_range(100, 200); + } while (--timeout); + + return timeout ? 
0 : -ETIMEDOUT; +} + +static int imx_phy_reg_addressing(u16 addr, void __iomem *mmio) +{ + u32 crval = addr; + int ret; + + /* Supply the address on cr_data_in */ + writel(crval, mmio + IMX_P0PHYCR); + + /* Assert the cr_cap_addr signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, true); + if (ret) + return ret; + + /* Deassert cr_cap_addr */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_ADDR, false); + if (ret) + return ret; + + return 0; +} + +static int imx_phy_reg_write(u16 val, void __iomem *mmio) +{ + u32 crval = val; + int ret; + + /* Supply the data on cr_data_in */ + writel(crval, mmio + IMX_P0PHYCR); + + /* Assert the cr_cap_data signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, true); + if (ret) + return ret; + + /* Deassert cr_cap_data */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_CAP_DATA, false); + if (ret) + return ret; + + if (val & IMX_CLOCK_RESET_RESET) { + /* + * In case we're resetting the phy, it's unable to acknowledge, + * so we return immediately here. + */ + crval |= IMX_P0PHYCR_CR_WRITE; + writel(crval, mmio + IMX_P0PHYCR); + goto out; + } + + /* Assert the cr_write signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, true); + if (ret) + return ret; + + /* Deassert cr_write */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_WRITE, false); + if (ret) + return ret; + +out: + return 0; +} + +static int imx_phy_reg_read(u16 *val, void __iomem *mmio) +{ + int ret; + + /* Assert the cr_read signal */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, true); + if (ret) + return ret; + + /* Capture the data from cr_data_out[] */ + *val = readl(mmio + IMX_P0PHYSR) & IMX_P0PHYSR_CR_DATA_OUT; + + /* Deassert cr_read */ + ret = imx_phy_crbit_assert(mmio, IMX_P0PHYCR_CR_READ, false); + if (ret) + return ret; + + return 0; +} + +static int imx_sata_phy_reset(struct ahci_host_priv *hpriv) +{ + void __iomem *mmio = hpriv->mmio; + int timeout = 10; + u16 val; + int ret; + + /* Reset SATA PHY by setting RESET bit of PHY register CLOCK_RESET */ + ret = imx_phy_reg_addressing(IMX_CLOCK_RESET, mmio); + if (ret) + return ret; + ret = imx_phy_reg_write(IMX_CLOCK_RESET_RESET, mmio); + if (ret) + return ret; + + /* Wait for PHY RX_PLL to be stable */ + do { + usleep_range(100, 200); + ret = imx_phy_reg_addressing(IMX_LANE0_OUT_STAT, mmio); + if (ret) + return ret; + ret = imx_phy_reg_read(&val, mmio); + if (ret) + return ret; + if (val & IMX_LANE0_OUT_STAT_RX_PLL_STATE) + break; + } while (--timeout); + + return timeout ? 0 : -ETIMEDOUT; +} + static int imx_sata_enable(struct ahci_host_priv *hpriv) { struct imx_ahci_priv *imxpriv = hpriv->plat_data; + struct device *dev = &imxpriv->ahci_pdev->dev; int ret; if (imxpriv->no_device) @@ -101,6 +257,14 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv) regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, IMX6Q_GPR13_SATA_MPLL_CLK_EN, IMX6Q_GPR13_SATA_MPLL_CLK_EN); + + usleep_range(100, 200); + + ret = imx_sata_phy_reset(hpriv); + if (ret) { + dev_err(dev, "failed to reset phy: %d\n", ret); + goto disable_regulator; + } } usleep_range(1000, 2000); @@ -156,8 +320,8 @@ static void ahci_imx_error_handler(struct ata_port *ap) * without full reset once the pddq mode is enabled making it * impossible to use as part of libata LPM. 
*/ - reg_val = readl(mmio + PORT_PHY_CTL); - writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL); + reg_val = readl(mmio + IMX_P0PHYCR); + writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR); imx_sata_disable(hpriv); imxpriv->no_device = true; } @@ -217,6 +381,7 @@ static int imx_ahci_probe(struct platform_device *pdev) if (!imxpriv) return -ENOMEM; + imxpriv->ahci_pdev = pdev; imxpriv->no_device = false; imxpriv->first_time = true; imxpriv->type = (enum ahci_imx_type)of_id->data; @@ -248,7 +413,7 @@ static int imx_ahci_probe(struct platform_device *pdev) /* * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, - * and IP vendor specific register HOST_TIMER1MS. + * and IP vendor specific register IMX_TIMER1MS. * Configure CAP_SSS (support stagered spin up). * Implement the port0. * Get the ahb clock rate, and configure the TIMER1MS register. @@ -265,7 +430,7 @@ static int imx_ahci_probe(struct platform_device *pdev) } reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000; - writel(reg_val, hpriv->mmio + HOST_TIMER1MS); + writel(reg_val, hpriv->mmio + IMX_TIMER1MS); ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 0, 0); if (ret) diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 6bd4f660b4e..b9861453fc8 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -452,6 +452,13 @@ void ahci_save_initial_config(struct device *dev, cap &= ~HOST_CAP_SNTF; } + if ((cap2 & HOST_CAP2_SDS) && (hpriv->flags & AHCI_HFLAG_NO_DEVSLP)) { + dev_info(dev, + "controller can't do DEVSLP, turning off\n"); + cap2 &= ~HOST_CAP2_SDS; + cap2 &= ~HOST_CAP2_SADM; + } + if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) { dev_info(dev, "controller can do FBS, turning on CAP_FBS\n"); cap |= HOST_CAP_FBS; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 943cc8b83e5..ea83828bfea 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -6314,6 +6314,8 @@ int ata_host_activate(struct ata_host *host, int irq, static void ata_port_detach(struct ata_port *ap) { unsigned long flags; + struct ata_link *link; + struct ata_device *dev; if (!ap->ops->error_handler) goto skip_eh; @@ -6333,6 +6335,13 @@ static void ata_port_detach(struct ata_port *ap) cancel_delayed_work_sync(&ap->hotplug_task); skip_eh: + /* clean up zpodd on port removal */ + ata_for_each_link(link, ap, HOST_FIRST) { + ata_for_each_dev(dev, link, ALL) { + if (zpodd_dev_enabled(dev)) + zpodd_exit(dev); + } + } if (ap->pmp_link) { int i; for (i = 0; i < SATA_PMP_MAX_PORTS; i++) diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 8986b9f2278..62ec61e8f84 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex); static LIST_HEAD(deferred_probe_pending_list); static LIST_HEAD(deferred_probe_active_list); static struct workqueue_struct *deferred_wq; +static atomic_t deferred_trigger_count = ATOMIC_INIT(0); /** * deferred_probe_work_func() - Retry probing devices in the active list. @@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false; * This functions moves all devices from the pending list to the active * list and schedules the deferred probe workqueue to process them. It * should be called anytime a driver is successfully bound to a device. + * + * Note, there is a race condition in multi-threaded probe. In the case where + * more than one device is probing at the same time, it is possible for one + * probe to complete successfully while another is about to defer. 
If the second + * depends on the first, then it will get put on the pending list after the + * trigger event has already occured and will be stuck there. + * + * The atomic 'deferred_trigger_count' is used to determine if a successful + * trigger has occurred in the midst of probing a driver. If the trigger count + * changes in the midst of a probe, then deferred processing should be triggered + * again. */ static void driver_deferred_probe_trigger(void) { @@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void) * into the active list so they can be retried by the workqueue */ mutex_lock(&deferred_probe_mutex); + atomic_inc(&deferred_trigger_count); list_splice_tail_init(&deferred_probe_pending_list, &deferred_probe_active_list); mutex_unlock(&deferred_probe_mutex); @@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue); static int really_probe(struct device *dev, struct device_driver *drv) { int ret = 0; + int local_trigger_count = atomic_read(&deferred_trigger_count); atomic_inc(&probe_count); pr_debug("bus: '%s': %s: probing driver %s with device %s\n", @@ -310,6 +324,9 @@ probe_failed: /* Driver requested deferred probing */ dev_info(dev, "Driver %s requests probe deferral\n", drv->name); driver_deferred_probe_add(dev); + /* Did a trigger occur while probing? Need to re-trigger if yes */ + if (local_trigger_count != atomic_read(&deferred_trigger_count)) + driver_deferred_probe_trigger(); } else if (ret != -ENODEV && ret != -ENXIO) { /* driver matched but the probe failed */ printk(KERN_WARNING diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index bc256b64102..7d6e84a5142 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -10,13 +10,13 @@ struct dma_coherent_mem { void *virt_base; dma_addr_t device_base; - phys_addr_t pfn_base; + unsigned long pfn_base; int size; int flags; unsigned long *bitmap; }; -int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void __iomem *mem_base = NULL; @@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ - mem_base = ioremap(bus_addr, size); + mem_base = ioremap(phys_addr, size); if (!mem_base) goto out; @@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, dev->dma_mem->virt_base = mem_base; dev->dma_mem->device_base = device_addr; - dev->dma_mem->pfn_base = PFN_DOWN(bus_addr); + dev->dma_mem->pfn_base = PFN_DOWN(phys_addr); dev->dma_mem->size = pages; dev->dma_mem->flags = flags; @@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, *ret = -ENXIO; if (off < count && user_count <= count - off) { - unsigned pfn = mem->pfn_base + start + off; + unsigned long pfn = mem->pfn_base + start + off; *ret = remap_pfn_range(vma, vma->vm_start, pfn, user_count << PAGE_SHIFT, vma->vm_page_prot); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 0ce39a33b3c..6cd08e145bf 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) /** * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() * @dev: Device to declare coherent memory for - * @bus_addr: Bus address of coherent memory to be declared + * @phys_addr: Physical address of 
coherent memory to be declared * @device_addr: Device address of coherent memory to be declared * @size: Size of coherent memory to be declared * @flags: Flags @@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) * RETURNS: * 0 on success, -errno on failure. */ -int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, +int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags) { void *res; @@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, if (!res) return -ENOMEM; - rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size, + rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, flags); if (rc == 0) devres_add(dev, res); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index e714709704e..5b47210889e 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -13,6 +13,7 @@ #include <linux/string.h> #include <linux/platform_device.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/module.h> #include <linux/init.h> #include <linux/dma-mapping.h> @@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) return -ENXIO; return dev->archdata.irqs[num]; #else - struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num); + struct resource *r; + if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) + return of_irq_get(dev->dev.of_node, num); + + r = platform_get_resource(dev, IORESOURCE_IRQ, num); return r ? r->start : -ENXIO; #endif diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 748dea4f34d..758da2287d9 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1406,7 +1406,7 @@ next_segment: track = block / (floppy->dtype->sects * floppy->type->sect_mult); sector = block % (floppy->dtype->sects * floppy->type->sect_mult); - data = rq->buffer + 512 * cnt; + data = bio_data(rq->bio) + 512 * cnt; #ifdef DEBUG printk("access to track %d, sector %d, with buffer at " "0x%08lx\n", track, sector, data); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 96b629e1f0c..2104b1b4ccd 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1484,7 +1484,7 @@ repeat: ReqCnt = 0; ReqCmd = rq_data_dir(fd_request); ReqBlock = blk_rq_pos(fd_request); - ReqBuffer = fd_request->buffer; + ReqBuffer = bio_data(fd_request->bio); setup_req_params( drive ); do_fd_action( drive ); @@ -1952,7 +1952,7 @@ static int __init atari_floppy_init (void) goto Enomem; } TrackBuffer = DMABuffer + 512; - PhysDMABuffer = virt_to_phys(DMABuffer); + PhysDMABuffer = atari_stram_to_phys(DMABuffer); PhysTrackBuffer = virt_to_phys(TrackBuffer); BufferDrive = BufferSide = BufferTrack = -1; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 73894ca3395..4595c22f33f 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -4080,7 +4080,7 @@ static void cciss_interrupt_mode(ctlr_info_t *h) goto default_int_mode; if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { - err = pci_enable_msix(h->pdev, cciss_msix_entries, 4); + err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4); if (!err) { h->intr[0] = cciss_msix_entries[0].vector; h->intr[1] = cciss_msix_entries[1].vector; @@ -4088,10 +4088,6 @@ static void cciss_interrupt_mode(ctlr_info_t *h) h->intr[3] = cciss_msix_entries[3].vector; h->msix_vector = 1; return; - } - if (err > 0) { - dev_warn(&h->pdev->dev, - "only %d MSI-X vectors 
available\n", err); } else { dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index 90ae4ba8f9e..05a1780ffa8 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -29,7 +29,6 @@ #include <linux/drbd_limits.h> #include <linux/dynamic_debug.h> #include "drbd_int.h" -#include "drbd_wrappers.h" enum al_transaction_types { @@ -204,7 +203,7 @@ int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bd BUG_ON(!bdev->md_bdev); - drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", + dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n", current->comm, current->pid, __func__, (unsigned long long)sector, (rw & WRITE) ? "WRITE" : "READ", (void*)_RET_IP_ ); @@ -276,7 +275,6 @@ bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval return _al_get(device, first, true); } -static bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i) { /* for bios crossing activity log extent boundaries, @@ -846,7 +844,7 @@ void __drbd_set_in_sync(struct drbd_device *device, sector_t sector, int size, int wake_up = 0; unsigned long flags; - if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { + if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { drbd_err(device, "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", (unsigned long long)sector, size); return; @@ -920,7 +918,7 @@ int __drbd_set_out_of_sync(struct drbd_device *device, sector_t sector, int size if (size == 0) return 0; - if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { + if (size < 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { drbd_err(device, "sector: %llus, size: %d\n", (unsigned long long)sector, size); return 0; @@ -1023,8 +1021,7 @@ int drbd_rs_begin_io(struct drbd_device *device, sector_t sector) unsigned int enr = BM_SECT_TO_EXT(sector); struct bm_extent *bm_ext; int i, sig; - int sa = 200; /* Step aside 200 times, then grab the extent and let app-IO wait. - 200 times -> 20 seconds. */ + bool sa; retry: sig = wait_event_interruptible(device->al_wait, @@ -1035,12 +1032,15 @@ retry: if (test_bit(BME_LOCKED, &bm_ext->flags)) return 0; + /* step aside only while we are above c-min-rate; unless disabled. */ + sa = drbd_rs_c_min_rate_throttle(device); + for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { sig = wait_event_interruptible(device->al_wait, !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) || - test_bit(BME_PRIORITY, &bm_ext->flags)); + (sa && test_bit(BME_PRIORITY, &bm_ext->flags))); - if (sig || (test_bit(BME_PRIORITY, &bm_ext->flags) && sa)) { + if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) { spin_lock_irq(&device->al_lock); if (lc_put(device->resync, &bm_ext->lce) == 0) { bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */ @@ -1052,9 +1052,6 @@ retry: return -EINTR; if (schedule_timeout_interruptible(HZ/10)) return -EINTR; - if (sa && --sa == 0) - drbd_warn(device, "drbd_rs_begin_io() stepped aside for 20sec." 
- "Resync stalled?\n"); goto retry; } } @@ -1288,7 +1285,7 @@ void drbd_rs_failed_io(struct drbd_device *device, sector_t sector, int size) sector_t esector, nr_sectors; int wake_up = 0; - if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { + if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_DISCARD_SIZE) { drbd_err(device, "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", (unsigned long long)sector, size); return; diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index e7093d4291f..a76ceb344d6 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -382,6 +382,12 @@ enum { __EE_CALL_AL_COMPLETE_IO, __EE_MAY_SET_IN_SYNC, + /* is this a TRIM aka REQ_DISCARD? */ + __EE_IS_TRIM, + /* our lower level cannot handle trim, + * and we want to fall back to zeroout instead */ + __EE_IS_TRIM_USE_ZEROOUT, + /* In case a barrier failed, * we need to resubmit without the barrier flag. */ __EE_RESUBMITTED, @@ -405,7 +411,9 @@ enum { }; #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) -#define EE_RESUBMITTED (1<<__EE_RESUBMITTED) +#define EE_IS_TRIM (1<<__EE_IS_TRIM) +#define EE_IS_TRIM_USE_ZEROOUT (1<<__EE_IS_TRIM_USE_ZEROOUT) +#define EE_RESUBMITTED (1<<__EE_RESUBMITTED) #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) #define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS) @@ -579,6 +587,7 @@ struct drbd_resource { struct list_head resources; struct res_opts res_opts; struct mutex conf_update; /* mutex for ready-copy-update of net_conf and disk_conf */ + struct mutex adm_mutex; /* mutex to serialize administrative requests */ spinlock_t req_lock; unsigned susp:1; /* IO suspended by user */ @@ -609,6 +618,7 @@ struct drbd_connection { struct drbd_socket data; /* data/barrier/cstate/parameter packets */ struct drbd_socket meta; /* ping/ack (metadata) packets */ int agreed_pro_version; /* actually used protocol version */ + u32 agreed_features; unsigned long last_received; /* in jiffies, either socket */ unsigned int ko_count; @@ -814,6 +824,28 @@ struct drbd_device { struct submit_worker submit; }; +struct drbd_config_context { + /* assigned from drbd_genlmsghdr */ + unsigned int minor; + /* assigned from request attributes, if present */ + unsigned int volume; +#define VOLUME_UNSPECIFIED (-1U) + /* pointer into the request skb, + * limited lifetime! 
*/ + char *resource_name; + struct nlattr *my_addr; + struct nlattr *peer_addr; + + /* reply buffer */ + struct sk_buff *reply_skb; + /* pointer into reply buffer */ + struct drbd_genlmsghdr *reply_dh; + /* resolved from attributes, if possible */ + struct drbd_device *device; + struct drbd_resource *resource; + struct drbd_connection *connection; +}; + static inline struct drbd_device *minor_to_device(unsigned int minor) { return (struct drbd_device *)idr_find(&drbd_devices, minor); @@ -821,7 +853,7 @@ static inline struct drbd_device *minor_to_device(unsigned int minor) static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device) { - return list_first_entry(&device->peer_devices, struct drbd_peer_device, peer_devices); + return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices); } #define for_each_resource(resource, _resources) \ @@ -1139,6 +1171,12 @@ struct bm_extent { #define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */ #define DRBD_MAX_BIO_SIZE_P95 (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */ +/* For now, don't allow more than one activity log extent worth of data + * to be discarded in one go. We may need to rework drbd_al_begin_io() + * to allow for even larger discard ranges */ +#define DRBD_MAX_DISCARD_SIZE AL_EXTENT_SIZE +#define DRBD_MAX_DISCARD_SECTORS (DRBD_MAX_DISCARD_SIZE >> 9) + extern int drbd_bm_init(struct drbd_device *device); extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits); extern void drbd_bm_cleanup(struct drbd_device *device); @@ -1229,9 +1267,9 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask); extern rwlock_t global_state_lock; extern int conn_lowest_minor(struct drbd_connection *connection); -enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr); +extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor); extern void drbd_destroy_device(struct kref *kref); -extern void drbd_delete_device(struct drbd_device *mdev); +extern void drbd_delete_device(struct drbd_device *device); extern struct drbd_resource *drbd_create_resource(const char *name); extern void drbd_free_resource(struct drbd_resource *resource); @@ -1257,7 +1295,7 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t); /* drbd_nl.c */ -extern int drbd_msg_put_info(const char *info); +extern int drbd_msg_put_info(struct sk_buff *skb, const char *info); extern void drbd_suspend_io(struct drbd_device *device); extern void drbd_resume_io(struct drbd_device *device); extern char *ppsize(char *buf, unsigned long long size); @@ -1283,6 +1321,10 @@ extern void conn_try_outdate_peer_async(struct drbd_connection *connection); extern int drbd_khelper(struct drbd_device *device, char *cmd); /* drbd_worker.c */ +/* bi_end_io handlers */ +extern void drbd_md_io_complete(struct bio *bio, int error); +extern void drbd_peer_request_endio(struct bio *bio, int error); +extern void drbd_request_endio(struct bio *bio, int error); extern int drbd_worker(struct drbd_thread *thi); enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor); void drbd_resync_after_changed(struct drbd_device *device); @@ -1332,16 +1374,20 @@ extern int w_start_resync(struct drbd_work *, int); extern void resync_timer_fn(unsigned long data); extern void start_resync_timer_fn(unsigned long data); +extern void drbd_endio_write_sec_final(struct drbd_peer_request 
*peer_req); + /* drbd_receiver.c */ extern int drbd_receiver(struct drbd_thread *thi); extern int drbd_asender(struct drbd_thread *thi); -extern int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector); +extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device); +extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector); extern int drbd_submit_peer_request(struct drbd_device *, struct drbd_peer_request *, const unsigned, const int); extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *); extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64, sector_t, unsigned int, + bool, gfp_t) __must_hold(local); extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *, int); @@ -1401,6 +1447,37 @@ static inline void drbd_tcp_quickack(struct socket *sock) (char*)&val, sizeof(val)); } +/* sets the number of 512 byte sectors of our virtual device */ +static inline void drbd_set_my_capacity(struct drbd_device *device, + sector_t size) +{ + /* set_capacity(device->this_bdev->bd_disk, size); */ + set_capacity(device->vdisk, size); + device->this_bdev->bd_inode->i_size = (loff_t)size << 9; +} + +/* + * used to submit our private bio + */ +static inline void drbd_generic_make_request(struct drbd_device *device, + int fault_type, struct bio *bio) +{ + __release(local); + if (!bio->bi_bdev) { + printk(KERN_ERR "drbd%d: drbd_generic_make_request: " + "bio->bi_bdev == NULL\n", + device_to_minor(device)); + dump_stack(); + bio_endio(bio, -ENODEV); + return; + } + + if (drbd_insert_fault(device, fault_type)) + bio_endio(bio, -EIO); + else + generic_make_request(bio); +} + void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo); /* drbd_proc.c */ @@ -1410,6 +1487,7 @@ extern const char *drbd_conn_str(enum drbd_conns s); extern const char *drbd_role_str(enum drbd_role s); /* drbd_actlog.c */ +extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i); extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i); extern void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate); extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i); @@ -2144,7 +2222,7 @@ static inline void drbd_md_flush(struct drbd_device *device) static inline struct drbd_connection *first_connection(struct drbd_resource *resource) { - return list_first_entry(&resource->connections, + return list_first_entry_or_null(&resource->connections, struct drbd_connection, connections); } diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 331e5cc1227..960645c26e6 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -1607,8 +1607,8 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long b return bi_rw & REQ_SYNC ? 
DP_RW_SYNC : 0; } -/* Used to send write requests - * R_PRIMARY -> Peer (P_DATA) +/* Used to send write or TRIM aka REQ_DISCARD requests + * R_PRIMARY -> Peer (P_DATA, P_TRIM) */ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req) { @@ -1640,6 +1640,16 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * dp_flags |= DP_SEND_WRITE_ACK; } p->dp_flags = cpu_to_be32(dp_flags); + + if (dp_flags & DP_DISCARD) { + struct p_trim *t = (struct p_trim*)p; + t->size = cpu_to_be32(req->i.size); + err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0); + goto out; + } + + /* our digest is still only over the payload. + * TRIM does not carry any payload. */ if (dgs) drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1); err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size); @@ -1675,6 +1685,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request * ... Be noisy about digest too large ... } */ } +out: mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */ return err; @@ -2570,6 +2581,7 @@ struct drbd_resource *drbd_create_resource(const char *name) INIT_LIST_HEAD(&resource->connections); list_add_tail_rcu(&resource->resources, &drbd_resources); mutex_init(&resource->conf_update); + mutex_init(&resource->adm_mutex); spin_lock_init(&resource->req_lock); return resource; @@ -2687,14 +2699,16 @@ static int init_submitter(struct drbd_device *device) return 0; } -enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned int minor, int vnr) +enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor) { + struct drbd_resource *resource = adm_ctx->resource; struct drbd_connection *connection; struct drbd_device *device; struct drbd_peer_device *peer_device, *tmp_peer_device; struct gendisk *disk; struct request_queue *q; int id; + int vnr = adm_ctx->volume; enum drbd_ret_code err = ERR_NOMEM; device = minor_to_device(minor); @@ -2763,7 +2777,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (id < 0) { if (id == -ENOSPC) { err = ERR_MINOR_EXISTS; - drbd_msg_put_info("requested minor exists already"); + drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already"); } goto out_no_minor_idr; } @@ -2773,7 +2787,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (id < 0) { if (id == -ENOSPC) { err = ERR_MINOR_EXISTS; - drbd_msg_put_info("requested minor exists already"); + drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already"); } goto out_idr_remove_minor; } @@ -2794,7 +2808,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (id < 0) { if (id == -ENOSPC) { err = ERR_INVALID_REQUEST; - drbd_msg_put_info("requested volume exists already"); + drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already"); } goto out_idr_remove_from_resource; } @@ -2803,7 +2817,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_resource *resource, unsigned i if (init_submitter(device)) { err = ERR_NOMEM; - drbd_msg_put_info("unable to create submit workqueue"); + drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue"); goto out_idr_remove_vol; } diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 526414bc2ca..1b35c45c92b 100644 --- a/drivers/block/drbd/drbd_nl.c +++ 
b/drivers/block/drbd/drbd_nl.c @@ -34,7 +34,6 @@ #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" -#include "drbd_wrappers.h" #include <asm/unaligned.h> #include <linux/drbd_limits.h> #include <linux/kthread.h> @@ -82,32 +81,6 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb); /* used blkdev_get_by_path, to claim our meta data device(s) */ static char *drbd_m_holder = "Hands off! this is DRBD's meta data device."; -/* Configuration is strictly serialized, because generic netlink message - * processing is strictly serialized by the genl_lock(). - * Which means we can use one static global drbd_config_context struct. - */ -static struct drbd_config_context { - /* assigned from drbd_genlmsghdr */ - unsigned int minor; - /* assigned from request attributes, if present */ - unsigned int volume; -#define VOLUME_UNSPECIFIED (-1U) - /* pointer into the request skb, - * limited lifetime! */ - char *resource_name; - struct nlattr *my_addr; - struct nlattr *peer_addr; - - /* reply buffer */ - struct sk_buff *reply_skb; - /* pointer into reply buffer */ - struct drbd_genlmsghdr *reply_dh; - /* resolved from attributes, if possible */ - struct drbd_device *device; - struct drbd_resource *resource; - struct drbd_connection *connection; -} adm_ctx; - static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) { genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb)))); @@ -117,9 +90,8 @@ static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only * reason it could fail was no space in skb, and there are 4k available. */ -int drbd_msg_put_info(const char *info) +int drbd_msg_put_info(struct sk_buff *skb, const char *info) { - struct sk_buff *skb = adm_ctx.reply_skb; struct nlattr *nla; int err = -EMSGSIZE; @@ -143,42 +115,46 @@ int drbd_msg_put_info(const char *info) * and per-family private info->pointers. * But we need to stay compatible with older kernels. * If it returns successfully, adm_ctx members are valid. + * + * At this point, we still rely on the global genl_lock(). + * If we want to avoid that, and allow "genl_family.parallel_ops", we may need + * to add additional synchronization against object destruction/modification. */ #define DRBD_ADM_NEED_MINOR 1 #define DRBD_ADM_NEED_RESOURCE 2 #define DRBD_ADM_NEED_CONNECTION 4 -static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, - unsigned flags) +static int drbd_adm_prepare(struct drbd_config_context *adm_ctx, + struct sk_buff *skb, struct genl_info *info, unsigned flags) { struct drbd_genlmsghdr *d_in = info->userhdr; const u8 cmd = info->genlhdr->cmd; int err; - memset(&adm_ctx, 0, sizeof(adm_ctx)); + memset(adm_ctx, 0, sizeof(*adm_ctx)); /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */ if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN)) return -EPERM; - adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); - if (!adm_ctx.reply_skb) { + adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!adm_ctx->reply_skb) { err = -ENOMEM; goto fail; } - adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb, + adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb, info, &drbd_genl_family, 0, cmd); /* put of a few bytes into a fresh skb of >= 4k will always succeed. 
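(For illustration, the generic genetlink reply pattern this relies on — with a placeholder attribute type, not the exact drbd code:

        struct sk_buff *reply = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        void *hdr = reply ? genlmsg_put_reply(reply, info, &drbd_genl_family, 0, cmd) : NULL;

        if (hdr) {
                nla_put_string(reply, ATTR_INFO_TEXT /* placeholder */, "short message");
                genlmsg_end(reply, hdr);
                genlmsg_reply(reply, info);     /* consumes reply */
        }

A fresh NLMSG_GOODSIZE skb is roughly a page, so the header plus a short string attribute cannot overflow it.)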
* but anyways */ - if (!adm_ctx.reply_dh) { + if (!adm_ctx->reply_dh) { err = -ENOMEM; goto fail; } - adm_ctx.reply_dh->minor = d_in->minor; - adm_ctx.reply_dh->ret_code = NO_ERROR; + adm_ctx->reply_dh->minor = d_in->minor; + adm_ctx->reply_dh->ret_code = NO_ERROR; - adm_ctx.volume = VOLUME_UNSPECIFIED; + adm_ctx->volume = VOLUME_UNSPECIFIED; if (info->attrs[DRBD_NLA_CFG_CONTEXT]) { struct nlattr *nla; /* parse and validate only */ @@ -188,111 +164,131 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, /* It was present, and valid, * copy it over to the reply skb. */ - err = nla_put_nohdr(adm_ctx.reply_skb, + err = nla_put_nohdr(adm_ctx->reply_skb, info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len, info->attrs[DRBD_NLA_CFG_CONTEXT]); if (err) goto fail; - /* and assign stuff to the global adm_ctx */ + /* and assign stuff to the adm_ctx */ nla = nested_attr_tb[__nla_type(T_ctx_volume)]; if (nla) - adm_ctx.volume = nla_get_u32(nla); + adm_ctx->volume = nla_get_u32(nla); nla = nested_attr_tb[__nla_type(T_ctx_resource_name)]; if (nla) - adm_ctx.resource_name = nla_data(nla); - adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; - adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; - if ((adm_ctx.my_addr && - nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) || - (adm_ctx.peer_addr && - nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) { + adm_ctx->resource_name = nla_data(nla); + adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; + adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; + if ((adm_ctx->my_addr && + nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) || + (adm_ctx->peer_addr && + nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) { err = -EINVAL; goto fail; } } - adm_ctx.minor = d_in->minor; - adm_ctx.device = minor_to_device(d_in->minor); - if (adm_ctx.resource_name) { - adm_ctx.resource = drbd_find_resource(adm_ctx.resource_name); + adm_ctx->minor = d_in->minor; + adm_ctx->device = minor_to_device(d_in->minor); + + /* We are protected by the global genl_lock(). + * But we may explicitly drop it/retake it in drbd_adm_set_role(), + * so make sure this object stays around. 
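(The resulting lifetime rule, sketched with the names from this patch — an illustration, not new code:

        drbd_adm_prepare():     kref_get(&adm_ctx->device->kref);
        handler:                genl_unlock();
                                mutex_lock(&adm_ctx->resource->adm_mutex);
                                ... use adm_ctx->device safely ...
                                mutex_unlock(&adm_ctx->resource->adm_mutex);
                                genl_lock();
        drbd_adm_finish():      kref_put(&adm_ctx->device->kref, drbd_destroy_device);

The get in prepare is what keeps the device valid across the unlocked window.)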
*/ + if (adm_ctx->device) + kref_get(&adm_ctx->device->kref); + + if (adm_ctx->resource_name) { + adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name); } - if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) { - drbd_msg_put_info("unknown minor"); + if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor"); return ERR_MINOR_INVALID; } - if (!adm_ctx.resource && (flags & DRBD_ADM_NEED_RESOURCE)) { - drbd_msg_put_info("unknown resource"); - if (adm_ctx.resource_name) + if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource"); + if (adm_ctx->resource_name) return ERR_RES_NOT_KNOWN; return ERR_INVALID_REQUEST; } if (flags & DRBD_ADM_NEED_CONNECTION) { - if (adm_ctx.resource) { - drbd_msg_put_info("no resource name expected"); + if (adm_ctx->resource) { + drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected"); return ERR_INVALID_REQUEST; } - if (adm_ctx.device) { - drbd_msg_put_info("no minor number expected"); + if (adm_ctx->device) { + drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected"); return ERR_INVALID_REQUEST; } - if (adm_ctx.my_addr && adm_ctx.peer_addr) - adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr), - nla_len(adm_ctx.my_addr), - nla_data(adm_ctx.peer_addr), - nla_len(adm_ctx.peer_addr)); - if (!adm_ctx.connection) { - drbd_msg_put_info("unknown connection"); + if (adm_ctx->my_addr && adm_ctx->peer_addr) + adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr), + nla_len(adm_ctx->my_addr), + nla_data(adm_ctx->peer_addr), + nla_len(adm_ctx->peer_addr)); + if (!adm_ctx->connection) { + drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection"); return ERR_INVALID_REQUEST; } } /* some more paranoia, if the request was over-determined */ - if (adm_ctx.device && adm_ctx.resource && - adm_ctx.device->resource != adm_ctx.resource) { + if (adm_ctx->device && adm_ctx->resource && + adm_ctx->device->resource != adm_ctx->resource) { pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n", - adm_ctx.minor, adm_ctx.resource->name, - adm_ctx.device->resource->name); - drbd_msg_put_info("minor exists in different resource"); + adm_ctx->minor, adm_ctx->resource->name, + adm_ctx->device->resource->name); + drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource"); return ERR_INVALID_REQUEST; } - if (adm_ctx.device && - adm_ctx.volume != VOLUME_UNSPECIFIED && - adm_ctx.volume != adm_ctx.device->vnr) { + if (adm_ctx->device && + adm_ctx->volume != VOLUME_UNSPECIFIED && + adm_ctx->volume != adm_ctx->device->vnr) { pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n", - adm_ctx.minor, adm_ctx.volume, - adm_ctx.device->vnr, - adm_ctx.device->resource->name); - drbd_msg_put_info("minor exists as different volume"); + adm_ctx->minor, adm_ctx->volume, + adm_ctx->device->vnr, + adm_ctx->device->resource->name); + drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume"); return ERR_INVALID_REQUEST; } + /* still, provide adm_ctx->resource always, if possible. */ + if (!adm_ctx->resource) { + adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource + : adm_ctx->connection ? 
adm_ctx->connection->resource : NULL; + if (adm_ctx->resource) + kref_get(&adm_ctx->resource->kref); + } + return NO_ERROR; fail: - nlmsg_free(adm_ctx.reply_skb); - adm_ctx.reply_skb = NULL; + nlmsg_free(adm_ctx->reply_skb); + adm_ctx->reply_skb = NULL; return err; } -static int drbd_adm_finish(struct genl_info *info, int retcode) +static int drbd_adm_finish(struct drbd_config_context *adm_ctx, + struct genl_info *info, int retcode) { - if (adm_ctx.connection) { - kref_put(&adm_ctx.connection->kref, drbd_destroy_connection); - adm_ctx.connection = NULL; + if (adm_ctx->device) { + kref_put(&adm_ctx->device->kref, drbd_destroy_device); + adm_ctx->device = NULL; } - if (adm_ctx.resource) { - kref_put(&adm_ctx.resource->kref, drbd_destroy_resource); - adm_ctx.resource = NULL; + if (adm_ctx->connection) { + kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection); + adm_ctx->connection = NULL; + } + if (adm_ctx->resource) { + kref_put(&adm_ctx->resource->kref, drbd_destroy_resource); + adm_ctx->resource = NULL; } - if (!adm_ctx.reply_skb) + if (!adm_ctx->reply_skb) return -ENOMEM; - adm_ctx.reply_dh->ret_code = retcode; - drbd_adm_send_reply(adm_ctx.reply_skb, info); + adm_ctx->reply_dh->ret_code = retcode; + drbd_adm_send_reply(adm_ctx->reply_skb, info); return 0; } @@ -426,6 +422,14 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connec } rcu_read_unlock(); + if (fp == FP_NOT_AVAIL) { + /* IO Suspending works on the whole resource. + Do it only for one device. */ + vnr = 0; + peer_device = idr_get_next(&connection->peer_devices, &vnr); + drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0)); + } + return fp; } @@ -438,12 +442,13 @@ bool conn_try_outdate_peer(struct drbd_connection *connection) char *ex_to_string; int r; + spin_lock_irq(&connection->resource->req_lock); if (connection->cstate >= C_WF_REPORT_PARAMS) { drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n"); + spin_unlock_irq(&connection->resource->req_lock); return false; } - spin_lock_irq(&connection->resource->req_lock); connect_cnt = connection->connect_cnt; spin_unlock_irq(&connection->resource->req_lock); @@ -654,11 +659,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) put_ldev(device); } } else { - mutex_lock(&device->resource->conf_update); + /* Called from drbd_adm_set_role only. + * We are still holding the conf_update mutex. 
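(One way to make that invariant checkable — an illustrative addition, not in this patch:

        lockdep_assert_held(&device->resource->conf_update);

so a future caller that forgets the mutex trips lockdep instead of silently racing the config update.)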
*/ nc = first_peer_device(device)->connection->net_conf; if (nc) nc->discard_my_data = 0; /* without copy; single bit op is atomic */ - mutex_unlock(&device->resource->conf_update); set_disk_ro(device->vdisk, false); if (get_ldev(device)) { @@ -700,11 +705,12 @@ static const char *from_attrs_err_to_txt(int err) int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct set_role_parms parms; int err; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -715,17 +721,22 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info) err = set_role_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + genl_unlock(); + mutex_lock(&adm_ctx.resource->adm_mutex); if (info->genlhdr->cmd == DRBD_ADM_PRIMARY) retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate); else retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0); + + mutex_unlock(&adm_ctx.resource->adm_mutex); + genl_lock(); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -1104,15 +1115,18 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_ struct request_queue * const q = device->rq_queue; unsigned int max_hw_sectors = max_bio_size >> 9; unsigned int max_segments = 0; + struct request_queue *b = NULL; if (get_ldev_if_state(device, D_ATTACHING)) { - struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue; + b = device->ldev->backing_bdev->bd_disk->queue; max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); rcu_read_lock(); max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs; rcu_read_unlock(); - put_ldev(device); + + blk_set_stacking_limits(&q->limits); + blk_queue_max_write_same_sectors(q, 0); } blk_queue_logical_block_size(q, 512); @@ -1121,8 +1135,25 @@ static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_ blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); - if (get_ldev_if_state(device, D_ATTACHING)) { - struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue; + if (b) { + struct drbd_connection *connection = first_peer_device(device)->connection; + + if (blk_queue_discard(b) && + (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) { + /* For now, don't allow more than one activity log extent worth of data + * to be discarded in one go. We may need to rework drbd_al_begin_io() + * to allow for even larger discard ranges */ + q->limits.max_discard_sectors = DRBD_MAX_DISCARD_SECTORS; + + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + /* REALLY? Is stacking secdiscard "legal"? 
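(Whatever the answer, the cap set above is what matters in practice: assuming the 4 MiB activity-log extents of this tree, DRBD_MAX_DISCARD_SECTORS is (4 << 20) >> 9 = 8192, so a caller's

        blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOIO, 0);     /* generic block-layer usage, not drbd code */

gets chopped by the block layer into bios of at most 8192 sectors — one AL extent per discard bio, matching what drbd_al_begin_io() can cover today.)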
*/ + if (blk_queue_secdiscard(b)) + queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); + } else { + q->limits.max_discard_sectors = 0; + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + queue_flag_clear_unlocked(QUEUE_FLAG_SECDISCARD, q); + } blk_queue_stack_limits(q, b); @@ -1164,8 +1195,14 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device) peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ else peer = DRBD_MAX_BIO_SIZE; - } + /* We may later detach and re-attach on a disconnected Primary. + * Avoid this setting to jump back in that case. + * We want to store what we know the peer DRBD can handle, + * not what the peer IO backend can handle. */ + if (peer > device->peer_max_bio_size) + device->peer_max_bio_size = peer; + } new = min(local, peer); if (device->state.role == R_PRIMARY && new < now) @@ -1258,19 +1295,21 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev) int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct drbd_device *device; struct disk_conf *new_disk_conf, *old_disk_conf; struct fifo_buffer *old_plan = NULL, *new_plan = NULL; int err, fifo_size; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; device = adm_ctx.device; + mutex_lock(&adm_ctx.resource->adm_mutex); /* we also need a disk * to change the options on */ @@ -1294,7 +1333,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) err = disk_conf_from_attrs_for_change(new_disk_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail_unlock; } @@ -1385,12 +1424,15 @@ fail_unlock: success: put_ldev(device); out: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; int err; enum drbd_ret_code retcode; @@ -1406,13 +1448,14 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) enum drbd_state_rv rv; struct net_conf *nc; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto finish; device = adm_ctx.device; + mutex_lock(&adm_ctx.resource->adm_mutex); conn_reconfig_start(first_peer_device(device)->connection); /* if you want to reconfigure, please tear down first */ @@ -1455,7 +1498,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) err = disk_conf_from_attrs(new_disk_conf, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } @@ -1619,7 +1662,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) } if (device->state.conn < C_CONNECTED && - device->state.role == R_PRIMARY && + device->state.role == R_PRIMARY && device->ed_uuid && (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) { drbd_err(device, "Can only attach to data with current UUID=%016llX\n", (unsigned long long)device->ed_uuid); @@ 
-1797,7 +1840,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); put_ldev(device); conn_reconfig_done(first_peer_device(device)->connection); - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; force_diskless_dec: @@ -1819,9 +1863,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) kfree(new_disk_conf); lc_destroy(resync_lru); kfree(new_plan); - + mutex_unlock(&adm_ctx.resource->adm_mutex); finish: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -1860,11 +1904,12 @@ out: * Only then we have finally detached. */ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct detach_parms parms = { }; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -1874,14 +1919,16 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) err = detach_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + mutex_lock(&adm_ctx.resource->adm_mutex); retcode = adm_detach(adm_ctx.device, parms.force_detach); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2055,6 +2102,7 @@ static void free_crypto(struct crypto *crypto) int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct drbd_connection *connection; struct net_conf *old_net_conf, *new_net_conf = NULL; @@ -2063,13 +2111,14 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) int rsr; /* re-sync running */ struct crypto crypto = { }; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; connection = adm_ctx.connection; + mutex_lock(&adm_ctx.resource->adm_mutex); new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); if (!new_net_conf) { @@ -2084,7 +2133,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) old_net_conf = connection->net_conf; if (!old_net_conf) { - drbd_msg_put_info("net conf missing, try connect"); + drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect"); retcode = ERR_INVALID_REQUEST; goto fail; } @@ -2096,7 +2145,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) err = net_conf_from_attrs_for_change(new_net_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } @@ -2167,12 +2216,15 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) done: conn_reconfig_done(connection); out: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct 
drbd_peer_device *peer_device; struct net_conf *old_net_conf, *new_net_conf = NULL; struct crypto crypto = { }; @@ -2182,14 +2234,14 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) int i; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) { - drbd_msg_put_info("connection endpoint(s) missing"); + drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing"); retcode = ERR_INVALID_REQUEST; goto out; } @@ -2215,6 +2267,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) } } + mutex_lock(&adm_ctx.resource->adm_mutex); connection = first_connection(adm_ctx.resource); conn_reconfig_start(connection); @@ -2235,7 +2288,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) err = net_conf_from_attrs(new_net_conf, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } @@ -2284,7 +2337,8 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE); conn_reconfig_done(connection); - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; fail: @@ -2292,8 +2346,9 @@ fail: kfree(new_net_conf); conn_reconfig_done(connection); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2356,13 +2411,14 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct disconnect_parms parms; struct drbd_connection *connection; enum drbd_state_rv rv; enum drbd_ret_code retcode; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -2374,18 +2430,20 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) err = disconnect_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } } + mutex_lock(&adm_ctx.resource->adm_mutex); rv = conn_try_disconnect(connection, parms.force_disconnect); if (rv < SS_SUCCESS) retcode = rv; /* FIXME: Type mismatch. 
*/ else retcode = NO_ERROR; + mutex_unlock(&adm_ctx.resource->adm_mutex); fail: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2407,6 +2465,7 @@ void resync_after_online_grow(struct drbd_device *device) int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct disk_conf *old_disk_conf, *new_disk_conf = NULL; struct resize_parms rs; struct drbd_device *device; @@ -2417,12 +2476,13 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) sector_t u_size; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto fail; + goto finish; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; if (!get_ldev(device)) { retcode = ERR_NO_DISK; @@ -2436,7 +2496,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) err = resize_parms_from_attrs(&rs, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail_ldev; } } @@ -2482,7 +2542,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) goto fail_ldev; } - if (device->state.conn != C_CONNECTED) { + if (device->state.conn != C_CONNECTED && !rs.resize_force) { retcode = ERR_MD_LAYOUT_CONNECTED; goto fail_ldev; } @@ -2528,7 +2588,9 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) } fail: - drbd_adm_finish(info, retcode); + mutex_unlock(&adm_ctx.resource->adm_mutex); + finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; fail_ldev: @@ -2538,11 +2600,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct res_opts res_opts; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -2555,33 +2618,37 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) err = res_opts_from_attrs(&res_opts, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto fail; } + mutex_lock(&adm_ctx.resource->adm_mutex); err = set_resource_options(adm_ctx.resource, &res_opts); if (err) { retcode = ERR_INVALID_REQUEST; if (err == -ENOMEM) retcode = ERR_NOMEM; } + mutex_unlock(&adm_ctx.resource->adm_mutex); fail: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; /* If there is still bitmap IO pending, probably because of a previous @@ -2605,26 +2672,29 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info) } else retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T)); drbd_resume_io(device); - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info, union drbd_state mask, union drbd_state val) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); retcode = drbd_request_state(adm_ctx.device, mask, val); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2639,15 +2709,17 @@ static int drbd_bmio_set_susp_al(struct drbd_device *device) int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; int retcode; /* drbd_ret_code, drbd_state_rv */ struct drbd_device *device; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; /* If there is still bitmap IO pending, probably because of a previous @@ -2674,40 +2746,45 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info) } else retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S)); drbd_resume_io(device); - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO) retcode = ERR_PAUSE_IS_SET; + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; union drbd_dev_state s; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) { s = adm_ctx.device->state; if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) { @@ -2717,9 
+2794,9 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info) retcode = ERR_PAUSE_IS_CLEAR; } } - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2730,15 +2807,17 @@ int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info) int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */ - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); device = adm_ctx.device; if (test_bit(NEW_CUR_UUID, &device->flags)) { drbd_uuid_new_current(device); @@ -2753,9 +2832,9 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO); } drbd_resume_io(device); - + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -2931,10 +3010,11 @@ nla_put_failure: int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -2946,7 +3026,7 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info) return err; } out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -3133,11 +3213,12 @@ dump: int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct timeout_parms tp; int err; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3154,17 +3235,18 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info) return err; } out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; enum drbd_ret_code retcode; struct start_ov_parms parms; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3179,10 +3261,12 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) int err = start_ov_parms_from_attrs(&parms, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } } + mutex_lock(&adm_ctx.resource->adm_mutex); + /* w_make_ov_request expects position to be aligned */ device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1); device->ov_stop_sector = parms.ov_stop_sector; @@ -3193,21 +3277,24 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info) wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags)); retcode = 
drbd_request_state(device, NS(conn, C_VERIFY_S)); drbd_resume_io(device); + + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_device *device; enum drbd_ret_code retcode; int skip_initial_sync = 0; int err; struct new_c_uuid_parms args; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3219,11 +3306,12 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) err = new_c_uuid_parms_from_attrs(&args, info); if (err) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out_nolock; } } + mutex_lock(&adm_ctx.resource->adm_mutex); mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */ if (!get_ldev(device)) { @@ -3268,22 +3356,24 @@ out_dec: put_ldev(device); out: mutex_unlock(device->state_mutex); + mutex_unlock(&adm_ctx.resource->adm_mutex); out_nolock: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } static enum drbd_ret_code -drbd_check_resource_name(const char *name) +drbd_check_resource_name(struct drbd_config_context *adm_ctx) { + const char *name = adm_ctx->resource_name; if (!name || !name[0]) { - drbd_msg_put_info("resource name missing"); + drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing"); return ERR_MANDATORY_TAG; } /* if we want to use these in sysfs/configfs/debugfs some day, * we must not allow slashes */ if (strchr(name, '/')) { - drbd_msg_put_info("invalid resource name"); + drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name"); return ERR_INVALID_REQUEST; } return NO_ERROR; @@ -3291,11 +3381,12 @@ drbd_check_resource_name(const char *name) int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; struct res_opts res_opts; int err; - retcode = drbd_adm_prepare(skb, info, 0); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) @@ -3305,48 +3396,50 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) err = res_opts_from_attrs(&res_opts, info); if (err && err != -ENOMSG) { retcode = ERR_MANDATORY_TAG; - drbd_msg_put_info(from_attrs_err_to_txt(err)); + drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err)); goto out; } - retcode = drbd_check_resource_name(adm_ctx.resource_name); + retcode = drbd_check_resource_name(&adm_ctx); if (retcode != NO_ERROR) goto out; if (adm_ctx.resource) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) { retcode = ERR_INVALID_REQUEST; - drbd_msg_put_info("resource exists"); + drbd_msg_put_info(adm_ctx.reply_skb, "resource exists"); } /* else: still NO_ERROR */ goto out; } + /* not yet safe for genl_family.parallel_ops */ if (!conn_create(adm_ctx.resource_name, &res_opts)) retcode = ERR_NOMEM; out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_genlmsghdr *dh = info->userhdr; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, 
DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; if (dh->minor > MINORMASK) { - drbd_msg_put_info("requested minor out of range"); + drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range"); retcode = ERR_INVALID_REQUEST; goto out; } if (adm_ctx.volume > DRBD_VOLUME_MAX) { - drbd_msg_put_info("requested volume id out of range"); + drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range"); retcode = ERR_INVALID_REQUEST; goto out; } @@ -3360,9 +3453,11 @@ int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info) goto out; } - retcode = drbd_create_device(adm_ctx.resource, dh->minor, adm_ctx.volume); + mutex_lock(&adm_ctx.resource->adm_mutex); + retcode = drbd_create_device(&adm_ctx, dh->minor); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } @@ -3383,35 +3478,40 @@ static enum drbd_ret_code adm_del_minor(struct drbd_device *device) int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) goto out; + mutex_lock(&adm_ctx.resource->adm_mutex); retcode = adm_del_minor(adm_ctx.device); + mutex_unlock(&adm_ctx.resource->adm_mutex); out: - drbd_adm_finish(info, retcode); + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_resource *resource; struct drbd_connection *connection; struct drbd_device *device; int retcode; /* enum drbd_ret_code rsp. 
enum drbd_state_rv */ unsigned i; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; resource = adm_ctx.resource; + mutex_lock(&resource->adm_mutex); /* demote */ for_each_connection(connection, resource) { struct drbd_peer_device *peer_device; @@ -3419,14 +3519,14 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) idr_for_each_entry(&connection->peer_devices, peer_device, i) { retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0); if (retcode < SS_SUCCESS) { - drbd_msg_put_info("failed to demote"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote"); goto out; } } retcode = conn_try_disconnect(connection, 0); if (retcode < SS_SUCCESS) { - drbd_msg_put_info("failed to disconnect"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect"); goto out; } } @@ -3435,7 +3535,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) idr_for_each_entry(&resource->devices, device, i) { retcode = adm_detach(device, 0); if (retcode < SS_SUCCESS || retcode > NO_ERROR) { - drbd_msg_put_info("failed to detach"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach"); goto out; } } @@ -3453,7 +3553,7 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) retcode = adm_del_minor(device); if (retcode != NO_ERROR) { /* "can not happen" */ - drbd_msg_put_info("failed to delete volume"); + drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume"); goto out; } } @@ -3462,25 +3562,28 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) synchronize_rcu(); drbd_free_resource(resource); retcode = NO_ERROR; - out: - drbd_adm_finish(info, retcode); + mutex_unlock(&resource->adm_mutex); +finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) { + struct drbd_config_context adm_ctx; struct drbd_resource *resource; struct drbd_connection *connection; enum drbd_ret_code retcode; - retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE); + retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE); if (!adm_ctx.reply_skb) return retcode; if (retcode != NO_ERROR) - goto out; + goto finish; resource = adm_ctx.resource; + mutex_lock(&resource->adm_mutex); for_each_connection(connection, resource) { if (connection->cstate > C_STANDALONE) { retcode = ERR_NET_CONFIGURED; @@ -3499,7 +3602,9 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) drbd_free_resource(resource); retcode = NO_ERROR; out: - drbd_adm_finish(info, retcode); + mutex_unlock(&resource->adm_mutex); +finish: + drbd_adm_finish(&adm_ctx, info, retcode); return 0; } diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c index fa672b6df8d..b2d4791498a 100644 --- a/drivers/block/drbd/drbd_nla.c +++ b/drivers/block/drbd/drbd_nla.c @@ -1,4 +1,3 @@ -#include "drbd_wrappers.h" #include <linux/kernel.h> #include <net/netlink.h> #include <linux/drbd_genl_api.h> diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index 2f26e8ffa45..89736bdbbc7 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -116,7 +116,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se /* ------------------------ ~18s average ------------------------ */ i = (device->rs_last_mark + 2) % DRBD_SYNC_MARKS; dt = (jiffies - 
device->rs_mark_time[i]) / HZ; - if (dt > (DRBD_SYNC_MARK_STEP * DRBD_SYNC_MARKS)) + if (dt > 180) stalled = 1; if (!dt) diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h index 3c04ec0ea33..2da9104a385 100644 --- a/drivers/block/drbd/drbd_protocol.h +++ b/drivers/block/drbd/drbd_protocol.h @@ -54,6 +54,11 @@ enum drbd_packet { P_CONN_ST_CHG_REPLY = 0x2b, /* meta sock: Connection side state req reply */ P_RETRY_WRITE = 0x2c, /* Protocol C: retry conflicting write request */ P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */ + /* 0x2e to 0x30 reserved, used in drbd 9 */ + + /* REQ_DISCARD. We used "discard" in different contexts before, + * which is why I chose TRIM here, to disambiguate. */ + P_TRIM = 0x31, P_MAY_IGNORE = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */ P_MAX_OPT_CMD = 0x101, @@ -119,6 +124,11 @@ struct p_data { u32 dp_flags; } __packed; +struct p_trim { + struct p_data p_data; + u32 size; /* == bio->bi_size */ +} __packed; + /* * commands which share a struct: * p_block_ack: @@ -150,6 +160,8 @@ struct p_block_req { * ReportParams */ +#define FF_TRIM 1 + struct p_connection_features { u32 protocol_min; u32 feature_flags; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 68e3992e883..b6c8aaf4931 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -46,9 +46,10 @@ #include "drbd_int.h" #include "drbd_protocol.h" #include "drbd_req.h" - #include "drbd_vli.h" +#define PRO_FEATURES (FF_TRIM) + struct packet_info { enum drbd_packet cmd; unsigned int size; @@ -65,7 +66,7 @@ enum finish_epoch { static int drbd_do_features(struct drbd_connection *connection); static int drbd_do_auth(struct drbd_connection *connection); static int drbd_disconnected(struct drbd_peer_device *); - +static void conn_wait_active_ee_empty(struct drbd_connection *connection); static enum finish_epoch drbd_may_finish_epoch(struct drbd_connection *, struct drbd_epoch *, enum epoch_event); static int e_end_block(struct drbd_work *, int); @@ -234,9 +235,17 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_device *device) * @retry: whether to retry, if not enough pages are available right now * * Tries to allocate number pages, first from our own page pool, then from - * the kernel, unless this allocation would exceed the max_buffers setting. + * the kernel. * Possibly retry until DRBD frees sufficient pages somewhere else. * + * If this allocation would exceed the max_buffers setting, we throttle + * allocation (schedule_timeout) to give the system some room to breathe. + * + * We do not use max-buffers as hard limit, because it could lead to + * congestion and further to a distributed deadlock during online-verify or + * (checksum based) resync, if the max-buffers, socket buffer sizes and + * resync-rate settings are mis-configured. + * * Returns a page chain linked via page->private. */ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int number, @@ -246,10 +255,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int struct page *page = NULL; struct net_conf *nc; DEFINE_WAIT(wait); - int mxb; + unsigned int mxb; - /* Yes, we may run up to @number over max_buffers. If we - * follow it strictly, the admin will get it wrong anyways. */ rcu_read_lock(); nc = rcu_dereference(peer_device->connection->net_conf); mxb = nc ? 
nc->max_buffers : 1000000; @@ -277,7 +284,8 @@ struct page *drbd_alloc_pages(struct drbd_peer_device *peer_device, unsigned int break; } - schedule(); + if (schedule_timeout(HZ/10) == 0) + mxb = UINT_MAX; } finish_wait(&drbd_pp_wait, &wait); @@ -331,7 +339,7 @@ You must not have the req_lock: struct drbd_peer_request * drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t sector, - unsigned int data_size, gfp_t gfp_mask) __must_hold(local) + unsigned int data_size, bool has_payload, gfp_t gfp_mask) __must_hold(local) { struct drbd_device *device = peer_device->device; struct drbd_peer_request *peer_req; @@ -348,7 +356,7 @@ drbd_alloc_peer_req(struct drbd_peer_device *peer_device, u64 id, sector_t secto return NULL; } - if (data_size) { + if (has_payload && data_size) { page = drbd_alloc_pages(peer_device, nr_pages, (gfp_mask & __GFP_WAIT)); if (!page) goto fail; @@ -1026,24 +1034,27 @@ randomize: if (drbd_send_protocol(connection) == -EOPNOTSUPP) return -1; + /* Prevent a race between resync-handshake and + * being promoted to Primary. + * + * Grab and release the state mutex, so we know that any current + * drbd_set_role() is finished, and any incoming drbd_set_role + * will see the STATE_SENT flag, and wait for it to be cleared. + */ + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) + mutex_lock(peer_device->device->state_mutex); + set_bit(STATE_SENT, &connection->flags); + idr_for_each_entry(&connection->peer_devices, peer_device, vnr) + mutex_unlock(peer_device->device->state_mutex); + rcu_read_lock(); idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { struct drbd_device *device = peer_device->device; kref_get(&device->kref); rcu_read_unlock(); - /* Prevent a race between resync-handshake and - * being promoted to Primary. - * - * Grab and release the state mutex, so we know that any current - * drbd_set_role() is finished, and any incoming drbd_set_role - * will see the STATE_SENT flag, and wait for it to be cleared. - */ - mutex_lock(device->state_mutex); - mutex_unlock(device->state_mutex); - if (discard_my_data) set_bit(DISCARD_MY_DATA, &device->flags); else @@ -1315,6 +1326,20 @@ int drbd_submit_peer_request(struct drbd_device *device, unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; int err = -ENOMEM; + if (peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) { + /* wait for all pending IO completions, before we start + * zeroing things out. */ + conn_wait_active_ee_empty(first_peer_device(device)->connection); + if (blkdev_issue_zeroout(device->ldev->backing_bdev, + sector, ds >> 9, GFP_NOIO)) + peer_req->flags |= EE_WAS_ERROR; + drbd_endio_write_sec_final(peer_req); + return 0; + } + + if (peer_req->flags & EE_IS_TRIM) + nr_pages = 0; /* discards don't have any payload. */ + /* In most cases, we will only need one bio. 
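(TRIM is the degenerate case: a discard the backing device supports becomes a single payload-less bio below — nr_pages = 0, bi_size set directly, then the "goto submit" shortcut — while an unsupported one never reaches this loop at all, having been zeroed out synchronously at the top of this function, roughly:

        conn_wait_active_ee_empty(first_peer_device(device)->connection);
        blkdev_issue_zeroout(device->ldev->backing_bdev, sector, ds >> 9, GFP_NOIO);
        drbd_endio_write_sec_final(peer_req);

— restating the fallback path added above.)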
But in case the lower * level restrictions happen to be different at this offset on this * side than those of the sending peer, we may need to submit the @@ -1326,7 +1351,7 @@ next_bio: bio = bio_alloc(GFP_NOIO, nr_pages); if (!bio) { - drbd_err(device, "submit_ee: Allocation of a bio failed\n"); + drbd_err(device, "submit_ee: Allocation of a bio failed (nr_pages=%u)\n", nr_pages); goto fail; } /* > peer_req->i.sector, unless this is the first bio */ @@ -1340,6 +1365,11 @@ next_bio: bios = bio; ++n_bios; + if (rw & REQ_DISCARD) { + bio->bi_iter.bi_size = ds; + goto submit; + } + page_chain_for_each(page) { unsigned len = min_t(unsigned, ds, PAGE_SIZE); if (!bio_add_page(bio, page, len, 0)) { @@ -1360,8 +1390,9 @@ next_bio: sector += len >> 9; --nr_pages; } - D_ASSERT(device, page == NULL); D_ASSERT(device, ds == 0); +submit: + D_ASSERT(device, page == NULL); atomic_set(&peer_req->pending_bios, n_bios); do { @@ -1490,19 +1521,21 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf * and from receive_Data */ static struct drbd_peer_request * read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, - int data_size) __must_hold(local) + struct packet_info *pi) __must_hold(local) { struct drbd_device *device = peer_device->device; const sector_t capacity = drbd_get_capacity(device->this_bdev); struct drbd_peer_request *peer_req; struct page *page; int dgs, ds, err; + int data_size = pi->size; void *dig_in = peer_device->connection->int_dig_in; void *dig_vv = peer_device->connection->int_dig_vv; unsigned long *data; + struct p_trim *trim = (pi->cmd == P_TRIM) ? pi->data : NULL; dgs = 0; - if (peer_device->connection->peer_integrity_tfm) { + if (!trim && peer_device->connection->peer_integrity_tfm) { dgs = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm); /* * FIXME: Receive the incoming digest into the receive buffer @@ -1514,9 +1547,15 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, data_size -= dgs; } + if (trim) { + D_ASSERT(peer_device, data_size == 0); + data_size = be32_to_cpu(trim->size); + } + if (!expect(IS_ALIGNED(data_size, 512))) return NULL; - if (!expect(data_size <= DRBD_MAX_BIO_SIZE)) + /* prepare for larger trim requests. */ + if (!trim && !expect(data_size <= DRBD_MAX_BIO_SIZE)) return NULL; /* even though we trust our peer, @@ -1532,11 +1571,11 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector, /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place.
*/ - peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, GFP_NOIO); + peer_req = drbd_alloc_peer_req(peer_device, id, sector, data_size, trim == NULL, GFP_NOIO); if (!peer_req) return NULL; - if (!data_size) + if (trim) return peer_req; ds = data_size; @@ -1676,12 +1715,12 @@ static int e_end_resync_block(struct drbd_work *w, int unused) } static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t sector, - int data_size) __releases(local) + struct packet_info *pi) __releases(local) { struct drbd_device *device = peer_device->device; struct drbd_peer_request *peer_req; - peer_req = read_in_block(peer_device, ID_SYNCER, sector, data_size); + peer_req = read_in_block(peer_device, ID_SYNCER, sector, pi); if (!peer_req) goto fail; @@ -1697,7 +1736,7 @@ static int recv_resync_read(struct drbd_peer_device *peer_device, sector_t secto list_add(&peer_req->w.list, &device->sync_ee); spin_unlock_irq(&device->resource->req_lock); - atomic_add(data_size >> 9, &device->rs_sect_ev); + atomic_add(pi->size >> 9, &device->rs_sect_ev); if (drbd_submit_peer_request(device, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) return 0; @@ -1785,7 +1824,7 @@ static int receive_RSDataReply(struct drbd_connection *connection, struct packet /* data is submitted to disk within recv_resync_read. * corresponding put_ldev done below on error, * or in drbd_peer_request_endio. */ - err = recv_resync_read(peer_device, sector, pi->size); + err = recv_resync_read(peer_device, sector, pi); } else { if (__ratelimit(&drbd_ratelimit_state)) drbd_err(device, "Can not write resync data to local disk.\n"); @@ -2196,7 +2235,7 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * */ sector = be64_to_cpu(p->sector); - peer_req = read_in_block(peer_device, p->block_id, sector, pi->size); + peer_req = read_in_block(peer_device, p->block_id, sector, pi); if (!peer_req) { put_ldev(device); return -EIO; @@ -2206,7 +2245,15 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * dp_flags = be32_to_cpu(p->dp_flags); rw |= wire_flags_to_bio(dp_flags); - if (peer_req->pages == NULL) { + if (pi->cmd == P_TRIM) { + struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); + peer_req->flags |= EE_IS_TRIM; + if (!blk_queue_discard(q)) + peer_req->flags |= EE_IS_TRIM_USE_ZEROOUT; + D_ASSERT(peer_device, peer_req->i.size > 0); + D_ASSERT(peer_device, rw & REQ_DISCARD); + D_ASSERT(peer_device, peer_req->pages == NULL); + } else if (peer_req->pages == NULL) { D_ASSERT(device, peer_req->i.size == 0); D_ASSERT(device, dp_flags & DP_FLUSH); } @@ -2242,7 +2289,12 @@ static int receive_Data(struct drbd_connection *connection, struct packet_info * update_peer_seq(peer_device, peer_seq); spin_lock_irq(&device->resource->req_lock); } - list_add(&peer_req->w.list, &device->active_ee); + /* if we use the zeroout fallback code, we process synchronously + * and we wait for all pending requests, respectively wait for + * active_ee to become empty in drbd_submit_peer_request(); + * better not add ourselves here. */ + if ((peer_req->flags & EE_IS_TRIM_USE_ZEROOUT) == 0) + list_add(&peer_req->w.list, &device->active_ee); spin_unlock_irq(&device->resource->req_lock); if (device->state.conn == C_SYNC_TARGET) @@ -2313,39 +2365,45 @@ out_interrupted: * The current sync rate used here uses only the most recent two step marks, * to have a short time average so we can react faster. 
*/ -int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) +bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) { - struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; - unsigned long db, dt, dbdt; struct lc_element *tmp; - int curr_events; - int throttle = 0; - unsigned int c_min_rate; - - rcu_read_lock(); - c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; - rcu_read_unlock(); + bool throttle = true; - /* feature disabled? */ - if (c_min_rate == 0) - return 0; + if (!drbd_rs_c_min_rate_throttle(device)) + return false; spin_lock_irq(&device->al_lock); tmp = lc_find(device->resync, BM_SECT_TO_EXT(sector)); if (tmp) { struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); - if (test_bit(BME_PRIORITY, &bm_ext->flags)) { - spin_unlock_irq(&device->al_lock); - return 0; - } + if (test_bit(BME_PRIORITY, &bm_ext->flags)) + throttle = false; /* Do not slow down if app IO is already waiting for this extent */ } spin_unlock_irq(&device->al_lock); + return throttle; +} + +bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) +{ + struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk; + unsigned long db, dt, dbdt; + unsigned int c_min_rate; + int curr_events; + + rcu_read_lock(); + c_min_rate = rcu_dereference(device->ldev->disk_conf)->c_min_rate; + rcu_read_unlock(); + + /* feature disabled? */ + if (c_min_rate == 0) + return false; + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - atomic_read(&device->rs_sect_ev); - if (!device->rs_last_events || curr_events - device->rs_last_events > 64) { unsigned long rs_left; int i; @@ -2368,12 +2426,11 @@ int drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector) dbdt = Bit2KB(db/dt); if (dbdt > c_min_rate) - throttle = 1; + return true; } - return throttle; + return false; } - static int receive_DataRequest(struct drbd_connection *connection, struct packet_info *pi) { struct drbd_peer_device *peer_device; @@ -2436,7 +2493,8 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. */ - peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, GFP_NOIO); + peer_req = drbd_alloc_peer_req(peer_device, p->block_id, sector, size, + true /* has real payload */, GFP_NOIO); if (!peer_req) { put_ldev(device); return -ENOMEM; @@ -3648,6 +3706,13 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info put_ldev(device); } + device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); + drbd_reconsider_max_bio_size(device); + /* Leave drbd_reconsider_max_bio_size() before drbd_determine_dev_size(). + In case we cleared the QUEUE_FLAG_DISCARD from our queue in + drbd_reconsider_max_bio_size(), we can be sure that after + drbd_determine_dev_size() no REQ_DISCARDs are in the queue. 
*/ + ddsf = be16_to_cpu(p->dds_flags); if (get_ldev(device)) { dd = drbd_determine_dev_size(device, ddsf, NULL); @@ -3660,9 +3725,6 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info drbd_set_my_capacity(device, p_size); } - device->peer_max_bio_size = be32_to_cpu(p->max_bio_size); - drbd_reconsider_max_bio_size(device); - if (get_ldev(device)) { if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev)) { device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); @@ -4423,6 +4485,7 @@ static struct data_cmd drbd_cmd_handler[] = { [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state }, [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, + [P_TRIM] = { 0, sizeof(struct p_trim), receive_Data }, }; static void drbdd(struct drbd_connection *connection) @@ -4630,6 +4693,7 @@ static int drbd_send_features(struct drbd_connection *connection) memset(p, 0, sizeof(*p)); p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); + p->feature_flags = cpu_to_be32(PRO_FEATURES); return conn_send_command(connection, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0); } @@ -4683,10 +4747,14 @@ static int drbd_do_features(struct drbd_connection *connection) goto incompat; connection->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); + connection->agreed_features = PRO_FEATURES & be32_to_cpu(p->feature_flags); drbd_info(connection, "Handshake successful: " "Agreed network protocol version %d\n", connection->agreed_pro_version); + drbd_info(connection, "Agreed to%ssupport TRIM on protocol level\n", + connection->agreed_features & FF_TRIM ? " " : " not "); + return 1; incompat: @@ -4778,6 +4846,12 @@ static int drbd_do_auth(struct drbd_connection *connection) goto fail; } + if (pi.size < CHALLENGE_LEN) { + drbd_err(connection, "AuthChallenge payload too small.\n"); + rv = -1; + goto fail; + } + peers_ch = kmalloc(pi.size, GFP_NOIO); if (peers_ch == NULL) { drbd_err(connection, "kmalloc of peers_ch failed\n"); @@ -4791,6 +4865,12 @@ static int drbd_do_auth(struct drbd_connection *connection) goto fail; } + if (!memcmp(my_challenge, peers_ch, CHALLENGE_LEN)) { + drbd_err(connection, "Peer presented the same challenge!\n"); + rv = -1; + goto fail; + } + resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm); response = kmalloc(resp_size, GFP_NOIO); if (response == NULL) { diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3779c8d2875..09803d0d520 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -522,6 +522,13 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); break; + case DISCARD_COMPLETED_NOTSUPP: + case DISCARD_COMPLETED_WITH_ERROR: + /* I'd rather not detach from local disk just because it + * failed a REQ_DISCARD. */ + mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); + break; + case QUEUE_FOR_NET_READ: /* READ or READA, and * no local disk, @@ -1235,6 +1242,7 @@ void do_submit(struct work_struct *ws) if (list_empty(&incoming)) break; +skip_fast_path: wait_event(device->al_wait, prepare_al_transaction_nonblock(device, &incoming, &pending)); /* Maybe more was queued, while we prepared the transaction? * Try to stuff them into this transaction as well. 
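[Editor's note] The do_submit() rework continued in the next hunk is easier to see in isolation: requests whose activity-log extent is already hot go out on a fast path, while the first request hitting a cold extent is parked and the code jumps back (skip_fast_path) to the blocking prepare step, so cold extents cannot be starved. A minimal userspace sketch of that split; the struct, the hot/cold predicate, and the extent numbers are purely illustrative, not DRBD's:

#include <stdbool.h>
#include <stdio.h>

struct req { int extent; };

/* stand-in for "drbd_al_begin_io_prepare() returned 0": extent was hot */
static bool extent_is_hot(int extent)
{
	return extent < 4;	/* pretend extents 0..3 are in the activity log */
}

int main(void)
{
	struct req incoming[] = { {1}, {3}, {7}, {2} };
	size_t i;

	for (i = 0; i < sizeof(incoming) / sizeof(incoming[0]); i++) {
		if (extent_is_hot(incoming[i].extent)) {
			/* fast path: extent already hot, submit right away */
			printf("submit request on hot extent %d\n", incoming[i].extent);
		} else {
			/* cold extent: park the request and fall back to the
			 * blocking activity-log transaction */
			printf("park request on cold extent %d\n", incoming[i].extent);
			break;
		}
	}
	return 0;
}

The real code keeps the parked request on a "pending" list and retries the whole batch; the sketch only shows the hot/cold decision that drives the jump.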
@@ -1273,6 +1281,25 @@ void do_submit(struct work_struct *ws) list_del_init(&req->tl_requests); drbd_send_and_submit(device, req); } + + /* If all currently hot activity log extents are kept busy by + * incoming requests, we still must not totally starve new + * requests to cold extents. In that case, prepare one request + * in blocking mode. */ + list_for_each_entry_safe(req, tmp, &incoming, tl_requests) { + list_del_init(&req->tl_requests); + req->rq_state |= RQ_IN_ACT_LOG; + if (!drbd_al_begin_io_prepare(device, &req->i)) { + /* Corresponding extent was hot after all? */ + drbd_send_and_submit(device, req); + } else { + /* Found a request to a cold extent. + * Put on "pending" list, + * and try to cumulate with more. */ + list_add(&req->tl_requests, &pending); + goto skip_fast_path; + } + } } } @@ -1326,23 +1353,35 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct return limit; } -static struct drbd_request *find_oldest_request(struct drbd_connection *connection) +static void find_oldest_requests( + struct drbd_connection *connection, + struct drbd_device *device, + struct drbd_request **oldest_req_waiting_for_peer, + struct drbd_request **oldest_req_waiting_for_disk) { - /* Walk the transfer log, - * and find the oldest not yet completed request */ struct drbd_request *r; + *oldest_req_waiting_for_peer = NULL; + *oldest_req_waiting_for_disk = NULL; list_for_each_entry(r, &connection->transfer_log, tl_requests) { - if (atomic_read(&r->completion_ref)) - return r; + const unsigned s = r->rq_state; + if (!*oldest_req_waiting_for_peer + && ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) + *oldest_req_waiting_for_peer = r; + + if (!*oldest_req_waiting_for_disk + && (s & RQ_LOCAL_PENDING) && r->device == device) + *oldest_req_waiting_for_disk = r; + + if (*oldest_req_waiting_for_peer && *oldest_req_waiting_for_disk) + break; } - return NULL; } void request_timer_fn(unsigned long data) { struct drbd_device *device = (struct drbd_device *) data; struct drbd_connection *connection = first_peer_device(device)->connection; - struct drbd_request *req; /* oldest request */ + struct drbd_request *req_disk, *req_peer; /* oldest request */ struct net_conf *nc; unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ unsigned long now; @@ -1366,8 +1405,8 @@ void request_timer_fn(unsigned long data) now = jiffies; spin_lock_irq(&device->resource->req_lock); - req = find_oldest_request(connection); - if (!req) { + find_oldest_requests(connection, device, &req_peer, &req_disk); + if (req_peer == NULL && req_disk == NULL) { spin_unlock_irq(&device->resource->req_lock); mod_timer(&device->request_timer, now + et); return; @@ -1389,19 +1428,26 @@ void request_timer_fn(unsigned long data) * ~198 days with 250 HZ, we have a window where the timeout would need * to expire twice (worst case) to become effective. Good enough. 
*/ - if (ent && req->rq_state & RQ_NET_PENDING && - time_after(now, req->start_time + ent) && + if (ent && req_peer && + time_after(now, req_peer->start_time + ent) && !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) { drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); } - if (dt && req->rq_state & RQ_LOCAL_PENDING && req->device == device && - time_after(now, req->start_time + dt) && + if (dt && req_disk && + time_after(now, req_disk->start_time + dt) && !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { drbd_warn(device, "Local backing device failed to meet the disk-timeout\n"); __drbd_chk_io_error(device, DRBD_FORCE_DETACH); } - nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; + + /* Reschedule timer for the nearest not already expired timeout. + * Fallback to now + min(effective network timeout, disk timeout). */ + ent = (ent && req_peer && time_before(now, req_peer->start_time + ent)) + ? req_peer->start_time + ent : now + et; + dt = (dt && req_disk && time_before(now, req_disk->start_time + dt)) + ? req_disk->start_time + dt : now + et; + nt = time_before(ent, dt) ? ent : dt; spin_unlock_irq(&connection->resource->req_lock); mod_timer(&device->request_timer, nt); } diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index c684c963538..8566cd5866b 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -30,7 +30,6 @@ #include <linux/slab.h> #include <linux/drbd.h> #include "drbd_int.h" -#include "drbd_wrappers.h" /* The request callbacks will be called in irq context by the IDE drivers, and in Softirqs/Tasklets/BH context by the SCSI drivers, @@ -111,11 +110,14 @@ enum drbd_req_event { BARRIER_ACKED, /* in protocol A and B */ DATA_RECEIVED, /* (remote read) */ + COMPLETED_OK, READ_COMPLETED_WITH_ERROR, READ_AHEAD_COMPLETED_WITH_ERROR, WRITE_COMPLETED_WITH_ERROR, + DISCARD_COMPLETED_NOTSUPP, + DISCARD_COMPLETED_WITH_ERROR, + ABORT_DISK_IO, - COMPLETED_OK, RESEND, FAIL_FROZEN_DISK_IO, RESTART_FROZEN_DISK_IO, diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c index 1a84345a386..a5d8aae00e0 100644 --- a/drivers/block/drbd/drbd_state.c +++ b/drivers/block/drbd/drbd_state.c @@ -54,8 +54,8 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state); static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *); static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns); -static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns, - enum sanitize_state_warnings *warn); +static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os, + union drbd_state ns, enum sanitize_state_warnings *warn); static inline bool is_susp(union drbd_state s) { @@ -287,7 +287,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask, spin_lock_irqsave(&device->resource->req_lock, flags); os = drbd_read_state(device); - ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); + ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); rv = is_valid_transition(os, ns); if (rv >= SS_SUCCESS) rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. 
*/ @@ -333,7 +333,7 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask, spin_lock_irqsave(&device->resource->req_lock, flags); os = drbd_read_state(device); - ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); + ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); rv = is_valid_transition(os, ns); if (rv < SS_SUCCESS) { spin_unlock_irqrestore(&device->resource->req_lock, flags); @@ -740,8 +740,8 @@ static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_st * When we loose connection, we have to set the state of the peers disk (pdsk) * to D_UNKNOWN. This rule and many more along those lines are in this function. */ -static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns, - enum sanitize_state_warnings *warn) +static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os, + union drbd_state ns, enum sanitize_state_warnings *warn) { enum drbd_fencing_p fp; enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max; @@ -882,11 +882,13 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st } if (fp == FP_STONITH && - (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED)) + (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) && + !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)) ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */ if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO && - (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) + (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) && + !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE)) ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */ if (ns.aftr_isp || ns.peer_isp || ns.user_isp) { @@ -958,7 +960,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, os = drbd_read_state(device); - ns = sanitize_state(device, ns, &ssw); + ns = sanitize_state(device, os, ns, &ssw); if (ns.i == os.i) return SS_NOTHING_TO_DO; @@ -1656,7 +1658,7 @@ conn_is_valid_transition(struct drbd_connection *connection, union drbd_state ma idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { struct drbd_device *device = peer_device->device; os = drbd_read_state(device); - ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); + ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL); if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED) ns.disk = os.disk; @@ -1718,7 +1720,7 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union number_of_volumes++; os = drbd_read_state(device); ns = apply_mask_val(os, mask, val); - ns = sanitize_state(device, ns, NULL); + ns = sanitize_state(device, os, ns, NULL); if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED) ns.disk = os.disk; @@ -1763,19 +1765,19 @@ conn_set_state(struct drbd_connection *connection, union drbd_state mask, union static enum drbd_state_rv _conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val) { - enum drbd_state_rv rv; + enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */; if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags)) - return SS_CW_SUCCESS; + rv = SS_CW_SUCCESS; if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags)) - return 
SS_CW_FAILED_BY_PEER; + rv = SS_CW_FAILED_BY_PEER; - rv = conn_is_valid_transition(connection, mask, val, 0); - if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS) - rv = SS_UNKNOWN_ERROR; /* continue waiting */ + err = conn_is_valid_transition(connection, mask, val, 0); + if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS) + return rv; - return rv; + return err; } enum drbd_state_rv diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 2c4ce42c365..d8f57b6305c 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -118,7 +118,7 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele /* writes on behalf of the partner, or resync writes, * "submitted" by the receiver, final stage. */ -static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) +void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(local) { unsigned long flags = 0; struct drbd_peer_device *peer_device = peer_req->peer_device; @@ -150,7 +150,9 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee); - if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) + /* FIXME do we want to detach for failed REQ_DISCARD? + * ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */ + if (peer_req->flags & EE_WAS_ERROR) __drbd_chk_io_error(device, DRBD_WRITE_ERROR); spin_unlock_irqrestore(&device->resource->req_lock, flags); @@ -176,10 +178,12 @@ void drbd_peer_request_endio(struct bio *bio, int error) struct drbd_device *device = peer_req->peer_device->device; int uptodate = bio_flagged(bio, BIO_UPTODATE); int is_write = bio_data_dir(bio) == WRITE; + int is_discard = !!(bio->bi_rw & REQ_DISCARD); if (error && __ratelimit(&drbd_ratelimit_state)) drbd_warn(device, "%s: error=%d s=%llus\n", - is_write ? "write" : "read", error, + is_write ? (is_discard ? "discard" : "write") + : "read", error, (unsigned long long)peer_req->i.sector); if (!error && !uptodate) { if (__ratelimit(&drbd_ratelimit_state)) @@ -263,7 +267,12 @@ void drbd_request_endio(struct bio *bio, int error) /* to avoid recursion in __req_mod */ if (unlikely(error)) { - what = (bio_data_dir(bio) == WRITE) + if (bio->bi_rw & REQ_DISCARD) + what = (error == -EOPNOTSUPP) + ? DISCARD_COMPLETED_NOTSUPP + : DISCARD_COMPLETED_WITH_ERROR; + else + what = (bio_data_dir(bio) == WRITE) ? WRITE_COMPLETED_WITH_ERROR : (bio_rw(bio) == READ) ? READ_COMPLETED_WITH_ERROR @@ -395,7 +404,7 @@ static int read_for_csum(struct drbd_peer_device *peer_device, sector_t sector, /* GFP_TRY, because if there is no memory available right now, this may * be rescheduled for later. It is "only" background resync, after all. 
*/ peer_req = drbd_alloc_peer_req(peer_device, ID_SYNCER /* unused */, sector, - size, GFP_TRY); + size, true /* has real payload */, GFP_TRY); if (!peer_req) goto defer; @@ -492,10 +501,9 @@ struct fifo_buffer *fifo_alloc(int fifo_size) return fb; } -static int drbd_rs_controller(struct drbd_device *device) +static int drbd_rs_controller(struct drbd_device *device, unsigned int sect_in) { struct disk_conf *dc; - unsigned int sect_in; /* Number of sectors that came in since the last turn */ unsigned int want; /* The number of sectors we want in the proxy */ int req_sect; /* Number of sectors to request in this turn */ int correction; /* Number of sectors more we need in the proxy*/ @@ -505,9 +513,6 @@ static int drbd_rs_controller(struct drbd_device *device) int max_sect; struct fifo_buffer *plan; - sect_in = atomic_xchg(&device->rs_sect_in, 0); /* Number of sectors that came in */ - device->rs_in_flight -= sect_in; - dc = rcu_dereference(device->ldev->disk_conf); plan = rcu_dereference(device->rs_plan_s); @@ -550,11 +555,16 @@ static int drbd_rs_controller(struct drbd_device *device) static int drbd_rs_number_requests(struct drbd_device *device) { - int number; + unsigned int sect_in; /* Number of sectors that came in since the last turn */ + int number, mxb; + + sect_in = atomic_xchg(&device->rs_sect_in, 0); + device->rs_in_flight -= sect_in; rcu_read_lock(); + mxb = drbd_get_max_buffers(device) / 2; if (rcu_dereference(device->rs_plan_s)->size) { - number = drbd_rs_controller(device) >> (BM_BLOCK_SHIFT - 9); + number = drbd_rs_controller(device, sect_in) >> (BM_BLOCK_SHIFT - 9); device->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME; } else { device->c_sync_rate = rcu_dereference(device->ldev->disk_conf)->resync_rate; @@ -562,8 +572,14 @@ static int drbd_rs_number_requests(struct drbd_device *device) } rcu_read_unlock(); - /* ignore the amount of pending requests, the resync controller should - * throttle down to incoming reply rate soon enough anyways. */ + /* Don't have more than "max-buffers"/2 in-flight. + * Otherwise we may cause the remote site to stall on drbd_alloc_pages(), + * potentially causing a distributed deadlock on congestion during + * online-verify or (checksum-based) resync, if max-buffers, + * socket buffer sizes and resync rate settings are mis-configured. */ + if (mxb - device->rs_in_flight < number) + number = mxb - device->rs_in_flight; + return number; } @@ -597,7 +613,7 @@ static int make_resync_request(struct drbd_device *device, int cancel) max_bio_size = queue_max_hw_sectors(device->rq_queue) << 9; number = drbd_rs_number_requests(device); - if (number == 0) + if (number <= 0) goto requeue; for (i = 0; i < number; i++) { @@ -647,7 +663,7 @@ next_sector: */ align = 1; rollback_i = i; - for (;;) { + while (i < number) { if (size + BM_BLOCK_SIZE > max_bio_size) break; @@ -1670,11 +1686,15 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) } clear_bit(B_RS_H_DONE, &device->flags); - write_lock_irq(&global_state_lock); + /* req_lock: serialize with drbd_send_and_submit() and others + * global_state_lock: for stable sync-after dependencies */ + spin_lock_irq(&device->resource->req_lock); + write_lock(&global_state_lock); /* Did some connection breakage or IO error race with us? 
*/ if (device->state.conn < C_CONNECTED || !get_ldev_if_state(device, D_NEGOTIATING)) { - write_unlock_irq(&global_state_lock); + write_unlock(&global_state_lock); + spin_unlock_irq(&device->resource->req_lock); mutex_unlock(device->state_mutex); return; } @@ -1714,7 +1734,8 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) } _drbd_pause_after(device); } - write_unlock_irq(&global_state_lock); + write_unlock(&global_state_lock); + spin_unlock_irq(&device->resource->req_lock); if (r == SS_SUCCESS) { /* reset rs_last_bcast when a resync or verify is started, @@ -1778,34 +1799,6 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) mutex_unlock(device->state_mutex); } -/* If the resource already closed the current epoch, but we did not - * (because we have not yet seen new requests), we should send the - * corresponding barrier now. Must be checked within the same spinlock - * that is used to check for new requests. */ -static bool need_to_send_barrier(struct drbd_connection *connection) -{ - if (!connection->send.seen_any_write_yet) - return false; - - /* Skip barriers that do not contain any writes. - * This may happen during AHEAD mode. */ - if (!connection->send.current_epoch_writes) - return false; - - /* ->req_lock is held when requests are queued on - * connection->sender_work, and put into ->transfer_log. - * It is also held when ->current_tle_nr is increased. - * So either there are already new requests queued, - * and corresponding barriers will be send there. - * Or nothing new is queued yet, so the difference will be 1. - */ - if (atomic_read(&connection->current_tle_nr) != - connection->send.current_epoch_nr + 1) - return false; - - return true; -} - static bool dequeue_work_batch(struct drbd_work_queue *queue, struct list_head *work_list) { spin_lock_irq(&queue->q_lock); @@ -1864,12 +1857,22 @@ static void wait_for_work(struct drbd_connection *connection, struct list_head * spin_unlock_irq(&connection->resource->req_lock); break; } - send_barrier = need_to_send_barrier(connection); + + /* We found nothing new to do, no to-be-communicated request, + * no other work item. We may still need to close the last + * epoch. Next incoming request epoch will be connection -> + * current transfer log epoch number. If that is different + * from the epoch of the last request we communicated, it is + * safe to send the epoch separating barrier now. + */ + send_barrier = + atomic_read(&connection->current_tle_nr) != + connection->send.current_epoch_nr; spin_unlock_irq(&connection->resource->req_lock); - if (send_barrier) { - drbd_send_barrier(connection); - connection->send.current_epoch_nr++; - } + + if (send_barrier) + maybe_send_barrier(connection, + connection->send.current_epoch_nr + 1); schedule(); /* may be woken up for other things but new work, too, * e.g. if the current epoch got closed. 
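[Editor's note] The max-buffers clamp that drbd_rs_number_requests() gains above is what backs the "distributed deadlock" comment: whatever the rate controller asks for is capped so that no more than max-buffers/2 resync requests stay in flight, and a non-positive result makes make_resync_request() requeue instead of issuing more. A small standalone sketch of that clamp; plain ints and the numbers below are illustrative, not DRBD's types or defaults:

#include <stdio.h>

static int rs_number_requests(int controller_wants, int max_buffers,
			      int rs_in_flight)
{
	int mxb = max_buffers / 2;	/* never keep more than this in flight */
	int number = controller_wants;

	if (mxb - rs_in_flight < number)
		number = mxb - rs_in_flight;
	/* callers treat number <= 0 as "requeue, try again later" */
	return number;
}

int main(void)
{
	printf("%d\n", rs_number_requests(100, 80, 10));	/* clamped to 30 */
	printf("%d\n", rs_number_requests(100, 80, 45));	/* -5: requeue */
	return 0;
}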
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h deleted file mode 100644 index 3db9ebaf64f..00000000000 --- a/drivers/block/drbd/drbd_wrappers.h +++ /dev/null @@ -1,54 +0,0 @@ -#ifndef _DRBD_WRAPPERS_H -#define _DRBD_WRAPPERS_H - -#include <linux/ctype.h> -#include <linux/mm.h> -#include "drbd_int.h" - -/* see get_sb_bdev and bd_claim */ -extern char *drbd_sec_holder; - -/* sets the number of 512 byte sectors of our virtual device */ -static inline void drbd_set_my_capacity(struct drbd_device *device, - sector_t size) -{ - /* set_capacity(device->this_bdev->bd_disk, size); */ - set_capacity(device->vdisk, size); - device->this_bdev->bd_inode->i_size = (loff_t)size << 9; -} - -#define drbd_bio_uptodate(bio) bio_flagged(bio, BIO_UPTODATE) - -/* bi_end_io handlers */ -extern void drbd_md_io_complete(struct bio *bio, int error); -extern void drbd_peer_request_endio(struct bio *bio, int error); -extern void drbd_request_endio(struct bio *bio, int error); - -/* - * used to submit our private bio - */ -static inline void drbd_generic_make_request(struct drbd_device *device, - int fault_type, struct bio *bio) -{ - __release(local); - if (!bio->bi_bdev) { - printk(KERN_ERR "drbd%d: drbd_generic_make_request: " - "bio->bi_bdev == NULL\n", - device_to_minor(device)); - dump_stack(); - bio_endio(bio, -ENODEV); - return; - } - - if (drbd_insert_fault(device, fault_type)) - bio_endio(bio, -EIO); - else - generic_make_request(bio); -} - -#ifndef __CHECKER__ -# undef __cond_lock -# define __cond_lock(x,c) (c) -#endif - -#endif diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 8f5565bf34c..677db049f55 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -2351,7 +2351,7 @@ static void rw_interrupt(void) } if (CT(COMMAND) != FD_READ || - raw_cmd->kernel_data == current_req->buffer) { + raw_cmd->kernel_data == bio_data(current_req->bio)) { /* transfer directly from buffer */ cont->done(1); } else if (CT(COMMAND) == FD_READ) { @@ -2640,7 +2640,7 @@ static int make_raw_rw_request(void) raw_cmd->flags &= ~FD_RAW_WRITE; raw_cmd->flags |= FD_RAW_READ; COMMAND = FM_MODE(_floppy, FD_READ); - } else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) { + } else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) { unsigned long dma_limit; int direct, indirect; @@ -2654,13 +2654,13 @@ static int make_raw_rw_request(void) */ max_size = buffer_chain_size(); dma_limit = (MAX_DMA_ADDRESS - - ((unsigned long)current_req->buffer)) >> 9; + ((unsigned long)bio_data(current_req->bio))) >> 9; if ((unsigned long)max_size > dma_limit) max_size = dma_limit; /* 64 kb boundaries */ - if (CROSS_64KB(current_req->buffer, max_size << 9)) + if (CROSS_64KB(bio_data(current_req->bio), max_size << 9)) max_size = (K_64 - - ((unsigned long)current_req->buffer) % + ((unsigned long)bio_data(current_req->bio)) % K_64) >> 9; direct = transfer_size(ssize, max_sector, max_size) - fsector_t; /* @@ -2677,7 +2677,7 @@ static int make_raw_rw_request(void) (DP->read_track & (1 << DRS->probed_format)))))) { max_size = blk_rq_sectors(current_req); } else { - raw_cmd->kernel_data = current_req->buffer; + raw_cmd->kernel_data = bio_data(current_req->bio); raw_cmd->length = current_count_sectors << 9; if (raw_cmd->length == 0) { DPRINT("%s: zero dma transfer attempted\n", __func__); @@ -2731,7 +2731,7 @@ static int make_raw_rw_request(void) raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1; raw_cmd->length <<= 9; if ((raw_cmd->length < current_count_sectors << 
9) || - (raw_cmd->kernel_data != current_req->buffer && + (raw_cmd->kernel_data != bio_data(current_req->bio) && CT(COMMAND) == FD_WRITE && (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max || aligned_sector_t < buffer_min)) || @@ -2739,7 +2739,7 @@ static int make_raw_rw_request(void) raw_cmd->length <= 0 || current_count_sectors <= 0) { DPRINT("fractionary current count b=%lx s=%lx\n", raw_cmd->length, current_count_sectors); - if (raw_cmd->kernel_data != current_req->buffer) + if (raw_cmd->kernel_data != bio_data(current_req->bio)) pr_info("addr=%d, length=%ld\n", (int)((raw_cmd->kernel_data - floppy_track_buffer) >> 9), @@ -2756,7 +2756,7 @@ static int make_raw_rw_request(void) return 0; } - if (raw_cmd->kernel_data != current_req->buffer) { + if (raw_cmd->kernel_data != bio_data(current_req->bio)) { if (raw_cmd->kernel_data < floppy_track_buffer || current_count_sectors < 0 || raw_cmd->length < 0 || @@ -3067,7 +3067,10 @@ static int raw_cmd_copyout(int cmd, void __user *param, int ret; while (ptr) { - ret = copy_to_user(param, ptr, sizeof(*ptr)); + struct floppy_raw_cmd cmd = *ptr; + cmd.next = NULL; + cmd.kernel_data = NULL; + ret = copy_to_user(param, &cmd, sizeof(cmd)); if (ret) return -EFAULT; param += sizeof(struct floppy_raw_cmd); @@ -3121,10 +3124,11 @@ loop: return -ENOMEM; *rcmd = ptr; ret = copy_from_user(ptr, param, sizeof(*ptr)); - if (ret) - return -EFAULT; ptr->next = NULL; ptr->buffer_length = 0; + ptr->kernel_data = NULL; + if (ret) + return -EFAULT; param += sizeof(struct floppy_raw_cmd); if (ptr->cmd_count > 33) /* the command may now also take up the space @@ -3140,7 +3144,6 @@ loop: for (i = 0; i < 16; i++) ptr->reply[i] = 0; ptr->resultcode = 0; - ptr->kernel_data = NULL; if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) { if (ptr->length <= 0) @@ -3809,7 +3812,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) bio.bi_iter.bi_size = size; bio.bi_bdev = bdev; bio.bi_iter.bi_sector = 0; - bio.bi_flags = (1 << BIO_QUIET); + bio.bi_flags |= (1 << BIO_QUIET); bio.bi_private = &cbdata; bio.bi_end_io = floppy_rb0_cb; diff --git a/drivers/block/hd.c b/drivers/block/hd.c index bf397bf108b..8a290c08262 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c @@ -464,11 +464,11 @@ static void read_intr(void) ok_to_read: req = hd_req; - insw(HD_DATA, req->buffer, 256); + insw(HD_DATA, bio_data(req->bio), 256); #ifdef DEBUG printk("%s: read: sector %ld, remaining = %u, buffer=%p\n", req->rq_disk->disk_name, blk_rq_pos(req) + 1, - blk_rq_sectors(req) - 1, req->buffer+512); + blk_rq_sectors(req) - 1, bio_data(req->bio)+512); #endif if (hd_end_request(0, 512)) { SET_HANDLER(&read_intr); @@ -505,7 +505,7 @@ static void write_intr(void) ok_to_write: if (hd_end_request(0, 512)) { SET_HANDLER(&write_intr); - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); return; } @@ -624,7 +624,7 @@ repeat: printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n", req->rq_disk->disk_name, req_data_dir(req) == READ ? 
"read" : "writ", - cyl, head, sec, nsect, req->buffer); + cyl, head, sec, nsect, bio_data(req->bio)); #endif if (req->cmd_type == REQ_TYPE_FS) { switch (rq_data_dir(req)) { @@ -643,7 +643,7 @@ repeat: bad_rw_intr(); goto repeat; } - outsw(HD_DATA, req->buffer, 256); + outsw(HD_DATA, bio_data(req->bio), 256); break; default: printk("unknown hd-command\n"); diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index eb59b124136..e352cac707e 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c @@ -479,7 +479,7 @@ static unsigned int mg_out(struct mg_host *host, static void mg_read_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -496,7 +496,7 @@ static void mg_read(struct request *req) mg_bad_rw_intr(host); MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - blk_rq_sectors(req), blk_rq_pos(req), req->buffer); + blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio)); do { if (mg_wait(host, ATA_DRQ, @@ -514,7 +514,7 @@ static void mg_read(struct request *req) static void mg_write_one(struct mg_host *host, struct request *req) { - u16 *buff = (u16 *)req->buffer; + u16 *buff = (u16 *)bio_data(req->bio); u32 i; for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) @@ -534,7 +534,7 @@ static void mg_write(struct request *req) } MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", - rem, blk_rq_pos(req), req->buffer); + rem, blk_rq_pos(req), bio_data(req->bio)); if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { @@ -585,7 +585,7 @@ ok_to_read: mg_read_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); + blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio)); /* send read confirm */ outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); @@ -624,7 +624,7 @@ ok_to_write: /* write 1 sector and set handler if remains */ mg_write_one(host, req); MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", - blk_rq_pos(req), blk_rq_sectors(req), req->buffer); + blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio)); host->mg_do_intr = mg_write_intr; mod_timer(&host->timer, jiffies + 3 * HZ); } diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 59c5abe32f0..abc858b3528 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -31,6 +31,7 @@ #include <linux/module.h> #include <linux/genhd.h> #include <linux/blkdev.h> +#include <linux/blk-mq.h> #include <linux/bio.h> #include <linux/dma-mapping.h> #include <linux/idr.h> @@ -173,60 +174,34 @@ static bool mtip_check_surprise_removal(struct pci_dev *pdev) return false; /* device present */ } -/* - * Obtain an empty command slot. - * - * This function needs to be reentrant since it could be called - * at the same time on multiple CPUs. The allocation of the - * command slot must be atomic. - * - * @port Pointer to the port data structure. - * - * return value - * >= 0 Index of command slot obtained. - * -1 No command slots available. - */ -static int get_slot(struct mtip_port *port) +static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd) { - int slot, i; - unsigned int num_command_slots = port->dd->slot_groups * 32; + struct request *rq; - /* - * Try 10 times, because there is a small race here. - * that's ok, because it's still cheaper than a lock. 
- * - * Race: Since this section is not protected by lock, same bit - * could be chosen by different process contexts running in - * different processor. So instead of costly lock, we are going - * with loop. - */ - for (i = 0; i < 10; i++) { - slot = find_next_zero_bit(port->allocated, - num_command_slots, 1); - if ((slot < num_command_slots) && - (!test_and_set_bit(slot, port->allocated))) - return slot; - } - dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n"); + rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true); + return blk_mq_rq_to_pdu(rq); +} - mtip_check_surprise_removal(port->dd->pdev); - return -1; +static void mtip_put_int_command(struct driver_data *dd, struct mtip_cmd *cmd) +{ + blk_put_request(blk_mq_rq_from_pdu(cmd)); } /* - * Release a command slot. - * - * @port Pointer to the port data structure. - * @tag Tag of command to release - * - * return value - * None + * Once we add support for one hctx per mtip group, this will change a bit */ -static inline void release_slot(struct mtip_port *port, int tag) +static struct request *mtip_rq_from_tag(struct driver_data *dd, + unsigned int tag) +{ + return blk_mq_tag_to_rq(dd->queue->queue_hw_ctx[0], tag); +} + +static struct mtip_cmd *mtip_cmd_from_tag(struct driver_data *dd, + unsigned int tag) { - smp_mb__before_clear_bit(); - clear_bit(tag, port->allocated); - smp_mb__after_clear_bit(); + struct request *rq = mtip_rq_from_tag(dd, tag); + + return blk_mq_rq_to_pdu(rq); } /* @@ -248,93 +223,28 @@ static inline void release_slot(struct mtip_port *port, int tag) * None */ static void mtip_async_complete(struct mtip_port *port, - int tag, - void *data, - int status) + int tag, struct mtip_cmd *cmd, int status) { - struct mtip_cmd *cmd; - struct driver_data *dd = data; - int unaligned, cb_status = status ? -EIO : 0; - void (*func)(void *, int); + struct driver_data *dd = port->dd; + struct request *rq; if (unlikely(!dd) || unlikely(!port)) return; - cmd = &port->commands[tag]; - if (unlikely(status == PORT_IRQ_TF_ERR)) { dev_warn(&port->dd->pdev->dev, "Command tag %d failed due to TFE\n", tag); } - /* Clear the active flag */ - atomic_set(&port->commands[tag].active, 0); + /* Unmap the DMA scatter list entries */ + dma_unmap_sg(&dd->pdev->dev, cmd->sg, cmd->scatter_ents, cmd->direction); - /* Upper layer callback */ - func = cmd->async_callback; - if (likely(func && cmpxchg(&cmd->async_callback, func, 0) == func)) { + rq = mtip_rq_from_tag(dd, tag); - /* Unmap the DMA scatter list entries */ - dma_unmap_sg(&dd->pdev->dev, - cmd->sg, - cmd->scatter_ents, - cmd->direction); + if (unlikely(cmd->unaligned)) + up(&port->cmd_slot_unal); - func(cmd->async_data, cb_status); - unaligned = cmd->unaligned; - - /* Clear the allocated bit for the command */ - release_slot(port, tag); - - if (unlikely(unaligned)) - up(&port->cmd_slot_unal); - else - up(&port->cmd_slot); - } -} - -/* - * This function is called for clean the pending command in the - * command slot during the surprise removal of device and return - * error to the upper layer. - * - * @dd Pointer to the DRIVER_DATA structure. 
- * - * return value - * None - */ -static void mtip_command_cleanup(struct driver_data *dd) -{ - int tag = 0; - struct mtip_cmd *cmd; - struct mtip_port *port = dd->port; - unsigned int num_cmd_slots = dd->slot_groups * 32; - - if (!test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) - return; - - if (!port) - return; - - cmd = &port->commands[MTIP_TAG_INTERNAL]; - if (atomic_read(&cmd->active)) - if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & - (1 << MTIP_TAG_INTERNAL)) - if (cmd->comp_func) - cmd->comp_func(port, MTIP_TAG_INTERNAL, - cmd->comp_data, -ENODEV); - - while (1) { - tag = find_next_bit(port->allocated, num_cmd_slots, tag); - if (tag >= num_cmd_slots) - break; - - cmd = &port->commands[tag]; - if (atomic_read(&cmd->active)) - mtip_async_complete(port, tag, dd, -ENODEV); - } - - set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag); + blk_mq_end_io(rq, status ? -EIO : 0); } /* @@ -388,8 +298,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag) { int group = tag >> 5; - atomic_set(&port->commands[tag].active, 1); - /* guard SACT and CI registers */ spin_lock(&port->cmd_issue_lock[group]); writel((1 << MTIP_TAG_BIT(tag)), @@ -397,10 +305,6 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag) writel((1 << MTIP_TAG_BIT(tag)), port->cmd_issue[MTIP_TAG_INDEX(tag)]); spin_unlock(&port->cmd_issue_lock[group]); - - /* Set the command's timeout value.*/ - port->commands[tag].comp_time = jiffies + msecs_to_jiffies( - MTIP_NCQ_COMMAND_TIMEOUT_MS); } /* @@ -648,132 +552,13 @@ static void print_tags(struct driver_data *dd, memset(tagmap, 0, sizeof(tagmap)); for (group = SLOTBITS_IN_LONGS; group > 0; group--) - tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ", + tagmap_len += sprintf(tagmap + tagmap_len, "%016lX ", tagbits[group-1]); dev_warn(&dd->pdev->dev, "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap); } /* - * Called periodically to see if any read/write commands are - * taking too long to complete. - * - * @data Pointer to the PORT data structure. - * - * return value - * None - */ -static void mtip_timeout_function(unsigned long int data) -{ - struct mtip_port *port = (struct mtip_port *) data; - struct host_to_dev_fis *fis; - struct mtip_cmd *cmd; - int unaligned, tag, cmdto_cnt = 0; - unsigned int bit, group; - unsigned int num_command_slots; - unsigned long to, tagaccum[SLOTBITS_IN_LONGS]; - void (*func)(void *, int); - - if (unlikely(!port)) - return; - - if (unlikely(port->dd->sr)) - return; - - if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) { - mod_timer(&port->cmd_timer, - jiffies + msecs_to_jiffies(30000)); - return; - } - /* clear the tag accumulator */ - memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); - num_command_slots = port->dd->slot_groups * 32; - - for (tag = 0; tag < num_command_slots; tag++) { - /* - * Skip internal command slot as it has - * its own timeout mechanism - */ - if (tag == MTIP_TAG_INTERNAL) - continue; - - if (atomic_read(&port->commands[tag].active) && - (time_after(jiffies, port->commands[tag].comp_time))) { - group = tag >> 5; - bit = tag & 0x1F; - - cmd = &port->commands[tag]; - fis = (struct host_to_dev_fis *) cmd->command; - - set_bit(tag, tagaccum); - cmdto_cnt++; - if (cmdto_cnt == 1) - set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); - - /* - * Clear the completed bit. This should prevent - * any interrupt handlers from trying to retire - * the command. 
- */ - writel(1 << bit, port->completed[group]); - - /* Clear the active flag for the command */ - atomic_set(&port->commands[tag].active, 0); - - func = cmd->async_callback; - if (func && - cmpxchg(&cmd->async_callback, func, 0) == func) { - - /* Unmap the DMA scatter list entries */ - dma_unmap_sg(&port->dd->pdev->dev, - cmd->sg, - cmd->scatter_ents, - cmd->direction); - - func(cmd->async_data, -EIO); - unaligned = cmd->unaligned; - - /* Clear the allocated bit for the command. */ - release_slot(port, tag); - - if (unaligned) - up(&port->cmd_slot_unal); - else - up(&port->cmd_slot); - } - } - } - - if (cmdto_cnt) { - print_tags(port->dd, "timed out", tagaccum, cmdto_cnt); - if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { - mtip_device_reset(port->dd); - wake_up_interruptible(&port->svc_wait); - } - clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); - } - - if (port->ic_pause_timer) { - to = port->ic_pause_timer + msecs_to_jiffies(1000); - if (time_after(jiffies, to)) { - if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) { - port->ic_pause_timer = 0; - clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); - clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags); - clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); - wake_up_interruptible(&port->svc_wait); - } - - - } - } - - /* Restart the timer */ - mod_timer(&port->cmd_timer, - jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); -} - -/* * Internal command completion callback function. * * This function is normally called by the driver ISR when an internal @@ -789,28 +574,19 @@ static void mtip_timeout_function(unsigned long int data) * None */ static void mtip_completion(struct mtip_port *port, - int tag, - void *data, - int status) + int tag, struct mtip_cmd *command, int status) { - struct mtip_cmd *command = &port->commands[tag]; - struct completion *waiting = data; + struct completion *waiting = command->comp_data; if (unlikely(status == PORT_IRQ_TF_ERR)) dev_warn(&port->dd->pdev->dev, "Internal command %d completed with TFE\n", tag); - command->async_callback = NULL; - command->comp_func = NULL; - complete(waiting); } static void mtip_null_completion(struct mtip_port *port, - int tag, - void *data, - int status) + int tag, struct mtip_cmd *command, int status) { - return; } static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, @@ -842,19 +618,16 @@ static void mtip_handle_tfe(struct driver_data *dd) port = dd->port; - /* Stop the timer to prevent command timeouts. 
*/ - del_timer(&port->cmd_timer); set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && test_bit(MTIP_TAG_INTERNAL, port->allocated)) { - cmd = &port->commands[MTIP_TAG_INTERNAL]; + cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n"); - atomic_inc(&cmd->active); /* active > 1 indicates error */ if (cmd->comp_data && cmd->comp_func) { cmd->comp_func(port, MTIP_TAG_INTERNAL, - cmd->comp_data, PORT_IRQ_TF_ERR); + cmd, PORT_IRQ_TF_ERR); } goto handle_tfe_exit; } @@ -866,6 +639,8 @@ static void mtip_handle_tfe(struct driver_data *dd) for (group = 0; group < dd->slot_groups; group++) { completed = readl(port->completed[group]); + dev_warn(&dd->pdev->dev, "g=%u, comp=%x\n", group, completed); + /* clear completed status register in the hardware.*/ writel(completed, port->completed[group]); @@ -879,15 +654,11 @@ static void mtip_handle_tfe(struct driver_data *dd) if (tag == MTIP_TAG_INTERNAL) continue; - cmd = &port->commands[tag]; + cmd = mtip_cmd_from_tag(dd, tag); if (likely(cmd->comp_func)) { set_bit(tag, tagaccum); cmd_cnt++; - atomic_set(&cmd->active, 0); - cmd->comp_func(port, - tag, - cmd->comp_data, - 0); + cmd->comp_func(port, tag, cmd, 0); } else { dev_err(&port->dd->pdev->dev, "Missing completion func for tag %d", @@ -947,11 +718,7 @@ static void mtip_handle_tfe(struct driver_data *dd) for (bit = 0; bit < 32; bit++) { reissue = 1; tag = (group << 5) + bit; - cmd = &port->commands[tag]; - - /* If the active bit is set re-issue the command */ - if (atomic_read(&cmd->active) == 0) - continue; + cmd = mtip_cmd_from_tag(dd, tag); fis = (struct host_to_dev_fis *)cmd->command; @@ -970,11 +737,9 @@ static void mtip_handle_tfe(struct driver_data *dd) tag, fail_reason != NULL ? 
fail_reason : "unknown"); - atomic_set(&cmd->active, 0); if (cmd->comp_func) { cmd->comp_func(port, tag, - cmd->comp_data, - -ENODATA); + cmd, -ENODATA); } continue; } @@ -997,14 +762,9 @@ static void mtip_handle_tfe(struct driver_data *dd) /* Retire a command that will not be reissued */ dev_warn(&port->dd->pdev->dev, "retiring tag %d\n", tag); - atomic_set(&cmd->active, 0); if (cmd->comp_func) - cmd->comp_func( - port, - tag, - cmd->comp_data, - PORT_IRQ_TF_ERR); + cmd->comp_func(port, tag, cmd, PORT_IRQ_TF_ERR); else dev_warn(&port->dd->pdev->dev, "Bad completion for tag %d\n", @@ -1017,9 +777,6 @@ handle_tfe_exit: /* clear eh_active */ clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); wake_up_interruptible(&port->svc_wait); - - mod_timer(&port->cmd_timer, - jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); } /* @@ -1048,15 +805,10 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group, if (unlikely(tag == MTIP_TAG_INTERNAL)) continue; - command = &port->commands[tag]; - /* make internal callback */ - if (likely(command->comp_func)) { - command->comp_func( - port, - tag, - command->comp_data, - 0); - } else { + command = mtip_cmd_from_tag(dd, tag); + if (likely(command->comp_func)) + command->comp_func(port, tag, command, 0); + else { dev_dbg(&dd->pdev->dev, "Null completion for tag %d", tag); @@ -1081,16 +833,13 @@ static inline void mtip_workq_sdbfx(struct mtip_port *port, int group, static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat) { struct mtip_port *port = dd->port; - struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL]; + struct mtip_cmd *cmd = mtip_cmd_from_tag(dd, MTIP_TAG_INTERNAL); if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) && (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL]) & (1 << MTIP_TAG_INTERNAL))) { if (cmd->comp_func) { - cmd->comp_func(port, - MTIP_TAG_INTERNAL, - cmd->comp_data, - 0); + cmd->comp_func(port, MTIP_TAG_INTERNAL, cmd, 0); return; } } @@ -1103,8 +852,6 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat) */ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat) { - if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) - mtip_handle_tfe(dd); if (unlikely(port_stat & PORT_IRQ_CONNECT)) { dev_warn(&dd->pdev->dev, @@ -1122,6 +869,12 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat) dev_warn(&dd->pdev->dev, "Port stat errors %x unhandled\n", (port_stat & ~PORT_IRQ_HANDLED)); + if (mtip_check_surprise_removal(dd->pdev)) + return; + } + if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) { + set_bit(MTIP_PF_EH_ACTIVE_BIT, &dd->port->flags); + wake_up_interruptible(&dd->port->svc_wait); } } @@ -1222,7 +975,6 @@ static irqreturn_t mtip_irq_handler(int irq, void *instance) static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag) { - atomic_set(&port->commands[tag].active, 1); writel(1 << MTIP_TAG_BIT(tag), port->cmd_issue[MTIP_TAG_INDEX(tag)]); } @@ -1280,6 +1032,8 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) unsigned int n; unsigned int active = 1; + blk_mq_stop_hw_queues(port->dd->queue); + to = jiffies + msecs_to_jiffies(timeout); do { if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) && @@ -1287,8 +1041,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) msleep(20); continue; /* svc thd is actively issuing commands */ } + + msleep(100); + if (mtip_check_surprise_removal(port->dd->pdev)) + goto err_fault; if 
(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag)) - return -EFAULT; + goto err_fault; + /* * Ignore s_active bit 0 of array element 0. * This bit will always be set @@ -1299,11 +1058,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) if (!active) break; - - msleep(20); } while (time_before(jiffies, to)); + blk_mq_start_stopped_hw_queues(port->dd->queue, true); return active ? -EBUSY : 0; +err_fault: + blk_mq_start_stopped_hw_queues(port->dd->queue, true); + return -EFAULT; } /* @@ -1335,10 +1096,9 @@ static int mtip_exec_internal_command(struct mtip_port *port, { struct mtip_cmd_sg *command_sg; DECLARE_COMPLETION_ONSTACK(wait); - int rv = 0, ready2go = 1; - struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; - unsigned long to; + struct mtip_cmd *int_cmd; struct driver_data *dd = port->dd; + int rv = 0; /* Make sure the buffer is 8 byte aligned. This is asic specific. */ if (buffer & 0x00000007) { @@ -1346,19 +1106,8 @@ static int mtip_exec_internal_command(struct mtip_port *port, return -EFAULT; } - to = jiffies + msecs_to_jiffies(timeout); - do { - ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL, - port->allocated); - if (ready2go) - break; - mdelay(100); - } while (time_before(jiffies, to)); - if (!ready2go) { - dev_warn(&dd->pdev->dev, - "Internal cmd active. new cmd [%02X]\n", fis->command); - return -EBUSY; - } + int_cmd = mtip_get_int_command(dd); + set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); port->ic_pause_timer = 0; @@ -1368,10 +1117,11 @@ static int mtip_exec_internal_command(struct mtip_port *port, if (atomic == GFP_KERNEL) { if (fis->command != ATA_CMD_STANDBYNOW1) { /* wait for io to complete if non atomic */ - if (mtip_quiesce_io(port, 5000) < 0) { + if (mtip_quiesce_io(port, + MTIP_QUIESCE_IO_TIMEOUT_MS) < 0) { dev_warn(&dd->pdev->dev, "Failed to quiesce IO\n"); - release_slot(port, MTIP_TAG_INTERNAL); + mtip_put_int_command(dd, int_cmd); clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags); wake_up_interruptible(&port->svc_wait); return -EBUSY; @@ -1416,9 +1166,9 @@ static int mtip_exec_internal_command(struct mtip_port *port, if (atomic == GFP_KERNEL) { /* Wait for the command to complete or timeout. */ - if (wait_for_completion_interruptible_timeout( + if ((rv = wait_for_completion_interruptible_timeout( &wait, - msecs_to_jiffies(timeout)) <= 0) { + msecs_to_jiffies(timeout))) <= 0) { if (rv == -ERESTARTSYS) { /* interrupted */ dev_err(&dd->pdev->dev, "Internal command [%02X] was interrupted after %lu ms\n", @@ -1497,8 +1247,7 @@ static int mtip_exec_internal_command(struct mtip_port *port, } exec_ic_exit: /* Clear the allocated and active bits for the internal command. 
*/ - atomic_set(&int_cmd->active, 0); - release_slot(port, MTIP_TAG_INTERNAL); + mtip_put_int_command(dd, int_cmd); if (rv >= 0 && mtip_pause_ncq(port, fis)) { /* NCQ paused */ return rv; @@ -1529,6 +1278,37 @@ static inline void ata_swap_string(u16 *buf, unsigned int len) be16_to_cpus(&buf[i]); } +static void mtip_set_timeout(struct driver_data *dd, + struct host_to_dev_fis *fis, + unsigned int *timeout, u8 erasemode) +{ + switch (fis->command) { + case ATA_CMD_DOWNLOAD_MICRO: + *timeout = 120000; /* 2 minutes */ + break; + case ATA_CMD_SEC_ERASE_UNIT: + case 0xFC: + if (erasemode) + *timeout = ((*(dd->port->identify + 90) * 2) * 60000); + else + *timeout = ((*(dd->port->identify + 89) * 2) * 60000); + break; + case ATA_CMD_STANDBYNOW1: + *timeout = 120000; /* 2 minutes */ + break; + case 0xF7: + case 0xFA: + *timeout = 60000; /* 60 seconds */ + break; + case ATA_CMD_SMART: + *timeout = 15000; /* 15 seconds */ + break; + default: + *timeout = MTIP_IOCTL_CMD_TIMEOUT_MS; + break; + } +} + /* * Request the device identity information. * @@ -1576,7 +1356,7 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer) sizeof(u16) * ATA_ID_WORDS, 0, GFP_KERNEL, - MTIP_INTERNAL_COMMAND_TIMEOUT_MS) + MTIP_INT_CMD_TIMEOUT_MS) < 0) { rv = -1; goto out; @@ -1644,6 +1424,7 @@ static int mtip_standby_immediate(struct mtip_port *port) int rv; struct host_to_dev_fis fis; unsigned long start; + unsigned int timeout; /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); @@ -1651,6 +1432,8 @@ static int mtip_standby_immediate(struct mtip_port *port) fis.opts = 1 << 7; fis.command = ATA_CMD_STANDBYNOW1; + mtip_set_timeout(port->dd, &fis, &timeout, 0); + start = jiffies; rv = mtip_exec_internal_command(port, &fis, @@ -1659,7 +1442,7 @@ static int mtip_standby_immediate(struct mtip_port *port) 0, 0, GFP_ATOMIC, - 15000); + timeout); dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n", jiffies_to_msecs(jiffies - start)); if (rv) @@ -1705,7 +1488,7 @@ static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer, sectors * ATA_SECT_SIZE, 0, GFP_ATOMIC, - MTIP_INTERNAL_COMMAND_TIMEOUT_MS); + MTIP_INT_CMD_TIMEOUT_MS); } /* @@ -1998,6 +1781,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command) { struct host_to_dev_fis fis; struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG); + unsigned int to; /* Build the FIS. */ memset(&fis, 0, sizeof(struct host_to_dev_fis)); @@ -2011,6 +1795,8 @@ static int exec_drive_task(struct mtip_port *port, u8 *command) fis.cyl_hi = command[5]; fis.device = command[6] & ~0x10; /* Clear the dev bit*/ + mtip_set_timeout(port->dd, &fis, &to, 0); + dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n", __func__, command[0], @@ -2029,7 +1815,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command) 0, 0, GFP_KERNEL, - MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) { + to) < 0) { return -1; } @@ -2069,6 +1855,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, u8 *buf = NULL; dma_addr_t dma_addr = 0; int rv = 0, xfer_sz = command[3]; + unsigned int to; if (xfer_sz) { if (!user_buffer) @@ -2100,6 +1887,8 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, fis.cyl_hi = 0xC2; } + mtip_set_timeout(port->dd, &fis, &to, 0); + if (xfer_sz) reply = (port->rxfis + RX_FIS_PIO_SETUP); else @@ -2122,7 +1911,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, (xfer_sz ? 
ATA_SECT_SIZE * xfer_sz : 0), 0, GFP_KERNEL, - MTIP_IOCTL_COMMAND_TIMEOUT_MS) + to) < 0) { rv = -EFAULT; goto exit_drive_command; @@ -2202,36 +1991,6 @@ static unsigned int implicit_sector(unsigned char command, } return rv; } -static void mtip_set_timeout(struct driver_data *dd, - struct host_to_dev_fis *fis, - unsigned int *timeout, u8 erasemode) -{ - switch (fis->command) { - case ATA_CMD_DOWNLOAD_MICRO: - *timeout = 120000; /* 2 minutes */ - break; - case ATA_CMD_SEC_ERASE_UNIT: - case 0xFC: - if (erasemode) - *timeout = ((*(dd->port->identify + 90) * 2) * 60000); - else - *timeout = ((*(dd->port->identify + 89) * 2) * 60000); - break; - case ATA_CMD_STANDBYNOW1: - *timeout = 120000; /* 2 minutes */ - break; - case 0xF7: - case 0xFA: - *timeout = 60000; /* 60 seconds */ - break; - case ATA_CMD_SMART: - *timeout = 15000; /* 15 seconds */ - break; - default: - *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; - break; - } -} /* * Executes a taskfile @@ -2606,22 +2365,21 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd, * return value * None */ -static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, - int nsect, int nents, int tag, void *callback, - void *data, int dir, int unaligned) +static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, + struct mtip_cmd *command, int nents, + struct blk_mq_hw_ctx *hctx) { struct host_to_dev_fis *fis; struct mtip_port *port = dd->port; - struct mtip_cmd *command = &port->commands[tag]; - int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - u64 start = sector; + int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + u64 start = blk_rq_pos(rq); + unsigned int nsect = blk_rq_sectors(rq); /* Map the scatter list for DMA access */ nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); command->scatter_ents = nents; - command->unaligned = unaligned; /* * The number of retries for this command before it is * reported as a failure to the upper layers. @@ -2632,8 +2390,10 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, fis = command->command; fis->type = 0x27; fis->opts = 1 << 7; - fis->command = - (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE); + if (rq_data_dir(rq) == READ) + fis->command = ATA_CMD_FPDMA_READ; + else + fis->command = ATA_CMD_FPDMA_WRITE; fis->lba_low = start & 0xFF; fis->lba_mid = (start >> 8) & 0xFF; fis->lba_hi = (start >> 16) & 0xFF; @@ -2643,14 +2403,14 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, fis->device = 1 << 6; fis->features = nsect & 0xFF; fis->features_ex = (nsect >> 8) & 0xFF; - fis->sect_count = ((tag << 3) | (tag >> 5)); + fis->sect_count = ((rq->tag << 3) | (rq->tag >> 5)); fis->sect_cnt_ex = 0; fis->control = 0; fis->res2 = 0; fis->res3 = 0; fill_command_sg(dd, command, nents); - if (unaligned) + if (command->unaligned) fis->device |= 1 << 7; /* Populate the command header */ @@ -2668,81 +2428,17 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector, command->direction = dma_dir; /* - * Set the completion function and data for the command passed - * from the upper layer. - */ - command->async_data = data; - command->async_callback = callback; - - /* * To prevent this command from being issued * if an internal command is in progress or error handling is active. 
*/ if (port->flags & MTIP_PF_PAUSE_IO) { - set_bit(tag, port->cmds_to_issue); + set_bit(rq->tag, port->cmds_to_issue); set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); return; } /* Issue the command to the hardware */ - mtip_issue_ncq_command(port, tag); - - return; -} - -/* - * Release a command slot. - * - * @dd Pointer to the driver data structure. - * @tag Slot tag - * - * return value - * None - */ -static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag, - int unaligned) -{ - struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal : - &dd->port->cmd_slot; - release_slot(dd->port, tag); - up(sem); -} - -/* - * Obtain a command slot and return its associated scatter list. - * - * @dd Pointer to the driver data structure. - * @tag Pointer to an int that will receive the allocated command - * slot tag. - * - * return value - * Pointer to the scatter list for the allocated command slot - * or NULL if no command slots are available. - */ -static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd, - int *tag, int unaligned) -{ - struct semaphore *sem = unaligned ? &dd->port->cmd_slot_unal : - &dd->port->cmd_slot; - - /* - * It is possible that, even with this semaphore, a thread - * may think that no command slots are available. Therefore, we - * need to make an attempt to get_slot(). - */ - down(sem); - *tag = get_slot(dd->port); - - if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { - up(sem); - return NULL; - } - if (unlikely(*tag < 0)) { - up(sem); - return NULL; - } - - return dd->port->commands[*tag].sg; + mtip_issue_ncq_command(port, rq->tag); } /* @@ -3113,6 +2809,7 @@ static int mtip_free_orphan(struct driver_data *dd) if (dd->queue) { dd->queue->queuedata = NULL; blk_cleanup_queue(dd->queue); + blk_mq_free_tag_set(&dd->tags); dd->queue = NULL; } } @@ -3270,6 +2967,11 @@ static int mtip_service_thread(void *data) int ret; while (1) { + if (kthread_should_stop() || + test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) + goto st_out; + clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); + /* * the condition is to check neither an internal command is * in progress nor error handling is active */ wait_event_interruptible(port->svc_wait, (port->flags) && !(port->flags & MTIP_PF_PAUSE_IO)); - if (kthread_should_stop()) - goto st_out; - set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); + if (kthread_should_stop() || + test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) + goto st_out; + /* If I am an orphan, start self cleanup */ if (test_bit(MTIP_PF_SR_CLEANUP_BIT, &port->flags)) break; @@ -3290,6 +2993,16 @@ static int mtip_service_thread(void *data) &dd->dd_flag))) goto st_out; +restart_eh: + /* Demux bits: start with error handling */ + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) { + mtip_handle_tfe(dd); + clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); + } + + if (test_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags)) + goto restart_eh; + if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { slot = 1; /* used to restrict the loop to one iteration */ @@ -3319,16 +3032,14 @@ static int mtip_service_thread(void *data) } clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); - } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { + } + + if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) { if (mtip_ftl_rebuild_poll(dd) < 0) set_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag); clear_bit(MTIP_PF_REBUILD_BIT, &port->flags); } - clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags); -
- if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags)) - goto st_out; } /* wait for pci remove to exit */ @@ -3365,7 +3076,6 @@ st_out: */ static void mtip_dma_free(struct driver_data *dd) { - int i; struct mtip_port *port = dd->port; if (port->block1) @@ -3376,13 +3086,6 @@ static void mtip_dma_free(struct driver_data *dd) dmam_free_coherent(&dd->pdev->dev, AHCI_CMD_TBL_SZ, port->command_list, port->command_list_dma); } - - for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) { - if (port->commands[i].command) - dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, - port->commands[i].command, - port->commands[i].command_dma); - } } /* @@ -3396,8 +3099,6 @@ static void mtip_dma_free(struct driver_data *dd) static int mtip_dma_alloc(struct driver_data *dd) { struct mtip_port *port = dd->port; - int i, rv = 0; - u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; /* Allocate dma memory for RX Fis, Identify, and Sector Buffer */ port->block1 = @@ -3430,41 +3131,63 @@ static int mtip_dma_alloc(struct driver_data *dd) port->smart_buf = port->block1 + AHCI_SMARTBUF_OFFSET; port->smart_buf_dma = port->block1_dma + AHCI_SMARTBUF_OFFSET; - /* Setup per command SGL DMA region */ - - /* Point the command headers at the command tables */ - for (i = 0; i < MTIP_MAX_COMMAND_SLOTS; i++) { - port->commands[i].command = - dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, - &port->commands[i].command_dma, GFP_KERNEL); - if (!port->commands[i].command) { - rv = -ENOMEM; - mtip_dma_free(dd); - return rv; - } - memset(port->commands[i].command, 0, CMD_DMA_ALLOC_SZ); - - port->commands[i].command_header = port->command_list + - (sizeof(struct mtip_cmd_hdr) * i); - port->commands[i].command_header_dma = - dd->port->command_list_dma + - (sizeof(struct mtip_cmd_hdr) * i); + return 0; +} - if (host_cap_64) - port->commands[i].command_header->ctbau = - __force_bit2int cpu_to_le32( - (port->commands[i].command_dma >> 16) >> 16); +static int mtip_hw_get_identify(struct driver_data *dd) +{ + struct smart_attr attr242; + unsigned char *buf; + int rv; - port->commands[i].command_header->ctba = - __force_bit2int cpu_to_le32( - port->commands[i].command_dma & 0xFFFFFFFF); + if (mtip_get_identify(dd->port, NULL) < 0) + return -EFAULT; - sg_init_table(port->commands[i].sg, MTIP_MAX_SG); + if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == + MTIP_FTL_REBUILD_MAGIC) { + set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); + return MTIP_FTL_REBUILD_MAGIC; + } + mtip_dump_identify(dd->port); - /* Mark command as currently inactive */ - atomic_set(&dd->port->commands[i].active, 0); + /* check write protect, over temp and rebuild statuses */ + rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, + dd->port->log_buf, + dd->port->log_buf_dma, 1); + if (rv) { + dev_warn(&dd->pdev->dev, + "Error in READ LOG EXT (10h) command\n"); + /* non-critical error, don't fail the load */ + } else { + buf = (unsigned char *)dd->port->log_buf; + if (buf[259] & 0x1) { + dev_info(&dd->pdev->dev, + "Write protect bit is set.\n"); + set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); + } + if (buf[288] == 0xF7) { + dev_info(&dd->pdev->dev, + "Exceeded Tmax, drive in thermal shutdown.\n"); + set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); + } + if (buf[288] == 0xBF) { + dev_info(&dd->pdev->dev, + "Drive indicates rebuild has failed.\n"); + /* TODO */ + } } - return 0; + + /* get write protect progress */ + memset(&attr242, 0, sizeof(struct smart_attr)); + if (mtip_get_smart_attr(dd->port, 242, &attr242)) + dev_warn(&dd->pdev->dev, + "Unable to check
write protect progress\n"); + else + dev_info(&dd->pdev->dev, + "Write protect progress: %u%% (%u blocks)\n", + attr242.cur, le32_to_cpu(attr242.data)); + + return rv; } /* @@ -3481,8 +3204,6 @@ static int mtip_hw_init(struct driver_data *dd) int rv; unsigned int num_command_slots; unsigned long timeout, timetaken; - unsigned char *buf; - struct smart_attr attr242; dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR]; @@ -3513,8 +3234,6 @@ static int mtip_hw_init(struct driver_data *dd) else dd->unal_qdepth = 0; - /* Counting semaphore to track command slot usage */ - sema_init(&dd->port->cmd_slot, num_command_slots - 1 - dd->unal_qdepth); sema_init(&dd->port->cmd_slot_unal, dd->unal_qdepth); /* Spinlock to prevent concurrent issue */ @@ -3599,73 +3318,16 @@ static int mtip_hw_init(struct driver_data *dd) writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN, dd->mmio + HOST_CTL); - init_timer(&dd->port->cmd_timer); init_waitqueue_head(&dd->port->svc_wait); - dd->port->cmd_timer.data = (unsigned long int) dd->port; - dd->port->cmd_timer.function = mtip_timeout_function; - mod_timer(&dd->port->cmd_timer, - jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); - - if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) { rv = -EFAULT; goto out3; } - if (mtip_get_identify(dd->port, NULL) < 0) { - rv = -EFAULT; - goto out3; - } - mtip_dump_identify(dd->port); - - if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == - MTIP_FTL_REBUILD_MAGIC) { - set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags); - return MTIP_FTL_REBUILD_MAGIC; - } - - /* check write protect, over temp and rebuild statuses */ - rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ, - dd->port->log_buf, - dd->port->log_buf_dma, 1); - if (rv) { - dev_warn(&dd->pdev->dev, - "Error in READ LOG EXT (10h) command\n"); - /* non-critical error, don't fail the load */ - } else { - buf = (unsigned char *)dd->port->log_buf; - if (buf[259] & 0x1) { - dev_info(&dd->pdev->dev, - "Write protect bit is set.\n"); - set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag); - } - if (buf[288] == 0xF7) { - dev_info(&dd->pdev->dev, - "Exceeded Tmax, drive in thermal shutdown.\n"); - set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag); - } - if (buf[288] == 0xBF) { - dev_info(&dd->pdev->dev, - "Drive is in security locked state.\n"); - set_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag); - } - } - - /* get write protect progess */ - memset(&attr242, 0, sizeof(struct smart_attr)); - if (mtip_get_smart_attr(dd->port, 242, &attr242)) - dev_warn(&dd->pdev->dev, - "Unable to check write protect progress\n"); - else - dev_info(&dd->pdev->dev, - "Write protect progress: %u%% (%u blocks)\n", - attr242.cur, le32_to_cpu(attr242.data)); return rv; out3: - del_timer_sync(&dd->port->cmd_timer); - /* Disable interrupts on the HBA. */ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN, dd->mmio + HOST_CTL); @@ -3685,6 +3347,22 @@ out1: return rv; } +static void mtip_standby_drive(struct driver_data *dd) +{ + if (dd->sr) + return; + + /* + * Send standby immediate (E0h) to the drive so that it + * saves its state. + */ + if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) && + !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) + if (mtip_standby_immediate(dd->port)) + dev_warn(&dd->pdev->dev, + "STANDBY IMMEDIATE failed\n"); +} + /* * Called to deinitialize an interface. * @@ -3700,12 +3378,6 @@ static int mtip_hw_exit(struct driver_data *dd) * saves its state. 
*/ if (!dd->sr) { - if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags) && - !test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag)) - if (mtip_standby_immediate(dd->port)) - dev_warn(&dd->pdev->dev, - "STANDBY IMMEDIATE failed\n"); - /* de-initialize the port. */ mtip_deinit_port(dd->port); @@ -3714,8 +3386,6 @@ static int mtip_hw_exit(struct driver_data *dd) dd->mmio + HOST_CTL); } - del_timer_sync(&dd->port->cmd_timer); - /* Release the IRQ. */ irq_set_affinity_hint(dd->pdev->irq, NULL); devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd); @@ -4032,100 +3702,138 @@ static const struct block_device_operations mtip_block_ops = { * * @queue Pointer to the request queue. Unused other than to obtain * the driver data structure. - * @bio Pointer to the BIO. + * @rq Pointer to the request. * */ -static void mtip_make_request(struct request_queue *queue, struct bio *bio) +static int mtip_submit_request(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct driver_data *dd = queue->queuedata; - struct scatterlist *sg; - struct bio_vec bvec; - struct bvec_iter iter; - int nents = 0; - int tag = 0, unaligned = 0; + struct driver_data *dd = hctx->queue->queuedata; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + unsigned int nents; if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) { - bio_endio(bio, -ENXIO); - return; + return -ENXIO; } if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) { - bio_endio(bio, -ENODATA); - return; + return -ENODATA; } if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag) && - bio_data_dir(bio))) { - bio_endio(bio, -ENODATA); - return; - } - if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) { - bio_endio(bio, -ENODATA); - return; - } - if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) { - bio_endio(bio, -ENXIO); - return; + rq_data_dir(rq))) { + return -ENODATA; } + if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) + return -ENODATA; + if (test_bit(MTIP_DDF_REBUILD_FAILED_BIT, &dd->dd_flag)) + return -ENXIO; } - if (unlikely(bio->bi_rw & REQ_DISCARD)) { - bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector, - bio_sectors(bio))); - return; - } + if (rq->cmd_flags & REQ_DISCARD) { + int err; - if (unlikely(!bio_has_data(bio))) { - blk_queue_flush(queue, 0); - bio_endio(bio, 0); - return; + err = mtip_send_trim(dd, blk_rq_pos(rq), blk_rq_sectors(rq)); + blk_mq_end_io(rq, err); + return 0; } - if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && - dd->unal_qdepth) { - if (bio->bi_iter.bi_sector % 8 != 0) - /* Unaligned on 4k boundaries */ - unaligned = 1; - else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ - unaligned = 1; + /* Create the scatter list for this request. */ + nents = blk_rq_map_sg(hctx->queue, rq, cmd->sg); + + /* Issue the read/write. */ + mtip_hw_submit_io(dd, rq, cmd, nents, hctx); + return 0; +} + +static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, + struct request *rq) +{ + struct driver_data *dd = hctx->queue->queuedata; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + + if (!dd->unal_qdepth || rq_data_dir(rq) == READ) + return false; + + /* + * If unaligned depth must be limited on this controller, mark it + * as unaligned if the IO isn't on a 4k boundary (start or length).
+ */ + if (blk_rq_sectors(rq) <= 64) { + if ((blk_rq_pos(rq) & 7) || (blk_rq_sectors(rq) & 7)) + cmd->unaligned = 1; } - sg = mtip_hw_get_scatterlist(dd, &tag, unaligned); - if (likely(sg != NULL)) { - blk_queue_bounce(queue, &bio); + if (cmd->unaligned && down_trylock(&dd->port->cmd_slot_unal)) + return true; - if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) { - dev_warn(&dd->pdev->dev, - "Maximum number of SGL entries exceeded\n"); - bio_io_error(bio); - mtip_hw_release_scatterlist(dd, tag, unaligned); - return; - } + return false; +} - /* Create the scatter list for this bio. */ - bio_for_each_segment(bvec, bio, iter) { - sg_set_page(&sg[nents], - bvec.bv_page, - bvec.bv_len, - bvec.bv_offset); - nents++; - } +static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) +{ + int ret; - /* Issue the read/write. */ - mtip_hw_submit_io(dd, - bio->bi_iter.bi_sector, - bio_sectors(bio), - nents, - tag, - bio_endio, - bio, - bio_data_dir(bio), - unaligned); - } else - bio_io_error(bio); + if (mtip_check_unal_depth(hctx, rq)) + return BLK_MQ_RQ_QUEUE_BUSY; + + ret = mtip_submit_request(hctx, rq); + if (!ret) + return BLK_MQ_RQ_QUEUE_OK; + + rq->errors = ret; + return BLK_MQ_RQ_QUEUE_ERROR; +} + +static void mtip_free_cmd(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx) +{ + struct driver_data *dd = data; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + + if (!cmd->command) + return; + + dmam_free_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, + cmd->command, cmd->command_dma); +} + +static int mtip_init_cmd(void *data, struct request *rq, unsigned int hctx_idx, + unsigned int request_idx, unsigned int numa_node) +{ + struct driver_data *dd = data; + struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); + u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; + + cmd->command = dmam_alloc_coherent(&dd->pdev->dev, CMD_DMA_ALLOC_SZ, + &cmd->command_dma, GFP_KERNEL); + if (!cmd->command) + return -ENOMEM; + + memset(cmd->command, 0, CMD_DMA_ALLOC_SZ); + + /* Point the command headers at the command tables. */ + cmd->command_header = dd->port->command_list + + (sizeof(struct mtip_cmd_hdr) * request_idx); + cmd->command_header_dma = dd->port->command_list_dma + + (sizeof(struct mtip_cmd_hdr) * request_idx); + + if (host_cap_64) + cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16); + + cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); + + sg_init_table(cmd->sg, MTIP_MAX_SG); + return 0; } +static struct blk_mq_ops mtip_mq_ops = { + .queue_rq = mtip_queue_rq, + .map_queue = blk_mq_map_queue, + .init_request = mtip_init_cmd, + .exit_request = mtip_free_cmd, +}; + /* * Block layer initialization function. * @@ -4148,11 +3856,7 @@ static int mtip_block_initialize(struct driver_data *dd) if (dd->disk) goto skip_create_disk; /* hw init done, before rebuild */ - /* Initialize the protocol layer. */ - wait_for_rebuild = mtip_hw_init(dd); - if (wait_for_rebuild < 0) { - dev_err(&dd->pdev->dev, - "Protocol layer initialization failed\n"); + if (mtip_hw_init(dd)) { rv = -EINVAL; goto protocol_init_error; } @@ -4194,29 +3898,53 @@ static int mtip_block_initialize(struct driver_data *dd) mtip_hw_debugfs_init(dd); - /* - * if rebuild pending, start the service thread, and delay the block - * queue creation and add_disk() - */ - if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) - goto start_service_thread; - skip_create_disk: - /* Allocate the request queue. 
*/ - dd->queue = blk_alloc_queue_node(GFP_KERNEL, dd->numa_node); - if (dd->queue == NULL) { + memset(&dd->tags, 0, sizeof(dd->tags)); + dd->tags.ops = &mtip_mq_ops; + dd->tags.nr_hw_queues = 1; + dd->tags.queue_depth = MTIP_MAX_COMMAND_SLOTS; + dd->tags.reserved_tags = 1; + dd->tags.cmd_size = sizeof(struct mtip_cmd); + dd->tags.numa_node = dd->numa_node; + dd->tags.flags = BLK_MQ_F_SHOULD_MERGE; + dd->tags.driver_data = dd; + + rv = blk_mq_alloc_tag_set(&dd->tags); + if (rv) { dev_err(&dd->pdev->dev, "Unable to allocate request queue\n"); rv = -ENOMEM; goto block_queue_alloc_init_error; } - /* Attach our request function to the request queue. */ - blk_queue_make_request(dd->queue, mtip_make_request); + /* Allocate the request queue. */ + dd->queue = blk_mq_init_queue(&dd->tags); + if (IS_ERR(dd->queue)) { + dev_err(&dd->pdev->dev, + "Unable to allocate request queue\n"); + rv = -ENOMEM; + goto block_queue_alloc_init_error; + } dd->disk->queue = dd->queue; dd->queue->queuedata = dd; + /* Initialize the protocol layer. */ + wait_for_rebuild = mtip_hw_get_identify(dd); + if (wait_for_rebuild < 0) { + dev_err(&dd->pdev->dev, + "Protocol layer initialization failed\n"); + rv = -EINVAL; + goto init_hw_cmds_error; + } + + /* + * if rebuild pending, start the service thread, and delay the block + * queue creation and add_disk() + */ + if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC) + goto start_service_thread; + /* Set device limits. */ set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); blk_queue_max_segments(dd->queue, MTIP_MAX_SG); @@ -4295,8 +4023,9 @@ kthread_run_error: del_gendisk(dd->disk); read_capacity_error: +init_hw_cmds_error: blk_cleanup_queue(dd->queue); - + blk_mq_free_tag_set(&dd->tags); block_queue_alloc_init_error: mtip_hw_debugfs_exit(dd); disk_index_error: @@ -4345,6 +4074,9 @@ static int mtip_block_remove(struct driver_data *dd) kobject_put(kobj); } } + + mtip_standby_drive(dd); + /* * Delete our gendisk structure. This also removes the device * from /dev @@ -4357,6 +4089,7 @@ static int mtip_block_remove(struct driver_data *dd) if (dd->disk->queue) { del_gendisk(dd->disk); blk_cleanup_queue(dd->queue); + blk_mq_free_tag_set(&dd->tags); dd->queue = NULL; } else put_disk(dd->disk); @@ -4391,6 +4124,8 @@ static int mtip_block_remove(struct driver_data *dd) */ static int mtip_block_shutdown(struct driver_data *dd) { + mtip_hw_shutdown(dd); + /* Delete our gendisk structure, and cleanup the blk queue. 
*/ if (dd->disk) { dev_info(&dd->pdev->dev, @@ -4399,6 +4134,7 @@ static int mtip_block_shutdown(struct driver_data *dd) if (dd->disk->queue) { del_gendisk(dd->disk); blk_cleanup_queue(dd->queue); + blk_mq_free_tag_set(&dd->tags); } else put_disk(dd->disk); dd->disk = NULL; @@ -4408,8 +4144,6 @@ static int mtip_block_shutdown(struct driver_data *dd) spin_lock(&rssd_index_lock); ida_remove(&rssd_index_ida, dd->index); spin_unlock(&rssd_index_lock); - - mtip_hw_shutdown(dd); return 0; } @@ -4479,6 +4213,57 @@ static DEFINE_HANDLER(5); static DEFINE_HANDLER(6); static DEFINE_HANDLER(7); +static void mtip_disable_link_opts(struct driver_data *dd, struct pci_dev *pdev) +{ + int pos; + unsigned short pcie_dev_ctrl; + + pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); + if (pos) { + pci_read_config_word(pdev, + pos + PCI_EXP_DEVCTL, + &pcie_dev_ctrl); + if (pcie_dev_ctrl & (1 << 11) || + pcie_dev_ctrl & (1 << 4)) { + dev_info(&dd->pdev->dev, + "Disabling ERO/No-Snoop on bridge device %04x:%04x\n", + pdev->vendor, pdev->device); + pcie_dev_ctrl &= ~(PCI_EXP_DEVCTL_NOSNOOP_EN | + PCI_EXP_DEVCTL_RELAX_EN); + pci_write_config_word(pdev, + pos + PCI_EXP_DEVCTL, + pcie_dev_ctrl); + } + } +} + +static void mtip_fix_ero_nosnoop(struct driver_data *dd, struct pci_dev *pdev) +{ + /* + * This workaround is specific to AMD/ATI chipset with a PCI upstream + * device with device id 0x5aXX + */ + if (pdev->bus && pdev->bus->self) { + if (pdev->bus->self->vendor == PCI_VENDOR_ID_ATI && + ((pdev->bus->self->device & 0xff00) == 0x5a00)) { + mtip_disable_link_opts(dd, pdev->bus->self); + } else { + /* Check further up the topology */ + struct pci_dev *parent_dev = pdev->bus->self; + if (parent_dev->bus && + parent_dev->bus->parent && + parent_dev->bus->parent->self && + parent_dev->bus->parent->self->vendor == + PCI_VENDOR_ID_ATI && + (parent_dev->bus->parent->self->device & + 0xff00) == 0x5a00) { + mtip_disable_link_opts(dd, + parent_dev->bus->parent->self); + } + } + } +} + /* * Called for each supported PCI device detected. * @@ -4630,6 +4415,8 @@ static int mtip_pci_probe(struct pci_dev *pdev, goto msi_initialize_err; } + mtip_fix_ero_nosnoop(dd, pdev); + /* Initialize the block layer. */ rv = mtip_block_initialize(dd); if (rv < 0) { @@ -4710,8 +4497,6 @@ static void mtip_pci_remove(struct pci_dev *pdev) dev_warn(&dd->pdev->dev, "Completion workers still active!\n"); } - /* Cleanup the outstanding commands */ - mtip_command_cleanup(dd); /* Clean up the block layer. */ mtip_block_remove(dd); @@ -4737,8 +4522,6 @@ static void mtip_pci_remove(struct pci_dev *pdev) pcim_iounmap_regions(pdev, 1 << MTIP_ABAR); pci_set_drvdata(pdev, NULL); - pci_dev_put(pdev); - } /* @@ -4935,13 +4718,13 @@ static int __init mtip_init(void) */ static void __exit mtip_exit(void) { - debugfs_remove_recursive(dfs_parent); - /* Release the allocated major block device number. */ unregister_blkdev(mtip_major, MTIP_DRV_NAME); /* Unregister the PCI driver. 
*/ pci_unregister_driver(&mtip_pci_driver); + + debugfs_remove_recursive(dfs_parent); } MODULE_AUTHOR("Micron Technology, Inc"); diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index ffb955e7ccb..4b9b554234b 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -40,9 +40,11 @@ #define MTIP_MAX_RETRIES 2 /* Various timeout values in ms */ -#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000 -#define MTIP_IOCTL_COMMAND_TIMEOUT_MS 5000 -#define MTIP_INTERNAL_COMMAND_TIMEOUT_MS 5000 +#define MTIP_NCQ_CMD_TIMEOUT_MS 15000 +#define MTIP_IOCTL_CMD_TIMEOUT_MS 5000 +#define MTIP_INT_CMD_TIMEOUT_MS 5000 +#define MTIP_QUIESCE_IO_TIMEOUT_MS (MTIP_NCQ_CMD_TIMEOUT_MS * \ + (MTIP_MAX_RETRIES + 1)) /* check for timeouts every 500ms */ #define MTIP_TIMEOUT_CHECK_PERIOD 500 @@ -331,12 +333,8 @@ struct mtip_cmd { */ void (*comp_func)(struct mtip_port *port, int tag, - void *data, + struct mtip_cmd *cmd, int status); - /* Additional callback function that may be called by comp_func() */ - void (*async_callback)(void *data, int status); - - void *async_data; /* Addl. data passed to async_callback() */ int scatter_ents; /* Number of scatter list entries used */ @@ -347,10 +345,6 @@ struct mtip_cmd { int retries; /* The number of retries left for this command. */ int direction; /* Data transfer direction */ - - unsigned long comp_time; /* command completion time, in jiffies */ - - atomic_t active; /* declares if this command sent to the drive. */ }; /* Structure used to describe a port. */ @@ -436,12 +430,6 @@ struct mtip_port { * or error handling is active */ unsigned long cmds_to_issue[SLOTBITS_IN_LONGS]; - /* - * Array of command slots. Structure includes pointers to the - * command header and command table, and completion function and data - * pointers. - */ - struct mtip_cmd commands[MTIP_MAX_COMMAND_SLOTS]; /* Used by mtip_service_thread to wait for an event */ wait_queue_head_t svc_wait; /* @@ -452,13 +440,7 @@ struct mtip_port { /* * Timer used to complete commands that have been active for too long. */ - struct timer_list cmd_timer; unsigned long ic_pause_timer; - /* - * Semaphore used to block threads if there are no - * command slots available. - */ - struct semaphore cmd_slot; /* Semaphore to control queue depth of unaligned IOs */ struct semaphore cmd_slot_unal; @@ -485,6 +467,8 @@ struct driver_data { struct request_queue *queue; /* Our request queue. */ + struct blk_mq_tag_set tags; /* blk_mq tags */ + struct mtip_port *port; /* Pointer to the port data structure. 
*/ unsigned product_type; /* magic value declaring the product type */ diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 091b9ea14fe..77087a29b12 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -32,6 +32,7 @@ struct nullb { unsigned int index; struct request_queue *q; struct gendisk *disk; + struct blk_mq_tag_set tag_set; struct hrtimer timer; unsigned int queue_depth; spinlock_t lock; @@ -202,8 +203,8 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) entry = llist_reverse_order(entry); do { cmd = container_of(entry, struct nullb_cmd, ll_list); - end_cmd(cmd); entry = entry->next; + end_cmd(cmd); } while (entry); } @@ -226,7 +227,7 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd) static void null_softirq_done_fn(struct request *rq) { - end_cmd(rq->special); + end_cmd(blk_mq_rq_to_pdu(rq)); } static inline void null_handle_cmd(struct nullb_cmd *cmd) @@ -311,7 +312,7 @@ static void null_request_fn(struct request_queue *q) static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) { - struct nullb_cmd *cmd = rq->special; + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); cmd->rq = rq; cmd->nq = hctx->driver_data; @@ -320,46 +321,6 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) return BLK_MQ_RQ_QUEUE_OK; } -static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index) -{ - int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes); - int tip = (reg->nr_hw_queues % nr_online_nodes); - int node = 0, i, n; - - /* - * Split submit queues evenly wrt to the number of nodes. If uneven, - * fill the first buckets with one extra, until the rest is filled with - * no extra. - */ - for (i = 0, n = 1; i < hctx_index; i++, n++) { - if (n % b_size == 0) { - n = 0; - node++; - - tip--; - if (!tip) - b_size = reg->nr_hw_queues / nr_online_nodes; - } - } - - /* - * A node might not be online, therefore map the relative node id to the - * real node id. 
- */ - for_each_online_node(n) { - if (!node) - break; - node--; - } - - return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n); -} - -static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index) -{ - kfree(hctx); -} - static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) { BUG_ON(!nullb); @@ -389,19 +350,14 @@ static struct blk_mq_ops null_mq_ops = { .complete = null_softirq_done_fn, }; -static struct blk_mq_reg null_mq_reg = { - .ops = &null_mq_ops, - .queue_depth = 64, - .cmd_size = sizeof(struct nullb_cmd), - .flags = BLK_MQ_F_SHOULD_MERGE, -}; - static void null_del_dev(struct nullb *nullb) { list_del_init(&nullb->list); del_gendisk(nullb->disk); blk_cleanup_queue(nullb->q); + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); put_disk(nullb->disk); kfree(nullb); } @@ -506,7 +462,7 @@ static int null_add_dev(void) nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); if (!nullb) - return -ENOMEM; + goto out; spin_lock_init(&nullb->lock); @@ -514,49 +470,44 @@ static int null_add_dev(void) submit_queues = nr_online_nodes; if (setup_queues(nullb)) - goto err; + goto out_free_nullb; if (queue_mode == NULL_Q_MQ) { - null_mq_reg.numa_node = home_node; - null_mq_reg.queue_depth = hw_queue_depth; - null_mq_reg.nr_hw_queues = submit_queues; - - if (use_per_node_hctx) { - null_mq_reg.ops->alloc_hctx = null_alloc_hctx; - null_mq_reg.ops->free_hctx = null_free_hctx; - } else { - null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue; - null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue; - } - - nullb->q = blk_mq_init_queue(&null_mq_reg, nullb); + nullb->tag_set.ops = &null_mq_ops; + nullb->tag_set.nr_hw_queues = submit_queues; + nullb->tag_set.queue_depth = hw_queue_depth; + nullb->tag_set.numa_node = home_node; + nullb->tag_set.cmd_size = sizeof(struct nullb_cmd); + nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + nullb->tag_set.driver_data = nullb; + + if (blk_mq_alloc_tag_set(&nullb->tag_set)) + goto out_cleanup_queues; + + nullb->q = blk_mq_init_queue(&nullb->tag_set); + if (!nullb->q) + goto out_cleanup_tags; } else if (queue_mode == NULL_Q_BIO) { nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_make_request(nullb->q, null_queue_bio); init_driver_queues(nullb); } else { nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); + if (!nullb->q) + goto out_cleanup_queues; blk_queue_prep_rq(nullb->q, null_rq_prep_fn); - if (nullb->q) - blk_queue_softirq_done(nullb->q, null_softirq_done_fn); + blk_queue_softirq_done(nullb->q, null_softirq_done_fn); init_driver_queues(nullb); } - if (!nullb->q) - goto queue_fail; - nullb->q->queuedata = nullb; queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { -queue_fail: - blk_cleanup_queue(nullb->q); - cleanup_queues(nullb); -err: - kfree(nullb); - return -ENOMEM; - } + if (!disk) + goto out_cleanup_blk_queue; mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); @@ -579,6 +530,18 @@ err: sprintf(disk->disk_name, "nullb%d", nullb->index); add_disk(disk); return 0; + +out_cleanup_blk_queue: + blk_cleanup_queue(nullb->q); +out_cleanup_tags: + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); +out_cleanup_queues: + cleanup_queues(nullb); +out_free_nullb: + kfree(nullb); +out: + return -ENOMEM; } static int __init null_init(void) diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 
7c64fa756cc..a842c71dcc2 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -2775,6 +2775,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) return result; } +static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) +{ + struct nvme_dev *dev = pci_get_drvdata(pdev); + + if (prepare) + nvme_dev_shutdown(dev); + else + nvme_dev_resume(dev); +} + static void nvme_shutdown(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); @@ -2839,6 +2849,7 @@ static const struct pci_error_handlers nvme_err_handler = { .link_reset = nvme_link_reset, .slot_reset = nvme_slot_reset, .resume = nvme_error_resume, + .reset_notify = nvme_reset_notify, }; /* Move to pci_ids.h later */ diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index e76bdc074db..719cb1bc164 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -747,7 +747,7 @@ static void do_pcd_request(struct request_queue * q) pcd_current = cd; pcd_sector = blk_rq_pos(pcd_req); pcd_count = blk_rq_cur_sectors(pcd_req); - pcd_buf = pcd_req->buffer; + pcd_buf = bio_data(pcd_req->bio); pcd_busy = 1; ps_set_intr(do_pcd_read, NULL, 0, nice); return; diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 19ad8f0c83e..fea7e76a00d 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -454,7 +454,7 @@ static enum action do_pd_io_start(void) if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) return Fail; pd_run = blk_rq_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); pd_retries = 0; if (pd_cmd == READ) return do_pd_read_start(); @@ -485,7 +485,7 @@ static int pd_next_buf(void) spin_lock_irqsave(&pd_lock, saved_flags); __blk_end_request_cur(pd_req, 0); pd_count = blk_rq_cur_sectors(pd_req); - pd_buf = pd_req->buffer; + pd_buf = bio_data(pd_req->bio); spin_unlock_irqrestore(&pd_lock, saved_flags); return 0; } diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index f5c86d523ba..9a15fd3c934 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -795,7 +795,7 @@ repeat: } pf_cmd = rq_data_dir(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); pf_retries = 0; pf_busy = 1; @@ -827,7 +827,7 @@ static int pf_next_buf(void) if (!pf_req) return 1; pf_count = blk_rq_cur_sectors(pf_req); - pf_buf = pf_req->buffer; + pf_buf = bio_data(pf_req->bio); } return 0; } diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index a69dd93d1bd..608532d3f8c 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -563,7 +563,6 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req, req = skreq->req; blk_add_request_payload(req, page, len); - req->buffer = buf; } static void skd_request_fn_not_online(struct request_queue *q); @@ -744,6 +743,7 @@ static void skd_request_fn(struct request_queue *q) break; } skreq->discard_page = 1; + req->completion_data = page; skd_prep_discard_cdb(scsi_req, skreq, page, lba, count); } else if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) { @@ -858,8 +858,7 @@ static void skd_end_request(struct skd_device *skdev, (skreq->discard_page == 1)) { pr_debug("%s:%s:%d, free the page!", skdev->name, __func__, __LINE__); - free_page((unsigned long)req->buffer); - req->buffer = NULL; + __free_page(req->completion_data); } if (unlikely(error)) { @@ -3945,15 +3944,14 @@ static int skd_acquire_msix(struct skd_device *skdev) for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) entries[i].entry = i; - rc = 
pci_enable_msix_range(pdev, entries, - SKD_MIN_MSIX_COUNT, SKD_MAX_MSIX_COUNT); - if (rc < 0) { + rc = pci_enable_msix_exact(pdev, entries, SKD_MAX_MSIX_COUNT); + if (rc) { pr_err("(%s): failed to enable MSI-X %d\n", skd_name(skdev), rc); goto msix_out; } - skdev->msix_count = rc; + skdev->msix_count = SKD_MAX_MSIX_COUNT; skdev->msix_entries = kzalloc(sizeof(struct skd_msix_entry) * skdev->msix_count, GFP_KERNEL); if (!skdev->msix_entries) { diff --git a/drivers/block/swim.c b/drivers/block/swim.c index b02d53a399f..6b44bbe528b 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -549,7 +549,7 @@ static void redo_fd_request(struct request_queue *q) case READ: err = floppy_read_sectors(fs, blk_rq_pos(req), blk_rq_cur_sectors(req), - req->buffer); + bio_data(req->bio)); break; } done: diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index c74f7b56e7c..523ee8fd4c1 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -342,7 +342,7 @@ static void start_request(struct floppy_state *fs) swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", req->rq_disk->disk_name, req->cmd, (long)blk_rq_pos(req), blk_rq_sectors(req), - req->buffer); + bio_data(req->bio)); swim3_dbg(" errors=%d current_nr_sectors=%u\n", req->errors, blk_rq_cur_sectors(req)); #endif @@ -479,11 +479,11 @@ static inline void setup_transfer(struct floppy_state *fs) /* Set up 3 dma commands: write preamble, data, postamble */ init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble)); ++cp; - init_dma(cp, OUTPUT_MORE, req->buffer, 512); + init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512); ++cp; init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble)); } else { - init_dma(cp, INPUT_LAST, req->buffer, n * 512); + init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512); } ++cp; out_le16(&cp->command, DBDMA_STOP); diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 6d8a87f252d..f63d358f3d9 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -30,6 +30,9 @@ struct virtio_blk /* The disk structure for the kernel. */ struct gendisk *disk; + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + /* Process context for config space updates */ struct work_struct config_work; @@ -112,7 +115,7 @@ static int __virtblk_add_req(struct virtqueue *vq, static inline void virtblk_request_done(struct request *req) { - struct virtblk_req *vbr = req->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); int error = virtblk_result(vbr); if (req->cmd_type == REQ_TYPE_BLOCK_PC) { @@ -144,21 +147,22 @@ static void virtblk_done(struct virtqueue *vq) if (unlikely(virtqueue_is_broken(vq))) break; } while (!virtqueue_enable_cb(vq)); - spin_unlock_irqrestore(&vblk->vq_lock, flags); /* In case queue is stopped waiting for more buffers. 
*/ if (req_done) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); + spin_unlock_irqrestore(&vblk->vq_lock, flags); } static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) { struct virtio_blk *vblk = hctx->queue->queuedata; - struct virtblk_req *vbr = req->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(req); unsigned long flags; unsigned int num; const bool last = (req->cmd_flags & REQ_END) != 0; int err; + bool notify = false; BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); @@ -202,8 +206,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) err = __virtblk_add_req(vblk->vq, vbr, vbr->sg, num); if (err) { virtqueue_kick(vblk->vq); - spin_unlock_irqrestore(&vblk->vq_lock, flags); blk_mq_stop_hw_queue(hctx); + spin_unlock_irqrestore(&vblk->vq_lock, flags); /* Out of mem doesn't actually happen, since we fall back * to direct descriptors */ if (err == -ENOMEM || err == -ENOSPC) @@ -211,10 +215,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req) return BLK_MQ_RQ_QUEUE_ERROR; } - if (last) - virtqueue_kick(vblk->vq); - + if (last && virtqueue_kick_prepare(vblk->vq)) + notify = true; spin_unlock_irqrestore(&vblk->vq_lock, flags); + + if (notify) + virtqueue_notify(vblk->vq); return BLK_MQ_RQ_QUEUE_OK; } @@ -480,33 +486,27 @@ static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); -static struct blk_mq_ops virtio_mq_ops = { - .queue_rq = virtio_queue_rq, - .map_queue = blk_mq_map_queue, - .alloc_hctx = blk_mq_alloc_single_hw_queue, - .free_hctx = blk_mq_free_single_hw_queue, - .complete = virtblk_request_done, -}; - -static struct blk_mq_reg virtio_mq_reg = { - .ops = &virtio_mq_ops, - .nr_hw_queues = 1, - .queue_depth = 0, /* Set in virtblk_probe */ - .numa_node = NUMA_NO_NODE, - .flags = BLK_MQ_F_SHOULD_MERGE, -}; -module_param_named(queue_depth, virtio_mq_reg.queue_depth, uint, 0444); - -static int virtblk_init_vbr(void *data, struct blk_mq_hw_ctx *hctx, - struct request *rq, unsigned int nr) +static int virtblk_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx, + unsigned int numa_node) { struct virtio_blk *vblk = data; - struct virtblk_req *vbr = rq->special; + struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq); sg_init_table(vbr->sg, vblk->sg_elems); return 0; } +static struct blk_mq_ops virtio_mq_ops = { + .queue_rq = virtio_queue_rq, + .map_queue = blk_mq_map_queue, + .complete = virtblk_request_done, + .init_request = virtblk_init_request, +}; + +static unsigned int virtblk_queue_depth; +module_param_named(queue_depth, virtblk_queue_depth, uint, 0444); + static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; @@ -561,24 +561,34 @@ static int virtblk_probe(struct virtio_device *vdev) } /* Default queue sizing is to fill the ring. */ - if (!virtio_mq_reg.queue_depth) { - virtio_mq_reg.queue_depth = vblk->vq->num_free; + if (!virtblk_queue_depth) { + virtblk_queue_depth = vblk->vq->num_free; /* ... 
but without indirect descs, we use 2 descs per req */ if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC)) - virtio_mq_reg.queue_depth /= 2; + virtblk_queue_depth /= 2; } - virtio_mq_reg.cmd_size = + + memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); + vblk->tag_set.ops = &virtio_mq_ops; + vblk->tag_set.nr_hw_queues = 1; + vblk->tag_set.queue_depth = virtblk_queue_depth; + vblk->tag_set.numa_node = NUMA_NO_NODE; + vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; + vblk->tag_set.cmd_size = sizeof(struct virtblk_req) + sizeof(struct scatterlist) * sg_elems; + vblk->tag_set.driver_data = vblk; + + err = blk_mq_alloc_tag_set(&vblk->tag_set); + if (err) + goto out_put_disk; - q = vblk->disk->queue = blk_mq_init_queue(&virtio_mq_reg, vblk); + q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); if (!q) { err = -ENOMEM; - goto out_put_disk; + goto out_free_tags; } - blk_mq_init_commands(q, virtblk_init_vbr, vblk); - q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); @@ -679,6 +689,8 @@ static int virtblk_probe(struct virtio_device *vdev) out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); +out_free_tags: + blk_mq_free_tag_set(&vblk->tag_set); out_put_disk: put_disk(vblk->disk); out_free_vq: @@ -705,6 +717,8 @@ static void virtblk_remove(struct virtio_device *vdev) del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); + blk_mq_free_tag_set(&vblk->tag_set); + /* Stop all the virtqueues. */ vdev->config->reset(vdev); @@ -749,7 +763,7 @@ static int virtblk_restore(struct virtio_device *vdev) vblk->config_enable = true; ret = init_vq(vdev->priv); if (!ret) - blk_mq_start_stopped_hw_queues(vblk->disk->queue); + blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); return ret; } diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index be052773ad0..f65b807e323 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -314,7 +314,7 @@ struct xen_blkif { unsigned long long st_rd_sect; unsigned long long st_wr_sect; - wait_queue_head_t waiting_to_free; + struct work_struct free_work; /* Thread shutdown wait queue. */ wait_queue_head_t shutdown_wq; }; @@ -361,7 +361,7 @@ struct pending_req { #define xen_blkif_put(_b) \ do { \ if (atomic_dec_and_test(&(_b)->refcnt)) \ - wake_up(&(_b)->waiting_to_free);\ + schedule_work(&(_b)->free_work);\ } while (0) struct phys_req { diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 9a547e6b6eb..3a8b810b498 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -35,12 +35,26 @@ static void connect(struct backend_info *); static int connect_ring(struct backend_info *); static void backend_changed(struct xenbus_watch *, const char **, unsigned int); +static void xen_blkif_free(struct xen_blkif *blkif); +static void xen_vbd_free(struct xen_vbd *vbd); struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be) { return be->dev; } +/* + * The last request could free the device from softirq context and + * xen_blkif_free() can sleep. 
+ */ +static void xen_blkif_deferred_free(struct work_struct *work) +{ + struct xen_blkif *blkif; + + blkif = container_of(work, struct xen_blkif, free_work); + xen_blkif_free(blkif); +} + static int blkback_name(struct xen_blkif *blkif, char *buf) { char *devpath, *devname; @@ -121,7 +135,6 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) init_completion(&blkif->drain_complete); atomic_set(&blkif->drain, 0); blkif->st_print = jiffies; - init_waitqueue_head(&blkif->waiting_to_free); blkif->persistent_gnts.rb_node = NULL; spin_lock_init(&blkif->free_pages_lock); INIT_LIST_HEAD(&blkif->free_pages); @@ -132,6 +145,7 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid) INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants); INIT_LIST_HEAD(&blkif->pending_free); + INIT_WORK(&blkif->free_work, xen_blkif_deferred_free); for (i = 0; i < XEN_BLKIF_REQS; i++) { req = kzalloc(sizeof(*req), GFP_KERNEL); @@ -231,7 +245,7 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, return 0; } -static void xen_blkif_disconnect(struct xen_blkif *blkif) +static int xen_blkif_disconnect(struct xen_blkif *blkif) { if (blkif->xenblkd) { kthread_stop(blkif->xenblkd); @@ -239,9 +253,12 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif) blkif->xenblkd = NULL; } - atomic_dec(&blkif->refcnt); - wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0); - atomic_inc(&blkif->refcnt); + /* The above kthread_stop() guarantees that at this point we + * don't have any discard_io or other_io requests. So, checking + * for inflight IO is enough. + */ + if (atomic_read(&blkif->inflight) > 0) + return -EBUSY; if (blkif->irq) { unbind_from_irqhandler(blkif->irq, blkif); @@ -252,6 +269,8 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif) xenbus_unmap_ring_vfree(blkif->be->dev, blkif->blk_ring); blkif->blk_rings.common.sring = NULL; } + + return 0; } static void xen_blkif_free(struct xen_blkif *blkif) @@ -259,8 +278,8 @@ static void xen_blkif_free(struct xen_blkif *blkif) struct pending_req *req, *n; int i = 0, j; - if (!atomic_dec_and_test(&blkif->refcnt)) - BUG(); + xen_blkif_disconnect(blkif); + xen_vbd_free(&blkif->vbd); /* Remove all persistent grants and the cache of ballooned pages. */ xen_blkbk_free_caches(blkif); @@ -449,16 +468,15 @@ static int xen_blkbk_remove(struct xenbus_device *dev) be->backend_watch.node = NULL; } + dev_set_drvdata(&dev->dev, NULL); + if (be->blkif) { xen_blkif_disconnect(be->blkif); - xen_vbd_free(&be->blkif->vbd); - xen_blkif_free(be->blkif); - be->blkif = NULL; + xen_blkif_put(be->blkif); } kfree(be->mode); kfree(be); - dev_set_drvdata(&dev->dev, NULL); return 0; } @@ -481,10 +499,15 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info struct xenbus_device *dev = be->dev; struct xen_blkif *blkif = be->blkif; int err; - int state = 0; + int state = 0, discard_enable; struct block_device *bdev = be->blkif->vbd.bdev; struct request_queue *q = bdev_get_queue(bdev); + err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d", + &discard_enable); + if (err == 1 && !discard_enable) + return; + if (blk_queue_discard(q)) { err = xenbus_printf(xbt, dev->nodename, "discard-granularity", "%u", @@ -700,7 +723,11 @@ static void frontend_changed(struct xenbus_device *dev, * Enforce precondition before potential leak point. * xen_blkif_disconnect() is idempotent. 
*/ - xen_blkif_disconnect(be->blkif); + err = xen_blkif_disconnect(be->blkif); + if (err) { + xenbus_dev_fatal(dev, err, "pending I/O"); + break; + } err = connect_ring(be); if (err) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index efe1b476173..5deb235bd18 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -612,10 +612,10 @@ static void do_blkif_request(struct request_queue *rq) } pr_debug("do_blk_req %p: cmd %p, sec %lx, " - "(%u/%u) buffer:%p [%s]\n", + "(%u/%u) [%s]\n", req, req->cmd, (unsigned long)blk_rq_pos(req), blk_rq_cur_sectors(req), blk_rq_sectors(req), - req->buffer, rq_data_dir(req) ? "write" : "read"); + rq_data_dir(req) ? "write" : "read"); if (blkif_queue_request(req)) { blk_requeue_request(rq, req); @@ -1635,36 +1635,24 @@ blkfront_closing(struct blkfront_info *info) static void blkfront_setup_discard(struct blkfront_info *info) { int err; - char *type; unsigned int discard_granularity; unsigned int discard_alignment; unsigned int discard_secure; - type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL); - if (IS_ERR(type)) - return; - - info->feature_secdiscard = 0; - if (strncmp(type, "phy", 3) == 0) { - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "discard-granularity", "%u", &discard_granularity, - "discard-alignment", "%u", &discard_alignment, - NULL); - if (!err) { - info->feature_discard = 1; - info->discard_granularity = discard_granularity; - info->discard_alignment = discard_alignment; - } - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "discard-secure", "%d", &discard_secure, - NULL); - if (!err) - info->feature_secdiscard = discard_secure; - - } else if (strncmp(type, "file", 4) == 0) - info->feature_discard = 1; - - kfree(type); + info->feature_discard = 1; + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "discard-granularity", "%u", &discard_granularity, + "discard-alignment", "%u", &discard_alignment, + NULL); + if (!err) { + info->discard_granularity = discard_granularity; + info->discard_alignment = discard_alignment; + } + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "discard-secure", "%d", &discard_secure, + NULL); + if (!err) + info->feature_secdiscard = !!discard_secure; } static int blkfront_setup_indirect(struct blkfront_info *info) diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 1393b8871a2..ab3ea62e5df 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -661,7 +661,7 @@ static void ace_fsm_dostate(struct ace_device *ace) rq_data_dir(req)); ace->req = req; - ace->data_ptr = req->buffer; + ace->data_ptr = bio_data(req->bio); ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); @@ -733,7 +733,7 @@ static void ace_fsm_dostate(struct ace_device *ace) * blk_rq_sectors(ace->req), * blk_rq_cur_sectors(ace->req)); */ - ace->data_ptr = ace->req->buffer; + ace->data_ptr = bio_data(ace->req->bio); ace->data_count = blk_rq_cur_sectors(ace->req) * 16; ace_fsm_yieldirq(ace); break; diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c index 27de5046708..968f9e52eff 100644 --- a/drivers/block/z2ram.c +++ b/drivers/block/z2ram.c @@ -87,13 +87,15 @@ static void do_z2_request(struct request_queue *q) while (len) { unsigned long addr = start & Z2RAM_CHUNKMASK; unsigned long size = Z2RAM_CHUNKSIZE - addr; + void *buffer = bio_data(req->bio); + if (len < size) size = len; addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ]; if (rq_data_dir(req) == READ) - 
memcpy(req->buffer, (char *)addr, size); + memcpy(buffer, (char *)addr, size); else - memcpy((char *)addr, req->buffer, size); + memcpy((char *)addr, buffer, size); start += size; len -= size; } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index be571fef185..a83b57e57b6 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x04CA, 0x3004) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x04CA, 0x3006) }, + { USB_DEVICE(0x04CA, 0x3007) }, { USB_DEVICE(0x04CA, 0x3008) }, { USB_DEVICE(0x04CA, 0x300b) }, { USB_DEVICE(0x0930, 0x0219) }, @@ -131,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index f338b0c5a8d..a7dfbf9a3af 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -152,6 +152,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, @@ -1485,10 +1486,8 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_BCM92035) hdev->setup = btusb_setup_bcm92035; - if (id->driver_info & BTUSB_INTEL) { - usb_enable_autosuspend(data->udev); + if (id->driver_info & BTUSB_INTEL) hdev->setup = btusb_setup_intel; - } /* Interface numbers are hardcoded in the specification */ data->isoc = usb_ifnum_to_if(data->udev, 1); diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 552373c4e36..a118ec1650f 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -4,6 +4,14 @@ menu "Bus devices" +config BRCMSTB_GISB_ARB + bool "Broadcom STB GISB bus arbiter" + depends on ARM + help + Driver for the Broadcom Set Top Box System-on-a-chip internal bus + arbiter. This driver provides timeout and target abort error handling + and internal bus master decoding. + config IMX_WEIM bool "Freescale EIM DRIVER" depends on ARCH_MXC @@ -41,4 +49,14 @@ config ARM_CCI help Driver supporting the CCI cache coherent interconnect for ARM platforms. + +config VEXPRESS_CONFIG + bool "Versatile Express configuration bus" + default y if ARCH_VEXPRESS + depends on ARM || ARM64 + depends on OF + select REGMAP + help + Platform configuration infrastructure for the ARM Ltd. + Versatile Express. endmenu diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 8947bdd0de8..6a4ea7e4af1 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -2,6 +2,7 @@ # Makefile for the bus drivers. 
# +obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o obj-$(CONFIG_IMX_WEIM) += imx-weim.o obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o @@ -10,3 +11,5 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o # CCI cache coherent interconnect for ARM platforms obj-$(CONFIG_ARM_CCI) += arm-cci.o + +obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c new file mode 100644 index 00000000000..6159b7752a6 --- /dev/null +++ b/drivers/bus/brcmstb_gisb.c @@ -0,0 +1,289 @@ +/* + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/sysfs.h> +#include <linux/io.h> +#include <linux/string.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/of.h> +#include <linux/bitops.h> + +#include <asm/bug.h> +#include <asm/signal.h> + +#define ARB_TIMER 0x008 +#define ARB_ERR_CAP_CLR 0x7e4 +#define ARB_ERR_CAP_CLEAR (1 << 0) +#define ARB_ERR_CAP_HI_ADDR 0x7e8 +#define ARB_ERR_CAP_ADDR 0x7ec +#define ARB_ERR_CAP_DATA 0x7f0 +#define ARB_ERR_CAP_STATUS 0x7f4 +#define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) +#define ARB_ERR_CAP_STATUS_TEA (1 << 11) +#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2) +#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c +#define ARB_ERR_CAP_STATUS_WRITE (1 << 1) +#define ARB_ERR_CAP_STATUS_VALID (1 << 0) +#define ARB_ERR_CAP_MASTER 0x7f8 + +struct brcmstb_gisb_arb_device { + void __iomem *base; + struct mutex lock; + struct list_head next; + u32 valid_mask; + const char *master_names[sizeof(u32) * BITS_PER_BYTE]; +}; + +static LIST_HEAD(brcmstb_gisb_arb_device_list); + +static ssize_t gisb_arb_get_timeout(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); + u32 timeout; + + mutex_lock(&gdev->lock); + timeout = ioread32(gdev->base + ARB_TIMER); + mutex_unlock(&gdev->lock); + + return sprintf(buf, "%d", timeout); +} + +static ssize_t gisb_arb_set_timeout(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct platform_device *pdev = to_platform_device(dev); + struct brcmstb_gisb_arb_device *gdev = platform_get_drvdata(pdev); + int val, ret; + + ret = kstrtoint(buf, 10, &val); + if (ret < 0) + return ret; + + if (val == 0 || val >= 0xffffffff) + return -EINVAL; + + mutex_lock(&gdev->lock); + iowrite32(val, gdev->base + ARB_TIMER); + mutex_unlock(&gdev->lock); + + return count; +} + +static const char * +brcmstb_gisb_master_to_str(struct brcmstb_gisb_arb_device *gdev, + u32 masters) +{ + u32 mask = gdev->valid_mask & masters; + + if (hweight_long(mask) != 1) + return NULL; + + return gdev->master_names[ffs(mask) - 1]; +} + +static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, + const char *reason) +{ + u32 cap_status; + unsigned long 
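brcmstb_gisb_master_to_str() above attributes a capture to a master only when exactly one bit survives the valid mask. The same decode rule restated as a self-contained helper (decode_one_master is an illustrative name, not part of the driver):

#include <linux/bitops.h>

/* A capture is attributable only if exactly one valid master bit is
 * set; ffs() is 1-based, hence the -1 when indexing the name table. */
static const char *decode_one_master(u32 valid_mask, u32 masters,
				     const char *const names[])
{
	u32 mask = valid_mask & masters;

	if (hweight32(mask) != 1)
		return NULL;	/* empty or ambiguous capture */

	return names[ffs(mask) - 1];
}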
arb_addr; + u32 master; + const char *m_name; + char m_fmt[11]; + + cap_status = ioread32(gdev->base + ARB_ERR_CAP_STATUS); + + /* Invalid captured address, bail out */ + if (!(cap_status & ARB_ERR_CAP_STATUS_VALID)) + return 1; + + /* Read the address and master */ + arb_addr = ioread32(gdev->base + ARB_ERR_CAP_ADDR) & 0xffffffff; +#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) + arb_addr |= (u64)ioread32(gdev->base + ARB_ERR_CAP_HI_ADDR) << 32; +#endif + master = ioread32(gdev->base + ARB_ERR_CAP_MASTER); + + m_name = brcmstb_gisb_master_to_str(gdev, master); + if (!m_name) { + snprintf(m_fmt, sizeof(m_fmt), "0x%08x", master); + m_name = m_fmt; + } + + pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n", + __func__, reason, arb_addr, + cap_status & ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R', + cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "", + m_name); + + /* clear the GISB error */ + iowrite32(ARB_ERR_CAP_CLEAR, gdev->base + ARB_ERR_CAP_CLR); + + return 0; +} + +static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr, + struct pt_regs *regs) +{ + int ret = 0; + struct brcmstb_gisb_arb_device *gdev; + + /* iterate over each GISB arb registered handlers */ + list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next) + ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error"); + /* + * If it was an imprecise abort, then we need to correct the + * return address to be _after_ the instruction. + */ + if (fsr & (1 << 10)) + regs->ARM_pc += 4; + + return ret; +} + +void __init brcmstb_hook_fault_code(void) +{ + hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0, + "imprecise external abort"); +} + +static irqreturn_t brcmstb_gisb_timeout_handler(int irq, void *dev_id) +{ + brcmstb_gisb_arb_decode_addr(dev_id, "timeout"); + + return IRQ_HANDLED; +} + +static irqreturn_t brcmstb_gisb_tea_handler(int irq, void *dev_id) +{ + brcmstb_gisb_arb_decode_addr(dev_id, "target abort"); + + return IRQ_HANDLED; +} + +static DEVICE_ATTR(gisb_arb_timeout, S_IWUSR | S_IRUGO, + gisb_arb_get_timeout, gisb_arb_set_timeout); + +static struct attribute *gisb_arb_sysfs_attrs[] = { + &dev_attr_gisb_arb_timeout.attr, + NULL, +}; + +static struct attribute_group gisb_arb_sysfs_attr_group = { + .attrs = gisb_arb_sysfs_attrs, +}; + +static int brcmstb_gisb_arb_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct brcmstb_gisb_arb_device *gdev; + struct resource *r; + int err, timeout_irq, tea_irq; + unsigned int num_masters, j = 0; + int i, first, last; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + timeout_irq = platform_get_irq(pdev, 0); + tea_irq = platform_get_irq(pdev, 1); + + gdev = devm_kzalloc(&pdev->dev, sizeof(*gdev), GFP_KERNEL); + if (!gdev) + return -ENOMEM; + + mutex_init(&gdev->lock); + INIT_LIST_HEAD(&gdev->next); + + gdev->base = devm_request_and_ioremap(&pdev->dev, r); + if (!gdev->base) + return -ENOMEM; + + err = devm_request_irq(&pdev->dev, timeout_irq, + brcmstb_gisb_timeout_handler, 0, pdev->name, + gdev); + if (err < 0) + return err; + + err = devm_request_irq(&pdev->dev, tea_irq, + brcmstb_gisb_tea_handler, 0, pdev->name, + gdev); + if (err < 0) + return err; + + /* If we do not have a valid mask, assume all masters are enabled */ + if (of_property_read_u32(dn, "brcm,gisb-arb-master-mask", + &gdev->valid_mask)) + gdev->valid_mask = 0xffffffff; + + /* Proceed with reading the litteral names if we agree on the + * number of masters + */ + num_masters = of_property_count_strings(dn, + "brcm,gisb-arb-master-names"); + if 
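brcmstb_bus_error_handler() above is installed via hook_fault_code(22, ...) and relies on an ARM-specific detail: for an imprecise (asynchronous) abort, FSR bit 10 is set and the saved PC still points at the aborting instruction, so the handler steps past it. A stripped-down sketch of that contract, assuming ARM (not Thumb) state as the driver does (example_abort_handler is illustrative):

#include <asm/ptrace.h>

/* Returning 0 tells the ARM fault code the abort was handled. */
static int example_abort_handler(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	/* ...decode and clear the platform capture registers here... */

	if (fsr & (1 << 10))		/* imprecise/asynchronous abort */
		regs->ARM_pc += 4;	/* resume after the instruction */

	return 0;
}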
(hweight_long(gdev->valid_mask) == num_masters) { + first = ffs(gdev->valid_mask) - 1; + last = fls(gdev->valid_mask) - 1; + + for (i = first; i <= last; i++) { + if (!(gdev->valid_mask & BIT(i))) + continue; + + of_property_read_string_index(dn, + "brcm,gisb-arb-master-names", j, + &gdev->master_names[i]); + j++; + } + } + + err = sysfs_create_group(&pdev->dev.kobj, &gisb_arb_sysfs_attr_group); + if (err) + return err; + + platform_set_drvdata(pdev, gdev); + + list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list); + + dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n", + gdev->base, timeout_irq, tea_irq); + + return 0; +} + +static const struct of_device_id brcmstb_gisb_arb_of_match[] = { + { .compatible = "brcm,gisb-arb" }, + { }, +}; + +static struct platform_driver brcmstb_gisb_arb_driver = { + .probe = brcmstb_gisb_arb_probe, + .driver = { + .name = "brcm-gisb-arb", + .owner = THIS_MODULE, + .of_match_table = brcmstb_gisb_arb_of_match, + }, +}; + +static int __init brcm_gisb_driver_init(void) +{ + return platform_driver_register(&brcmstb_gisb_arb_driver); +} + +module_init(brcm_gisb_driver_init); diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index 293e2e0a0a8..26c3779d871 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -56,6 +56,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/debugfs.h> +#include <linux/log2.h> /* * DDR target is the same on all platforms. @@ -222,12 +223,6 @@ static int mvebu_mbus_window_conflicts(struct mvebu_mbus_state *mbus, */ if ((u64)base < wend && end > wbase) return 0; - - /* - * Check if target/attribute conflicts - */ - if (target == wtarget && attr == wattr) - return 0; } return 1; @@ -266,6 +261,17 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus, mbus->soc->win_cfg_offset(win); u32 ctrl, remap_addr; + if (!is_power_of_2(size)) { + WARN(true, "Invalid MBus window size: 0x%zx\n", size); + return -EINVAL; + } + + if ((base & (phys_addr_t)(size - 1)) != 0) { + WARN(true, "Invalid MBus base/size: %pa len 0x%zx\n", &base, + size); + return -EINVAL; + } + ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) | (attr << WIN_CTRL_ATTR_SHIFT) | (target << WIN_CTRL_TGT_SHIFT) | @@ -413,6 +419,10 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v) win, (unsigned long long)wbase, (unsigned long long)(wbase + wsize), wtarget, wattr); + if (!is_power_of_2(wsize) || + ((wbase & (u64)(wsize - 1)) != 0)) + seq_puts(seq, " (Invalid base/size!!)"); + if (win < mbus->soc->num_remappable_wins) { seq_printf(seq, " (remap %016llx)\n", (unsigned long long)wremap); @@ -694,7 +704,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, phys_addr_t sdramwins_phys_base, size_t sdramwins_size) { - struct device_node *np; int win; mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size); @@ -707,12 +716,6 @@ static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus, return -ENOMEM; } - np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"); - if (np) { - mbus->hw_io_coherency = 1; - of_node_put(np); - } - for (win = 0; win < mbus->soc->num_wins; win++) mvebu_mbus_disable_window(mbus, win); @@ -882,7 +885,7 @@ static void __init mvebu_mbus_get_pcie_resources(struct device_node *np, } } -int __init mvebu_mbus_dt_init(void) +int __init mvebu_mbus_dt_init(bool is_coherent) { struct resource mbuswins_res, sdramwins_res; struct device_node *np, *controller; @@ -920,6 +923,8 @@ int __init mvebu_mbus_dt_init(void) return -EINVAL; } +
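The new mvebu-mbus sanity checks encode the hardware's decode scheme: the window mask is derived from size - 1, so a window is programmable only with a power-of-two size and a naturally aligned base. The rule in isolation (mbus_window_ok is an illustrative name):

#include <linux/log2.h>
#include <linux/types.h>

/* (size - 1) must form a contiguous low-bit mask, and the base must
 * have no bits inside that mask. */
static bool mbus_window_ok(phys_addr_t base, size_t size)
{
	return is_power_of_2(size) &&
	       (base & (phys_addr_t)(size - 1)) == 0;
}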
mbus_state.hw_io_coherency = is_coherent; + /* Get optional pcie-{mem,io}-aperture properties */ mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture, &mbus_state.pcie_io_aperture); diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c index feeecae623f..531ae591783 100644 --- a/drivers/bus/omap_l3_noc.c +++ b/drivers/bus/omap_l3_noc.c @@ -1,43 +1,45 @@ /* - * OMAP4XXX L3 Interconnect error handling driver + * OMAP L3 Interconnect error handling driver * - * Copyright (C) 2011 Texas Corporation + * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ * Santosh Shilimkar <santosh.shilimkar@ti.com> * Sricharan <r.sricharan@ti.com> * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA */ -#include <linux/module.h> #include <linux/init.h> -#include <linux/io.h> -#include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include "omap_l3_noc.h" -/* - * Interrupt Handler for L3 error detection. - * 1) Identify the L3 clockdomain partition to which the error belongs to. - * 2) Identify the slave where the error information is logged - * 3) Print the logged information. - * 4) Add dump stack to provide kernel trace. +/** + * l3_handle_target() - Handle Target specific parse and reporting + * @l3: pointer to l3 struct + * @base: base address of clkdm + * @flag_mux: flagmux corresponding to the event + * @err_src: error source index of the slave (target) * - * Two Types of errors : + * This does the second part of the error interrupt handling: + * 3) Parse in the slave information + * 4) Print the logged information. + * 5) Add dump stack to provide kernel trace. + * 6) Clear the source if known. + * + * This handles two types of errors: * 1) Custom errors in L3 : * Target like DMM/FW/EMIF generates SRESP=ERR error * 2) Standard L3 error: @@ -53,214 +55,264 @@ * can be trapped as well. But the trapping is implemented as part * secure software and hence need not be implemented here. 
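With the hunk above, mvebu_mbus_dt_init() takes the coherency decision from its caller rather than probing for marvell,coherency-fabric itself. A hedged sketch of what a call site is then expected to do, mirroring the check just removed from the driver (example_mvebu_init is hypothetical, and the bool prototype is assumed to land in <linux/mbus.h> alongside this change):

#include <linux/init.h>
#include <linux/of.h>
#include <linux/mbus.h>

static void __init example_mvebu_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL,
				     "marvell,coherency-fabric");
	mvebu_mbus_dt_init(np != NULL);
	of_node_put(np);	/* of_node_put(NULL) is a no-op */
}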
*/ -static irqreturn_t l3_interrupt_handler(int irq, void *_l3) +static int l3_handle_target(struct omap_l3 *l3, void __iomem *base, + struct l3_flagmux_data *flag_mux, int err_src) { + int k; + u32 std_err_main, clear, masterid; + u8 op_code, m_req_info; + void __iomem *l3_targ_base; + void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr; + void __iomem *l3_targ_hdr, *l3_targ_info; + struct l3_target_data *l3_targ_inst; + struct l3_masters_data *master; + char *target_name, *master_name = "UN IDENTIFIED"; + char *err_description; + char err_string[30] = { 0 }; + char info_string[60] = { 0 }; + + /* We DONOT expect err_src to go out of bounds */ + BUG_ON(err_src > MAX_CLKDM_TARGETS); + + if (err_src < flag_mux->num_targ_data) { + l3_targ_inst = &flag_mux->l3_targ[err_src]; + target_name = l3_targ_inst->name; + l3_targ_base = base + l3_targ_inst->offset; + } else { + target_name = L3_TARGET_NOT_SUPPORTED; + } - struct omap4_l3 *l3 = _l3; - int inttype, i, k; + if (target_name == L3_TARGET_NOT_SUPPORTED) + return -ENODEV; + + /* Read the stderrlog_main_source from clk domain */ + l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN; + l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB; + + std_err_main = readl_relaxed(l3_targ_stderr); + + switch (std_err_main & CUSTOM_ERROR) { + case STANDARD_ERROR: + err_description = "Standard"; + snprintf(err_string, sizeof(err_string), + ": At Address: 0x%08X ", + readl_relaxed(l3_targ_slvofslsb)); + + l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR; + l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR; + l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO; + break; + + case CUSTOM_ERROR: + err_description = "Custom"; + + l3_targ_mstaddr = l3_targ_base + + L3_TARG_STDERRLOG_CINFO_MSTADDR; + l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE; + l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO; + break; + + default: + /* Nothing to be handled here as of now */ + return 0; + } + + /* STDERRLOG_MSTADDR Stores the NTTP master address. */ + masterid = (readl_relaxed(l3_targ_mstaddr) & + l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask); + + for (k = 0, master = l3->l3_masters; k < l3->num_masters; + k++, master++) { + if (masterid == master->id) { + master_name = master->name; + break; + } + } + + op_code = readl_relaxed(l3_targ_hdr) & 0x7; + + m_req_info = readl_relaxed(l3_targ_info) & 0xF; + snprintf(info_string, sizeof(info_string), + ": %s in %s mode during %s access", + (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access", + (m_req_info & BIT(1)) ? "Supervisor" : "User", + (m_req_info & BIT(3)) ? "Debug" : "Functional"); + + WARN(true, + "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n", + dev_name(l3->dev), + err_description, + master_name, target_name, + l3_transaction_type[op_code], + err_string, info_string); + + /* clear the std error log*/ + clear = std_err_main | CLEAR_STDERR_LOG; + writel_relaxed(clear, l3_targ_stderr); + + return 0; +} + +/** + * l3_interrupt_handler() - interrupt handler for l3 events + * @irq: irq number + * @_l3: pointer to l3 structure + * + * Interrupt Handler for L3 error detection. + * 1) Identify the L3 clockdomain partition to which the error belongs to. + * 2) Identify the slave where the error information is logged + * ... handle the slave event.. + * 7) if the slave is unknown, mask out the slave. 
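l3_handle_target() above recovers the initiator from STDERRLOG_MSTADDR by masking with mst_addr_mask and shifting down by the mask's lowest set bit, so the result compares directly against l3_masters_data.id (OMAP4 uses mask 0xFC, AM4372 0x3F, per the tables later in this patch). That extraction on its own (extract_master_id is an illustrative name):

#include <linux/bitops.h>

static u32 extract_master_id(u32 mstaddr_reg, u32 mst_addr_mask)
{
	return (mstaddr_reg & mst_addr_mask) >> __ffs(mst_addr_mask);
}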
+ */ +static irqreturn_t l3_interrupt_handler(int irq, void *_l3) +{ + struct omap_l3 *l3 = _l3; + int inttype, i, ret; int err_src = 0; - u32 std_err_main, err_reg, clear, masterid; - void __iomem *base, *l3_targ_base; - char *target_name, *master_name = "UN IDENTIFIED"; + u32 err_reg, mask_val; + void __iomem *base, *mask_reg; + struct l3_flagmux_data *flag_mux; /* Get the Type of interrupt */ inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; - for (i = 0; i < L3_MODULES; i++) { + for (i = 0; i < l3->num_modules; i++) { /* * Read the regerr register of the clock domain * to determine the source */ base = l3->l3_base[i]; - err_reg = __raw_readl(base + l3_flagmux[i] + - + L3_FLAGMUX_REGERR0 + (inttype << 3)); + flag_mux = l3->l3_flagmux[i]; + err_reg = readl_relaxed(base + flag_mux->offset + + L3_FLAGMUX_REGERR0 + (inttype << 3)); + + err_reg &= ~(inttype ? flag_mux->mask_app_bits : + flag_mux->mask_dbg_bits); /* Get the corresponding error and analyse */ if (err_reg) { /* Identify the source from control status register */ err_src = __ffs(err_reg); - /* Read the stderrlog_main_source from clk domain */ - l3_targ_base = base + *(l3_targ[i] + err_src); - std_err_main = __raw_readl(l3_targ_base + - L3_TARG_STDERRLOG_MAIN); - masterid = __raw_readl(l3_targ_base + - L3_TARG_STDERRLOG_MSTADDR); - - switch (std_err_main & CUSTOM_ERROR) { - case STANDARD_ERROR: - target_name = - l3_targ_inst_name[i][err_src]; - WARN(true, "L3 standard error: TARGET:%s at address 0x%x\n", - target_name, - __raw_readl(l3_targ_base + - L3_TARG_STDERRLOG_SLVOFSLSB)); - /* clear the std error log*/ - clear = std_err_main | CLEAR_STDERR_LOG; - writel(clear, l3_targ_base + - L3_TARG_STDERRLOG_MAIN); - break; - - case CUSTOM_ERROR: - target_name = - l3_targ_inst_name[i][err_src]; - for (k = 0; k < NUM_OF_L3_MASTERS; k++) { - if (masterid == l3_masters[k].id) - master_name = - l3_masters[k].name; - } - WARN(true, "L3 custom error: MASTER:%s TARGET:%s\n", - master_name, target_name); - /* clear the std error log*/ - clear = std_err_main | CLEAR_STDERR_LOG; - writel(clear, l3_targ_base + - L3_TARG_STDERRLOG_MAIN); - break; - - default: - /* Nothing to be handled here as of now */ - break; + ret = l3_handle_target(l3, base, flag_mux, err_src); + + /* + * Certain platforms may have "undocumented" status + * pending on boot. So don't generate a severe warning + * here. Just mask it off to prevent the error from + * recurring and locking up the system. + */ + if (ret) { + dev_err(l3->dev, + "L3 %s error: target %d mod:%d %s\n", + inttype ?
"debug" : "application", + err_src, i, "(unclearable)"); + + mask_reg = base + flag_mux->offset + + L3_FLAGMUX_MASK0 + (inttype << 3); + mask_val = readl_relaxed(mask_reg); + mask_val &= ~(1 << err_src); + writel_relaxed(mask_val, mask_reg); + + /* Mark these bits as to be ignored */ + if (inttype) + flag_mux->mask_app_bits |= 1 << err_src; + else + flag_mux->mask_dbg_bits |= 1 << err_src; } - /* Error found so break the for loop */ - break; + + /* Error found so break the for loop */ + break; } } return IRQ_HANDLED; } -static int omap4_l3_probe(struct platform_device *pdev) +static const struct of_device_id l3_noc_match[] = { + {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data}, + {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data}, + {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, l3_noc_match); + +static int omap_l3_probe(struct platform_device *pdev) { - static struct omap4_l3 *l3; - struct resource *res; - int ret; + const struct of_device_id *of_id; + static struct omap_l3 *l3; + int ret, i, res_idx; + + of_id = of_match_device(l3_noc_match, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "OF data missing\n"); + return -EINVAL; + } - l3 = kzalloc(sizeof(*l3), GFP_KERNEL); + l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL); if (!l3) return -ENOMEM; + memcpy(l3, of_id->data, sizeof(*l3)); + l3->dev = &pdev->dev; platform_set_drvdata(pdev, l3); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "couldn't find resource 0\n"); - ret = -ENODEV; - goto err0; - } - - l3->l3_base[0] = ioremap(res->start, resource_size(res)); - if (!l3->l3_base[0]) { - dev_err(&pdev->dev, "ioremap failed\n"); - ret = -ENOMEM; - goto err0; - } - - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "couldn't find resource 1\n"); - ret = -ENODEV; - goto err1; - } - - l3->l3_base[1] = ioremap(res->start, resource_size(res)); - if (!l3->l3_base[1]) { - dev_err(&pdev->dev, "ioremap failed\n"); - ret = -ENOMEM; - goto err1; - } - res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - if (!res) { - dev_err(&pdev->dev, "couldn't find resource 2\n"); - ret = -ENODEV; - goto err2; - } + /* Get mem resources */ + for (i = 0, res_idx = 0; i < l3->num_modules; i++) { + struct resource *res; - l3->l3_base[2] = ioremap(res->start, resource_size(res)); - if (!l3->l3_base[2]) { - dev_err(&pdev->dev, "ioremap failed\n"); - ret = -ENOMEM; - goto err2; + if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) { + /* First entry cannot be submodule */ + BUG_ON(i == 0); + l3->l3_base[i] = l3->l3_base[i - 1]; + continue; + } + res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx); + l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(l3->l3_base[i])) { + dev_err(l3->dev, "ioremap %d failed\n", i); + return PTR_ERR(l3->l3_base[i]); + } + res_idx++; } /* * Setup interrupt Handlers */ l3->debug_irq = platform_get_irq(pdev, 0); - ret = request_irq(l3->debug_irq, - l3_interrupt_handler, - IRQF_DISABLED, "l3-dbg-irq", l3); + ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler, + IRQF_DISABLED, "l3-dbg-irq", l3); if (ret) { - pr_crit("L3: request_irq failed to register for 0x%x\n", - l3->debug_irq); - goto err3; + dev_err(l3->dev, "request_irq failed for %d\n", + l3->debug_irq); + return ret; } l3->app_irq = platform_get_irq(pdev, 1); - ret = request_irq(l3->app_irq, - l3_interrupt_handler, - IRQF_DISABLED, "l3-app-irq", l3); - if (ret) { - pr_crit("L3: request_irq 
failed to register for 0x%x\n", - l3->app_irq); - goto err4; - } + ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler, + IRQF_DISABLED, "l3-app-irq", l3); + if (ret) + dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq); - return 0; - -err4: - free_irq(l3->debug_irq, l3); -err3: - iounmap(l3->l3_base[2]); -err2: - iounmap(l3->l3_base[1]); -err1: - iounmap(l3->l3_base[0]); -err0: - kfree(l3); return ret; } -static int omap4_l3_remove(struct platform_device *pdev) -{ - struct omap4_l3 *l3 = platform_get_drvdata(pdev); - - free_irq(l3->app_irq, l3); - free_irq(l3->debug_irq, l3); - iounmap(l3->l3_base[0]); - iounmap(l3->l3_base[1]); - iounmap(l3->l3_base[2]); - kfree(l3); - - return 0; -} - -#if defined(CONFIG_OF) -static const struct of_device_id l3_noc_match[] = { - {.compatible = "ti,omap4-l3-noc", }, - {}, -}; -MODULE_DEVICE_TABLE(of, l3_noc_match); -#else -#define l3_noc_match NULL -#endif - -static struct platform_driver omap4_l3_driver = { - .probe = omap4_l3_probe, - .remove = omap4_l3_remove, +static struct platform_driver omap_l3_driver = { + .probe = omap_l3_probe, .driver = { .name = "omap_l3_noc", .owner = THIS_MODULE, - .of_match_table = l3_noc_match, + .of_match_table = of_match_ptr(l3_noc_match), }, }; -static int __init omap4_l3_init(void) +static int __init omap_l3_init(void) { - return platform_driver_register(&omap4_l3_driver); + return platform_driver_register(&omap_l3_driver); } -postcore_initcall_sync(omap4_l3_init); +postcore_initcall_sync(omap_l3_init); -static void __exit omap4_l3_exit(void) +static void __exit omap_l3_exit(void) { - platform_driver_unregister(&omap4_l3_driver); + platform_driver_unregister(&omap_l3_driver); } -module_exit(omap4_l3_exit); +module_exit(omap_l3_exit); diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h index a6ce34dc481..551e0106143 100644 --- a/drivers/bus/omap_l3_noc.h +++ b/drivers/bus/omap_l3_noc.h @@ -1,29 +1,25 @@ /* - * OMAP4XXX L3 Interconnect error handling driver header + * OMAP L3 Interconnect error handling driver header * - * Copyright (C) 2011 Texas Corporation + * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/ * Santosh Shilimkar <santosh.shilimkar@ti.com> * sricharan <r.sricharan@ti.com> * * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
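The probe above also introduces L3_BASE_IS_SUBMODULE: a sentinel in l3_base[] meaning "reuse the previous module's mapping", so only real regions consume platform MEM resources. The mapping walk reduced to its essentials, with the resource lookup abstracted behind a hypothetical map_next() callback (both names are illustrative):

#include <linux/errno.h>
#include <linux/io.h>

/* Sentinels inherit the previous ioremap; real entries consume the
 * next MEM resource index in order. */
static int map_l3_modules(void __iomem *base[], int num_modules,
			  void __iomem *(*map_next)(int res_idx))
{
	int i, res_idx = 0;

	for (i = 0; i < num_modules; i++) {
		if (base[i] == L3_BASE_IS_SUBMODULE) {
			if (i == 0)	/* entry 0 cannot be a submodule */
				return -EINVAL;
			base[i] = base[i - 1];
			continue;
		}
		base[i] = map_next(res_idx++);
		if (!base[i])
			return -ENOMEM;
	}
	return 0;
}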
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 - * USA */ -#ifndef __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H -#define __ARCH_ARM_MACH_OMAP2_L3_INTERCONNECT_3XXX_H +#ifndef __OMAP_L3_NOC_H +#define __OMAP_L3_NOC_H + +#define MAX_L3_MODULES 3 +#define MAX_CLKDM_TARGETS 31 -#define L3_MODULES 3 #define CLEAR_STDERR_LOG (1 << 31) #define CUSTOM_ERROR 0x2 #define STANDARD_ERROR 0x0 @@ -33,63 +29,165 @@ /* L3 TARG register offsets */ #define L3_TARG_STDERRLOG_MAIN 0x48 +#define L3_TARG_STDERRLOG_HDR 0x4c +#define L3_TARG_STDERRLOG_MSTADDR 0x50 +#define L3_TARG_STDERRLOG_INFO 0x58 #define L3_TARG_STDERRLOG_SLVOFSLSB 0x5c -#define L3_TARG_STDERRLOG_MSTADDR 0x68 +#define L3_TARG_STDERRLOG_CINFO_INFO 0x64 +#define L3_TARG_STDERRLOG_CINFO_MSTADDR 0x68 +#define L3_TARG_STDERRLOG_CINFO_OPCODE 0x6c #define L3_FLAGMUX_REGERR0 0xc +#define L3_FLAGMUX_MASK0 0x8 + +#define L3_TARGET_NOT_SUPPORTED NULL + +#define L3_BASE_IS_SUBMODULE ((void __iomem *)(1 << 0)) + +static const char * const l3_transaction_type[] = { + /* 0 0 0 */ "Idle", + /* 0 0 1 */ "Write", + /* 0 1 0 */ "Read", + /* 0 1 1 */ "ReadEx", + /* 1 0 0 */ "Read Link", + /* 1 0 1 */ "Write Non-Posted", + /* 1 1 0 */ "Write Conditional", + /* 1 1 1 */ "Write Broadcast", +}; -#define NUM_OF_L3_MASTERS (sizeof(l3_masters)/sizeof(l3_masters[0])) - -static u32 l3_flagmux[L3_MODULES] = { - 0x500, - 0x1000, - 0X0200 -}; - -/* L3 Target standard Error register offsets */ -static u32 l3_targ_inst_clk1[] = { - 0x100, /* DMM1 */ - 0x200, /* DMM2 */ - 0x300, /* ABE */ - 0x400, /* L4CFG */ - 0x600, /* CLK2 PWR DISC */ - 0x0, /* Host CLK1 */ - 0x900 /* L4 Wakeup */ -}; - -static u32 l3_targ_inst_clk2[] = { - 0x500, /* CORTEX M3 */ - 0x300, /* DSS */ - 0x100, /* GPMC */ - 0x400, /* ISS */ - 0x700, /* IVAHD */ - 0xD00, /* missing in TRM corresponds to AES1*/ - 0x900, /* L4 PER0*/ - 0x200, /* OCMRAM */ - 0x100, /* missing in TRM corresponds to GPMC sERROR*/ - 0x600, /* SGX */ - 0x800, /* SL2 */ - 0x1600, /* C2C */ - 0x1100, /* missing in TRM corresponds PWR DISC CLK1*/ - 0xF00, /* missing in TRM corrsponds to SHA1*/ - 0xE00, /* missing in TRM corresponds to AES2*/ - 0xC00, /* L4 PER3 */ - 0xA00, /* L4 PER1*/ - 0xB00, /* L4 PER2*/ - 0x0, /* HOST CLK2 */ - 0x1800, /* CAL */ - 0x1700 /* LLI */ -}; - -static u32 l3_targ_inst_clk3[] = { - 0x0100 /* EMUSS */, - 0x0300, /* DEBUGSS_CT_TBR */ - 0x0 /* HOST CLK3 */ -}; - -static struct l3_masters_data { +/** + * struct l3_masters_data - L3 Master information + * @id: ID of the L3 Master + * @name: master name + */ +struct l3_masters_data { u32 id; - char name[10]; -} l3_masters[] = { + char *name; +}; + +/** + * struct l3_target_data - L3 Target information + * @offset: Offset from base for L3 Target + * @name: Target name + * + * Target information is organized indexed by bit field definitions. + */ +struct l3_target_data { + u32 offset; + char *name; +}; + +/** + * struct l3_flagmux_data - Flag Mux information + * @offset: offset from base for flagmux register + * @l3_targ: array indexed by flagmux index (bit offset) pointing to the + * target data. 
unsupported ones are marked with + * L3_TARGET_NOT_SUPPORTED + * @num_targ_data: number of entries in target data + * @mask_app_bits: ignore these from raw application irq status + * @mask_dbg_bits: ignore these from raw debug irq status + */ +struct l3_flagmux_data { + u32 offset; + struct l3_target_data *l3_targ; + u8 num_targ_data; + u32 mask_app_bits; + u32 mask_dbg_bits; +}; + + +/** + * struct omap_l3 - Description of data relevant for L3 bus. + * @dev: device representing the bus (populated runtime) + * @l3_base: base addresses of modules (populated runtime if 0) + * if set to L3_BASE_IS_SUBMODULE, then uses previous + * module index as the base address + * @l3_flag_mux: array containing flag mux data per module + * offset from corresponding module base indexed per + * module. + * @num_modules: number of clock domains / modules. + * @l3_masters: array pointing to master data containing name and register + * offset for the master. + * @num_master: number of masters + * @mst_addr_mask: Mask representing MSTADDR information of NTTP packet + * @debug_irq: irq number of the debug interrupt (populated runtime) + * @app_irq: irq number of the application interrupt (populated runtime) + */ +struct omap_l3 { + struct device *dev; + + void __iomem *l3_base[MAX_L3_MODULES]; + struct l3_flagmux_data **l3_flagmux; + int num_modules; + + struct l3_masters_data *l3_masters; + int num_masters; + u32 mst_addr_mask; + + int debug_irq; + int app_irq; +}; + +static struct l3_target_data omap_l3_target_data_clk1[] = { + {0x100, "DMM1",}, + {0x200, "DMM2",}, + {0x300, "ABE",}, + {0x400, "L4CFG",}, + {0x600, "CLK2PWRDISC",}, + {0x0, "HOSTCLK1",}, + {0x900, "L4WAKEUP",}, +}; + +static struct l3_flagmux_data omap_l3_flagmux_clk1 = { + .offset = 0x500, + .l3_targ = omap_l3_target_data_clk1, + .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk1), +}; + + +static struct l3_target_data omap_l3_target_data_clk2[] = { + {0x500, "CORTEXM3",}, + {0x300, "DSS",}, + {0x100, "GPMC",}, + {0x400, "ISS",}, + {0x700, "IVAHD",}, + {0xD00, "AES1",}, + {0x900, "L4PER0",}, + {0x200, "OCMRAM",}, + {0x100, "GPMCsERROR",}, + {0x600, "SGX",}, + {0x800, "SL2",}, + {0x1600, "C2C",}, + {0x1100, "PWRDISCCLK1",}, + {0xF00, "SHA1",}, + {0xE00, "AES2",}, + {0xC00, "L4PER3",}, + {0xA00, "L4PER1",}, + {0xB00, "L4PER2",}, + {0x0, "HOSTCLK2",}, + {0x1800, "CAL",}, + {0x1700, "LLI",}, +}; + +static struct l3_flagmux_data omap_l3_flagmux_clk2 = { + .offset = 0x1000, + .l3_targ = omap_l3_target_data_clk2, + .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk2), +}; + + +static struct l3_target_data omap_l3_target_data_clk3[] = { + {0x0100, "EMUSS",}, + {0x0300, "DEBUG SOURCE",}, + {0x0, "HOST CLK3",}, +}; + +static struct l3_flagmux_data omap_l3_flagmux_clk3 = { + .offset = 0x0200, + .l3_targ = omap_l3_target_data_clk3, + .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3), +}; + +static struct l3_masters_data omap_l3_masters[] = { { 0x0 , "MPU"}, { 0x10, "CS_ADP"}, { 0x14, "xxx"}, @@ -117,60 +215,261 @@ static struct l3_masters_data { { 0xC8, "USBHOSTFS"} }; -static char *l3_targ_inst_name[L3_MODULES][21] = { - { - "DMM1", - "DMM2", - "ABE", - "L4CFG", - "CLK2 PWR DISC", - "HOST CLK1", - "L4 WAKEUP" - }, - { - "CORTEX M3" , - "DSS ", - "GPMC ", - "ISS ", - "IVAHD ", - "AES1", - "L4 PER0", - "OCMRAM ", - "GPMC sERROR", - "SGX ", - "SL2 ", - "C2C ", - "PWR DISC CLK1", - "SHA1", - "AES2", - "L4 PER3", - "L4 PER1", - "L4 PER2", - "HOST CLK2", - "CAL", - "LLI" - }, - { - "EMUSS", - "DEBUG SOURCE", - "HOST CLK3" - }, -}; - -static u32 
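The flagmux tables in this header are indexed directly by the error-source bit number; holes are kept as L3_TARGET_NOT_SUPPORTED entries (see the 0xdead placeholders in the DRA7 tables below) so indexes stay aligned with the hardware bits. The lookup that convention implies (l3_target_name is an illustrative helper, not part of the driver):

static const char *l3_target_name(const struct l3_flagmux_data *fm,
				  int err_src)
{
	if (err_src >= fm->num_targ_data)
		return L3_TARGET_NOT_SUPPORTED;

	/* may still be the NULL sentinel for an unsupported hole */
	return fm->l3_targ[err_src].name;
}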
*l3_targ[L3_MODULES] = { - l3_targ_inst_clk1, - l3_targ_inst_clk2, - l3_targ_inst_clk3, -}; - -struct omap4_l3 { - struct device *dev; - struct clk *ick; +static struct l3_flagmux_data *omap_l3_flagmux[] = { + &omap_l3_flagmux_clk1, + &omap_l3_flagmux_clk2, + &omap_l3_flagmux_clk3, +}; + +static const struct omap_l3 omap_l3_data = { + .l3_flagmux = omap_l3_flagmux, + .num_modules = ARRAY_SIZE(omap_l3_flagmux), + .l3_masters = omap_l3_masters, + .num_masters = ARRAY_SIZE(omap_l3_masters), + /* The 6 MSBs of register field used to distinguish initiator */ + .mst_addr_mask = 0xFC, +}; - /* memory base */ - void __iomem *l3_base[L3_MODULES]; +/* DRA7 data */ +static struct l3_target_data dra_l3_target_data_clk1[] = { + {0x2a00, "AES1",}, + {0x0200, "DMM_P1",}, + {0x0600, "DSP2_SDMA",}, + {0x0b00, "EVE2",}, + {0x1300, "DMM_P2",}, + {0x2c00, "AES2",}, + {0x0300, "DSP1_SDMA",}, + {0x0a00, "EVE1",}, + {0x0c00, "EVE3",}, + {0x0d00, "EVE4",}, + {0x2900, "DSS",}, + {0x0100, "GPMC",}, + {0x3700, "PCIE1",}, + {0x1600, "IVA_CONFIG",}, + {0x1800, "IVA_SL2IF",}, + {0x0500, "L4_CFG",}, + {0x1d00, "L4_WKUP",}, + {0x3800, "PCIE2",}, + {0x3300, "SHA2_1",}, + {0x1200, "GPU",}, + {0x1000, "IPU1",}, + {0x1100, "IPU2",}, + {0x2000, "TPCC_EDMA",}, + {0x2e00, "TPTC1_EDMA",}, + {0x2b00, "TPTC2_EDMA",}, + {0x0700, "VCP1",}, + {0x2500, "L4_PER2_P3",}, + {0x0e00, "L4_PER3_P3",}, + {0x2200, "MMU1",}, + {0x1400, "PRUSS1",}, + {0x1500, "PRUSS2"}, + {0x0800, "VCP1",}, +}; - int debug_irq; - int app_irq; +static struct l3_flagmux_data dra_l3_flagmux_clk1 = { + .offset = 0x803500, + .l3_targ = dra_l3_target_data_clk1, + .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk1), +}; + +static struct l3_target_data dra_l3_target_data_clk2[] = { + {0x0, "HOST CLK1",}, + {0x0, "HOST CLK2",}, + {0xdead, L3_TARGET_NOT_SUPPORTED,}, + {0x3400, "SHA2_2",}, + {0x0900, "BB2D",}, + {0xdead, L3_TARGET_NOT_SUPPORTED,}, + {0x2100, "L4_PER1_P3",}, + {0x1c00, "L4_PER1_P1",}, + {0x1f00, "L4_PER1_P2",}, + {0x2300, "L4_PER2_P1",}, + {0x2400, "L4_PER2_P2",}, + {0x2600, "L4_PER3_P1",}, + {0x2700, "L4_PER3_P2",}, + {0x2f00, "MCASP1",}, + {0x3000, "MCASP2",}, + {0x3100, "MCASP3",}, + {0x2800, "MMU2",}, + {0x0f00, "OCMC_RAM1",}, + {0x1700, "OCMC_RAM2",}, + {0x1900, "OCMC_RAM3",}, + {0x1e00, "OCMC_ROM",}, + {0x3900, "QSPI",}, +}; + +static struct l3_flagmux_data dra_l3_flagmux_clk2 = { + .offset = 0x803600, + .l3_targ = dra_l3_target_data_clk2, + .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk2), +}; + +static struct l3_target_data dra_l3_target_data_clk3[] = { + {0x0100, "L3_INSTR"}, + {0x0300, "DEBUGSS_CT_TBR"}, + {0x0, "HOST CLK3"}, +}; + +static struct l3_flagmux_data dra_l3_flagmux_clk3 = { + .offset = 0x200, + .l3_targ = dra_l3_target_data_clk3, + .num_targ_data = ARRAY_SIZE(dra_l3_target_data_clk3), +}; + +static struct l3_masters_data dra_l3_masters[] = { + { 0x0, "MPU" }, + { 0x4, "CS_DAP" }, + { 0x5, "IEEE1500_2_OCP" }, + { 0x8, "DSP1_MDMA" }, + { 0x9, "DSP1_CFG" }, + { 0xA, "DSP1_DMA" }, + { 0xB, "DSP2_MDMA" }, + { 0xC, "DSP2_CFG" }, + { 0xD, "DSP2_DMA" }, + { 0xE, "IVA" }, + { 0x10, "EVE1_P1" }, + { 0x11, "EVE2_P1" }, + { 0x12, "EVE3_P1" }, + { 0x13, "EVE4_P1" }, + { 0x14, "PRUSS1 PRU1" }, + { 0x15, "PRUSS1 PRU2" }, + { 0x16, "PRUSS2 PRU1" }, + { 0x17, "PRUSS2 PRU2" }, + { 0x18, "IPU1" }, + { 0x19, "IPU2" }, + { 0x1A, "SDMA" }, + { 0x1B, "CDMA" }, + { 0x1C, "TC1_EDMA" }, + { 0x1D, "TC2_EDMA" }, + { 0x20, "DSS" }, + { 0x21, "MMU1" }, + { 0x22, "PCIE1" }, + { 0x23, "MMU2" }, + { 0x24, "VIP1" }, + { 0x25, "VIP2" }, + { 0x26, "VIP3" }, + { 
0x27, "VPE" }, + { 0x28, "GPU_P1" }, + { 0x29, "BB2D" }, + { 0x29, "GPU_P2" }, + { 0x2B, "GMAC_SW" }, + { 0x2C, "USB3" }, + { 0x2D, "USB2_SS" }, + { 0x2E, "USB2_ULPI_SS1" }, + { 0x2F, "USB2_ULPI_SS2" }, + { 0x30, "CSI2_1" }, + { 0x31, "CSI2_2" }, + { 0x33, "SATA" }, + { 0x34, "EVE1_P2" }, + { 0x35, "EVE2_P2" }, + { 0x36, "EVE3_P2" }, + { 0x37, "EVE4_P2" } }; -#endif + +static struct l3_flagmux_data *dra_l3_flagmux[] = { + &dra_l3_flagmux_clk1, + &dra_l3_flagmux_clk2, + &dra_l3_flagmux_clk3, +}; + +static const struct omap_l3 dra_l3_data = { + .l3_base = { [1] = L3_BASE_IS_SUBMODULE }, + .l3_flagmux = dra_l3_flagmux, + .num_modules = ARRAY_SIZE(dra_l3_flagmux), + .l3_masters = dra_l3_masters, + .num_masters = ARRAY_SIZE(dra_l3_masters), + /* The 6 MSBs of register field used to distinguish initiator */ + .mst_addr_mask = 0xFC, +}; + +/* AM4372 data */ +static struct l3_target_data am4372_l3_target_data_200f[] = { + {0xf00, "EMIF",}, + {0x1200, "DES",}, + {0x400, "OCMCRAM",}, + {0x700, "TPTC0",}, + {0x800, "TPTC1",}, + {0x900, "TPTC2"}, + {0xb00, "TPCC",}, + {0xd00, "DEBUGSS",}, + {0xdead, L3_TARGET_NOT_SUPPORTED,}, + {0x200, "SHA",}, + {0xc00, "SGX530",}, + {0x500, "AES0",}, + {0xa00, "L4_FAST",}, + {0x300, "MPUSS_L2_RAM",}, + {0x100, "ICSS",}, +}; + +static struct l3_flagmux_data am4372_l3_flagmux_200f = { + .offset = 0x1000, + .l3_targ = am4372_l3_target_data_200f, + .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_200f), +}; + +static struct l3_target_data am4372_l3_target_data_100s[] = { + {0x100, "L4_PER_0",}, + {0x200, "L4_PER_1",}, + {0x300, "L4_PER_2",}, + {0x400, "L4_PER_3",}, + {0x800, "McASP0",}, + {0x900, "McASP1",}, + {0xC00, "MMCHS2",}, + {0x700, "GPMC",}, + {0xD00, "L4_FW",}, + {0xdead, L3_TARGET_NOT_SUPPORTED,}, + {0x500, "ADCTSC",}, + {0xE00, "L4_WKUP",}, + {0xA00, "MAG_CARD",}, +}; + +static struct l3_flagmux_data am4372_l3_flagmux_100s = { + .offset = 0x600, + .l3_targ = am4372_l3_target_data_100s, + .num_targ_data = ARRAY_SIZE(am4372_l3_target_data_100s), +}; + +static struct l3_masters_data am4372_l3_masters[] = { + { 0x0, "M1 (128-bit)"}, + { 0x1, "M2 (64-bit)"}, + { 0x4, "DAP"}, + { 0x5, "P1500"}, + { 0xC, "ICSS0"}, + { 0xD, "ICSS1"}, + { 0x14, "Wakeup Processor"}, + { 0x18, "TPTC0 Read"}, + { 0x19, "TPTC0 Write"}, + { 0x1A, "TPTC1 Read"}, + { 0x1B, "TPTC1 Write"}, + { 0x1C, "TPTC2 Read"}, + { 0x1D, "TPTC2 Write"}, + { 0x20, "SGX530"}, + { 0x21, "OCP WP Traffic Probe"}, + { 0x22, "OCP WP DMA Profiling"}, + { 0x23, "OCP WP Event Trace"}, + { 0x25, "DSS"}, + { 0x28, "Crypto DMA RD"}, + { 0x29, "Crypto DMA WR"}, + { 0x2C, "VPFE0"}, + { 0x2D, "VPFE1"}, + { 0x30, "GEMAC"}, + { 0x34, "USB0 RD"}, + { 0x35, "USB0 WR"}, + { 0x36, "USB1 RD"}, + { 0x37, "USB1 WR"}, +}; + +static struct l3_flagmux_data *am4372_l3_flagmux[] = { + &am4372_l3_flagmux_200f, + &am4372_l3_flagmux_100s, +}; + +static const struct omap_l3 am4372_l3_data = { + .l3_flagmux = am4372_l3_flagmux, + .num_modules = ARRAY_SIZE(am4372_l3_flagmux), + .l3_masters = am4372_l3_masters, + .num_masters = ARRAY_SIZE(am4372_l3_masters), + /* All 6 bits of register field used to distinguish initiator */ + .mst_addr_mask = 0x3F, +}; + +#endif /* __OMAP_L3_NOC_H */ diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c new file mode 100644 index 00000000000..a64763b6b5f --- /dev/null +++ b/drivers/bus/vexpress-config.c @@ -0,0 +1,202 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the 
Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/vexpress.h> + + +struct vexpress_config_bridge { + struct vexpress_config_bridge_ops *ops; + void *context; +}; + + +static DEFINE_MUTEX(vexpress_config_mutex); +static struct class *vexpress_config_class; +static u32 vexpress_config_site_master = VEXPRESS_SITE_MASTER; + + +void vexpress_config_set_master(u32 site) +{ + vexpress_config_site_master = site; +} + +u32 vexpress_config_get_master(void) +{ + return vexpress_config_site_master; +} + +void vexpress_config_lock(void *arg) +{ + mutex_lock(&vexpress_config_mutex); +} + +void vexpress_config_unlock(void *arg) +{ + mutex_unlock(&vexpress_config_mutex); +} + + +static void vexpress_config_find_prop(struct device_node *node, + const char *name, u32 *val) +{ + /* Default value */ + *val = 0; + + of_node_get(node); + while (node) { + if (of_property_read_u32(node, name, val) == 0) { + of_node_put(node); + return; + } + node = of_get_next_parent(node); + } +} + +int vexpress_config_get_topo(struct device_node *node, u32 *site, + u32 *position, u32 *dcc) +{ + vexpress_config_find_prop(node, "arm,vexpress,site", site); + if (*site == VEXPRESS_SITE_MASTER) + *site = vexpress_config_site_master; + if (WARN_ON(vexpress_config_site_master == VEXPRESS_SITE_MASTER)) + return -EINVAL; + vexpress_config_find_prop(node, "arm,vexpress,position", position); + vexpress_config_find_prop(node, "arm,vexpress,dcc", dcc); + + return 0; +} + + +static void vexpress_config_devres_release(struct device *dev, void *res) +{ + struct vexpress_config_bridge *bridge = dev_get_drvdata(dev->parent); + struct regmap *regmap = res; + + bridge->ops->regmap_exit(regmap, bridge->context); +} + +struct regmap *devm_regmap_init_vexpress_config(struct device *dev) +{ + struct vexpress_config_bridge *bridge; + struct regmap *regmap; + struct regmap **res; + + if (WARN_ON(dev->parent->class != vexpress_config_class)) + return ERR_PTR(-ENODEV); + + bridge = dev_get_drvdata(dev->parent); + if (WARN_ON(!bridge)) + return ERR_PTR(-EINVAL); + + res = devres_alloc(vexpress_config_devres_release, sizeof(*res), + GFP_KERNEL); + if (!res) + return ERR_PTR(-ENOMEM); + + regmap = bridge->ops->regmap_init(dev, bridge->context); + if (IS_ERR(regmap)) { + devres_free(res); + return regmap; + } + + *res = regmap; + devres_add(dev, res); + + return regmap; +} +EXPORT_SYMBOL_GPL(devm_regmap_init_vexpress_config); + +struct device *vexpress_config_bridge_register(struct device *parent, + struct vexpress_config_bridge_ops *ops, void *context) +{ + struct device *dev; + struct vexpress_config_bridge *bridge; + + if (!vexpress_config_class) { + vexpress_config_class = class_create(THIS_MODULE, + "vexpress-config"); + if (IS_ERR(vexpress_config_class)) + return (void *)vexpress_config_class; + } + + dev = device_create(vexpress_config_class, parent, 0, + NULL, "%s.bridge", dev_name(parent)); + + if (IS_ERR(dev)) + return dev; + + bridge = devm_kmalloc(dev, sizeof(*bridge), GFP_KERNEL); + if (!bridge) { + put_device(dev); + device_unregister(dev); + return ERR_PTR(-ENOMEM); + } + bridge->ops = ops; + bridge->context = context; + + dev_set_drvdata(dev, bridge); + + 
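devm_regmap_init_vexpress_config() below hands any child of a registered bridge a managed regmap whose teardown is routed back through the bridge's regmap_exit. A hypothetical consumer driver sitting on the config bus (probe name and register offset are illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/vexpress.h>

static int example_func_probe(struct platform_device *pdev)
{
	struct regmap *map;

	/* Only works if pdev's parent is a vexpress config bridge
	 * device, as created when the config bus is populated. */
	map = devm_regmap_init_vexpress_config(&pdev->dev);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_write(map, 0 /* illustrative offset */, 0);
}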
dev_dbg(parent, "Registered bridge '%s', parent node %p\n", + dev_name(dev), parent->of_node); + + return dev; +} + + +static int vexpress_config_node_match(struct device *dev, const void *data) +{ + const struct device_node *node = data; + + dev_dbg(dev, "Parent node %p, looking for %p\n", + dev->parent->of_node, node); + + return dev->parent->of_node == node; +} + +static int vexpress_config_populate(struct device_node *node) +{ + struct device_node *bridge; + struct device *parent; + + bridge = of_parse_phandle(node, "arm,vexpress,config-bridge", 0); + if (!bridge) + return -EINVAL; + + parent = class_find_device(vexpress_config_class, NULL, bridge, + vexpress_config_node_match); + if (WARN_ON(!parent)) + return -ENODEV; + + return of_platform_populate(node, NULL, NULL, parent); +} + +static int __init vexpress_config_init(void) +{ + int err = 0; + struct device_node *node; + + /* Need the config devices early, before the "normal" devices... */ + for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") { + err = vexpress_config_populate(node); + if (err) + break; + } + + return err; +} +postcore_initcall(vexpress_config_init); + diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 8a3aff724d9..49ac5662585 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -312,36 +312,24 @@ static const char *mrw_format_status[] = { static const char *mrw_address_space[] = { "DMA", "GAA" }; -#if (ERRLOGMASK!=CD_NOTHING) -#define cdinfo(type, fmt, args...) \ +#if (ERRLOGMASK != CD_NOTHING) +#define cd_dbg(type, fmt, ...) \ do { \ if ((ERRLOGMASK & type) || debug == 1) \ - pr_info(fmt, ##args); \ + pr_debug(fmt, ##__VA_ARGS__); \ } while (0) #else -#define cdinfo(type, fmt, args...) \ +#define cd_dbg(type, fmt, ...) \ do { \ if (0 && (ERRLOGMASK & type) || debug == 1) \ - pr_info(fmt, ##args); \ + pr_debug(fmt, ##__VA_ARGS__); \ } while (0) #endif -/* These are used to simplify getting data in from and back to user land */ -#define IOCTL_IN(arg, type, in) \ - if (copy_from_user(&(in), (type __user *) (arg), sizeof (in))) \ - return -EFAULT; - -#define IOCTL_OUT(arg, type, out) \ - if (copy_to_user((type __user *) (arg), &(out), sizeof (out))) \ - return -EFAULT; - /* The (cdo->capability & ~cdi->mask & CDC_XXX) construct was used in a lot of places. This macro makes the code more clear. */ #define CDROM_CAN(type) (cdi->ops->capability & ~cdi->mask & (type)) -/* used in the audio ioctls */ -#define CHECKAUDIO if ((ret=check_for_audio_disc(cdi, cdo))) return ret - /* * Another popular OS uses 7 seconds as the hard timeout for default * commands, so it is a good choice for us as well. @@ -349,21 +337,6 @@ do { \ #define CDROM_DEF_TIMEOUT (7 * HZ) /* Not-exported routines. 
*/ -static int open_for_data(struct cdrom_device_info * cdi); -static int check_for_audio_disc(struct cdrom_device_info * cdi, - struct cdrom_device_ops * cdo); -static void sanitize_format(union cdrom_addr *addr, - u_char * curr, u_char requested); -static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, - unsigned long arg); - -int cdrom_get_last_written(struct cdrom_device_info *, long *); -static int cdrom_get_next_writable(struct cdrom_device_info *, long *); -static void cdrom_count_tracks(struct cdrom_device_info *, tracktype*); - -static int cdrom_mrw_exit(struct cdrom_device_info *cdi); - -static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di); static void cdrom_sysctl_register(void); @@ -382,113 +355,65 @@ static int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, return -EIO; } -/* This macro makes sure we don't have to check on cdrom_device_ops - * existence in the run-time routines below. Change_capability is a - * hack to have the capability flags defined const, while we can still - * change it here without gcc complaining at every line. - */ -#define ENSURE(call, bits) if (cdo->call == NULL) *change_capability &= ~(bits) - -int register_cdrom(struct cdrom_device_info *cdi) -{ - static char banner_printed; - struct cdrom_device_ops *cdo = cdi->ops; - int *change_capability = (int *)&cdo->capability; /* hack */ - - cdinfo(CD_OPEN, "entering register_cdrom\n"); - - if (cdo->open == NULL || cdo->release == NULL) - return -EINVAL; - if (!banner_printed) { - pr_info("Uniform CD-ROM driver " REVISION "\n"); - banner_printed = 1; - cdrom_sysctl_register(); - } - - ENSURE(drive_status, CDC_DRIVE_STATUS ); - if (cdo->check_events == NULL && cdo->media_changed == NULL) - *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); - ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); - ENSURE(lock_door, CDC_LOCK); - ENSURE(select_speed, CDC_SELECT_SPEED); - ENSURE(get_last_session, CDC_MULTI_SESSION); - ENSURE(get_mcn, CDC_MCN); - ENSURE(reset, CDC_RESET); - ENSURE(generic_packet, CDC_GENERIC_PACKET); - cdi->mc_flags = 0; - cdo->n_minors = 0; - cdi->options = CDO_USE_FFLAGS; - - if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY)) - cdi->options |= (int) CDO_AUTO_CLOSE; - if (autoeject==1 && CDROM_CAN(CDC_OPEN_TRAY)) - cdi->options |= (int) CDO_AUTO_EJECT; - if (lockdoor==1) - cdi->options |= (int) CDO_LOCK; - if (check_media_type==1) - cdi->options |= (int) CDO_CHECK_TYPE; - - if (CDROM_CAN(CDC_MRW_W)) - cdi->exit = cdrom_mrw_exit; - - if (cdi->disk) - cdi->cdda_method = CDDA_BPC_FULL; - else - cdi->cdda_method = CDDA_OLD; - - if (!cdo->generic_packet) - cdo->generic_packet = cdrom_dummy_generic_packet; - - cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); - mutex_lock(&cdrom_mutex); - list_add(&cdi->list, &cdrom_list); - mutex_unlock(&cdrom_mutex); - return 0; -} -#undef ENSURE - -void unregister_cdrom(struct cdrom_device_info *cdi) +static int cdrom_flush_cache(struct cdrom_device_info *cdi) { - cdinfo(CD_OPEN, "entering unregister_cdrom\n"); + struct packet_command cgc; - mutex_lock(&cdrom_mutex); - list_del(&cdi->list); - mutex_unlock(&cdrom_mutex); + init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); + cgc.cmd[0] = GPCMD_FLUSH_CACHE; - if (cdi->exit) - cdi->exit(cdi); + cgc.timeout = 5 * 60 * HZ; - cdi->ops->n_minors--; - cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); + return cdi->ops->generic_packet(cdi, &cgc); } -int cdrom_get_media_event(struct cdrom_device_info *cdi, - struct media_event_desc 
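Dropping the IOCTL_IN/IOCTL_OUT macros above trades hidden early returns for explicit copy_{from,to}_user calls at each ioctl site. The resulting shape, sketched for a hypothetical volume-control handler (example_ioctl_volctrl is not a real cdrom.c function):

#include <linux/cdrom.h>
#include <linux/uaccess.h>

static int example_ioctl_volctrl(struct cdrom_device_info *cdi,
				 unsigned long arg)
{
	struct cdrom_volctrl volctrl;

	if (copy_from_user(&volctrl, (void __user *)arg, sizeof(volctrl)))
		return -EFAULT;	/* the old macro hid this return */

	/* ...apply or fetch the settings via cdi->ops here... */

	return copy_to_user((void __user *)arg, &volctrl,
			    sizeof(volctrl)) ? -EFAULT : 0;
}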
*med) +/* requires CD R/RW */ +static int cdrom_get_disc_info(struct cdrom_device_info *cdi, + disc_information *di) { + struct cdrom_device_ops *cdo = cdi->ops; struct packet_command cgc; - unsigned char buffer[8]; - struct event_header *eh = (struct event_header *) buffer; + int ret, buflen; - init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); - cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION; - cgc.cmd[1] = 1; /* IMMED */ - cgc.cmd[4] = 1 << 4; /* media event */ - cgc.cmd[8] = sizeof(buffer); + /* set up command and get the disc info */ + init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_DISC_INFO; + cgc.cmd[8] = cgc.buflen = 2; cgc.quiet = 1; - if (cdi->ops->generic_packet(cdi, &cgc)) - return 1; + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; - if (be16_to_cpu(eh->data_len) < sizeof(*med)) - return 1; + /* not all drives have the same disc_info length, so requeue + * packet with the length the drive tells us it can supply + */ + buflen = be16_to_cpu(di->disc_information_length) + + sizeof(di->disc_information_length); - if (eh->nea || eh->notification_class != 0x4) - return 1; + if (buflen > sizeof(disc_information)) + buflen = sizeof(disc_information); - memcpy(med, &buffer[sizeof(*eh)], sizeof(*med)); - return 0; + cgc.cmd[8] = cgc.buflen = buflen; + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; + + /* return actual fill size */ + return buflen; } +/* This macro makes sure we don't have to check on cdrom_device_ops + * existence in the run-time routines below. Change_capability is a + * hack to have the capability flags defined const, while we can still + * change it here without gcc complaining at every line. + */ +#define ENSURE(call, bits) \ +do { \ + if (cdo->call == NULL) \ + *change_capability &= ~(bits); \ +} while (0) + /* * the first prototypes used 0x2c as the page code for the mrw mode page, * subsequently this was changed to 0x03. 
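cdrom_get_disc_info() above is the standard MMC two-pass read: issue the command for just the 2-byte length header, then reissue it sized to what the drive advertises, clamped to the local buffer. The pattern in the abstract, with the issue() callback standing in for generic_packet() (read_len_prefixed and issue are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

/* Returns bytes filled, or a negative errno from issue(). */
static int read_len_prefixed(void *buf, size_t bufsize,
			     int (*issue)(void *buf, int len))
{
	__be16 hdr;
	int ret, len;

	ret = issue(&hdr, sizeof(hdr));
	if (ret)
		return ret;

	len = min_t(size_t, be16_to_cpu(hdr) + sizeof(hdr), bufsize);

	ret = issue(buf, len);
	return ret ? ret : len;
}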
probe the one used by this drive @@ -605,18 +530,6 @@ static int cdrom_mrw_bgformat_susp(struct cdrom_device_info *cdi, int immed) return cdi->ops->generic_packet(cdi, &cgc); } -static int cdrom_flush_cache(struct cdrom_device_info *cdi) -{ - struct packet_command cgc; - - init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.cmd[0] = GPCMD_FLUSH_CACHE; - - cgc.timeout = 5 * 60 * HZ; - - return cdi->ops->generic_packet(cdi, &cgc); -} - static int cdrom_mrw_exit(struct cdrom_device_info *cdi) { disc_information di; @@ -650,17 +563,19 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space) cgc.buffer = buffer; cgc.buflen = sizeof(buffer); - if ((ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0))) + ret = cdrom_mode_sense(cdi, &cgc, cdi->mrw_mode_page, 0); + if (ret) return ret; - mph = (struct mode_page_header *) buffer; + mph = (struct mode_page_header *)buffer; offset = be16_to_cpu(mph->desc_length); size = be16_to_cpu(mph->mode_data_length) + 2; buffer[offset + 3] = space; cgc.buflen = size; - if ((ret = cdrom_mode_select(cdi, &cgc))) + ret = cdrom_mode_select(cdi, &cgc); + if (ret) return ret; pr_info("%s: mrw address space %s selected\n", @@ -668,6 +583,106 @@ static int cdrom_mrw_set_lba_space(struct cdrom_device_info *cdi, int space) return 0; } +int register_cdrom(struct cdrom_device_info *cdi) +{ + static char banner_printed; + struct cdrom_device_ops *cdo = cdi->ops; + int *change_capability = (int *)&cdo->capability; /* hack */ + + cd_dbg(CD_OPEN, "entering register_cdrom\n"); + + if (cdo->open == NULL || cdo->release == NULL) + return -EINVAL; + if (!banner_printed) { + pr_info("Uniform CD-ROM driver " REVISION "\n"); + banner_printed = 1; + cdrom_sysctl_register(); + } + + ENSURE(drive_status, CDC_DRIVE_STATUS); + if (cdo->check_events == NULL && cdo->media_changed == NULL) + *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC); + ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY); + ENSURE(lock_door, CDC_LOCK); + ENSURE(select_speed, CDC_SELECT_SPEED); + ENSURE(get_last_session, CDC_MULTI_SESSION); + ENSURE(get_mcn, CDC_MCN); + ENSURE(reset, CDC_RESET); + ENSURE(generic_packet, CDC_GENERIC_PACKET); + cdi->mc_flags = 0; + cdo->n_minors = 0; + cdi->options = CDO_USE_FFLAGS; + + if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY)) + cdi->options |= (int) CDO_AUTO_CLOSE; + if (autoeject == 1 && CDROM_CAN(CDC_OPEN_TRAY)) + cdi->options |= (int) CDO_AUTO_EJECT; + if (lockdoor == 1) + cdi->options |= (int) CDO_LOCK; + if (check_media_type == 1) + cdi->options |= (int) CDO_CHECK_TYPE; + + if (CDROM_CAN(CDC_MRW_W)) + cdi->exit = cdrom_mrw_exit; + + if (cdi->disk) + cdi->cdda_method = CDDA_BPC_FULL; + else + cdi->cdda_method = CDDA_OLD; + + if (!cdo->generic_packet) + cdo->generic_packet = cdrom_dummy_generic_packet; + + cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name); + mutex_lock(&cdrom_mutex); + list_add(&cdi->list, &cdrom_list); + mutex_unlock(&cdrom_mutex); + return 0; +} +#undef ENSURE + +void unregister_cdrom(struct cdrom_device_info *cdi) +{ + cd_dbg(CD_OPEN, "entering unregister_cdrom\n"); + + mutex_lock(&cdrom_mutex); + list_del(&cdi->list); + mutex_unlock(&cdrom_mutex); + + if (cdi->exit) + cdi->exit(cdi); + + cdi->ops->n_minors--; + cd_dbg(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name); +} + +int cdrom_get_media_event(struct cdrom_device_info *cdi, + struct media_event_desc *med) +{ + struct packet_command cgc; + unsigned char buffer[8]; + struct event_header *eh = (struct event_header *)buffer; + + 
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ); + cgc.cmd[0] = GPCMD_GET_EVENT_STATUS_NOTIFICATION; + cgc.cmd[1] = 1; /* IMMED */ + cgc.cmd[4] = 1 << 4; /* media event */ + cgc.cmd[8] = sizeof(buffer); + cgc.quiet = 1; + + if (cdi->ops->generic_packet(cdi, &cgc)) + return 1; + + if (be16_to_cpu(eh->data_len) < sizeof(*med)) + return 1; + + if (eh->nea || eh->notification_class != 0x4) + return 1; + + memcpy(med, &buffer[sizeof(*eh)], sizeof(*med)); + return 0; +} + static int cdrom_get_random_writable(struct cdrom_device_info *cdi, struct rwrt_feature_desc *rfd) { @@ -839,7 +854,7 @@ static int cdrom_ram_open_write(struct cdrom_device_info *cdi) else if (CDF_RWRT == be16_to_cpu(rfd.feature_code)) ret = !rfd.curr; - cdinfo(CD_OPEN, "can open for random write\n"); + cd_dbg(CD_OPEN, "can open for random write\n"); return ret; } @@ -928,12 +943,12 @@ static void cdrom_dvd_rw_close_write(struct cdrom_device_info *cdi) struct packet_command cgc; if (cdi->mmc3_profile != 0x1a) { - cdinfo(CD_CLOSE, "%s: No DVD+RW\n", cdi->name); + cd_dbg(CD_CLOSE, "%s: No DVD+RW\n", cdi->name); return; } if (!cdi->media_written) { - cdinfo(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name); + cd_dbg(CD_CLOSE, "%s: DVD+RW media clean\n", cdi->name); return; } @@ -969,82 +984,74 @@ static int cdrom_close_write(struct cdrom_device_info *cdi) #endif } -/* We use the open-option O_NONBLOCK to indicate that the - * purpose of opening is only for subsequent ioctl() calls; no device - * integrity checks are performed. - * - * We hope that all cd-player programs will adopt this convention. It - * is in their own interest: device control becomes a lot easier - * this way. - */ -int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t mode) +/* badly broken, I know. Is due for a fixup anytime. */ +static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype *tracks) { - int ret; - - cdinfo(CD_OPEN, "entering cdrom_open\n"); - - /* open is event synchronization point, check events first */ - check_disk_change(bdev); - - /* if this was a O_NONBLOCK open and we should honor the flags, - * do a quick open without drive/disc integrity checks. 
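cdrom_get_media_event() above only trusts a GET EVENT STATUS NOTIFICATION reply after three checks: enough payload for a media_event_desc, the no-event (NEA) flag clear, and notification class 4 (media). As a single predicate (media_event_usable is illustrative):

#include <linux/cdrom.h>
#include <linux/kernel.h>

static bool media_event_usable(const struct event_header *eh,
			       size_t med_size)
{
	return be16_to_cpu(eh->data_len) >= med_size &&
	       !eh->nea && eh->notification_class == 0x4;
}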
*/ - cdi->use_count++; - if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { - ret = cdi->ops->open(cdi, 1); - } else { - ret = open_for_data(cdi); - if (ret) - goto err; - cdrom_mmc3_profile(cdi); - if (mode & FMODE_WRITE) { - ret = -EROFS; - if (cdrom_open_write(cdi)) - goto err_release; - if (!CDROM_CAN(CDC_RAM)) - goto err_release; - ret = 0; - cdi->media_written = 0; - } + struct cdrom_tochdr header; + struct cdrom_tocentry entry; + int ret, i; + tracks->data = 0; + tracks->audio = 0; + tracks->cdi = 0; + tracks->xa = 0; + tracks->error = 0; + cd_dbg(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n"); + /* Grab the TOC header so we can see how many tracks there are */ + ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header); + if (ret) { + if (ret == -ENOMEDIUM) + tracks->error = CDS_NO_DISC; + else + tracks->error = CDS_NO_INFO; + return; } - - if (ret) - goto err; - - cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", - cdi->name, cdi->use_count); - return 0; -err_release: - if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { - cdi->ops->lock_door(cdi, 0); - cdinfo(CD_OPEN, "door unlocked.\n"); + /* check what type of tracks are on this disc */ + entry.cdte_format = CDROM_MSF; + for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) { + entry.cdte_track = i; + if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) { + tracks->error = CDS_NO_INFO; + return; + } + if (entry.cdte_ctrl & CDROM_DATA_TRACK) { + if (entry.cdte_format == 0x10) + tracks->cdi++; + else if (entry.cdte_format == 0x20) + tracks->xa++; + else + tracks->data++; + } else { + tracks->audio++; + } + cd_dbg(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n", + i, entry.cdte_format, entry.cdte_ctrl); } - cdi->ops->release(cdi); -err: - cdi->use_count--; - return ret; + cd_dbg(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n", + header.cdth_trk1, tracks->audio, tracks->data, + tracks->cdi, tracks->xa); } static -int open_for_data(struct cdrom_device_info * cdi) +int open_for_data(struct cdrom_device_info *cdi) { int ret; struct cdrom_device_ops *cdo = cdi->ops; tracktype tracks; - cdinfo(CD_OPEN, "entering open_for_data\n"); + cd_dbg(CD_OPEN, "entering open_for_data\n"); /* Check if the driver can report drive status. If it can, we can do clever things. If it can't, well, we at least tried! */ if (cdo->drive_status != NULL) { ret = cdo->drive_status(cdi, CDSL_CURRENT); - cdinfo(CD_OPEN, "drive_status=%d\n", ret); + cd_dbg(CD_OPEN, "drive_status=%d\n", ret); if (ret == CDS_TRAY_OPEN) { - cdinfo(CD_OPEN, "the tray is open...\n"); + cd_dbg(CD_OPEN, "the tray is open...\n"); /* can/may i close it? */ if (CDROM_CAN(CDC_CLOSE_TRAY) && cdi->options & CDO_AUTO_CLOSE) { - cdinfo(CD_OPEN, "trying to close the tray.\n"); + cd_dbg(CD_OPEN, "trying to close the tray\n"); ret=cdo->tray_move(cdi,0); if (ret) { - cdinfo(CD_OPEN, "bummer. tried to close the tray but failed.\n"); + cd_dbg(CD_OPEN, "bummer. tried to close the tray but failed.\n"); /* Ignore the error from the low level driver. We don't care why it couldn't close the tray. We only care @@ -1054,19 +1061,19 @@ int open_for_data(struct cdrom_device_info * cdi) goto clean_up_and_return; } } else { - cdinfo(CD_OPEN, "bummer. this drive can't close the tray.\n"); + cd_dbg(CD_OPEN, "bummer. this drive can't close the tray.\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } /* Ok, the door should be closed now.. 
Check again */ ret = cdo->drive_status(cdi, CDSL_CURRENT); if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) { - cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n"); - cdinfo(CD_OPEN, "tray might not contain a medium.\n"); + cd_dbg(CD_OPEN, "bummer. the tray is still not closed.\n"); + cd_dbg(CD_OPEN, "tray might not contain a medium\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } - cdinfo(CD_OPEN, "the tray is now closed.\n"); + cd_dbg(CD_OPEN, "the tray is now closed\n"); } /* the door should be closed now, check for the disc */ ret = cdo->drive_status(cdi, CDSL_CURRENT); @@ -1077,7 +1084,7 @@ int open_for_data(struct cdrom_device_info * cdi) } cdrom_count_tracks(cdi, &tracks); if (tracks.error == CDS_NO_DISC) { - cdinfo(CD_OPEN, "bummer. no disc.\n"); + cd_dbg(CD_OPEN, "bummer. no disc.\n"); ret=-ENOMEDIUM; goto clean_up_and_return; } @@ -1087,34 +1094,34 @@ int open_for_data(struct cdrom_device_info * cdi) if (cdi->options & CDO_CHECK_TYPE) { /* give people a warning shot, now that CDO_CHECK_TYPE is the default case! */ - cdinfo(CD_OPEN, "bummer. wrong media type.\n"); - cdinfo(CD_WARNING, "pid %d must open device O_NONBLOCK!\n", - (unsigned int)task_pid_nr(current)); + cd_dbg(CD_OPEN, "bummer. wrong media type.\n"); + cd_dbg(CD_WARNING, "pid %d must open device O_NONBLOCK!\n", + (unsigned int)task_pid_nr(current)); ret=-EMEDIUMTYPE; goto clean_up_and_return; } else { - cdinfo(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set.\n"); + cd_dbg(CD_OPEN, "wrong media type, but CDO_CHECK_TYPE not set\n"); } } - cdinfo(CD_OPEN, "all seems well, opening the device.\n"); + cd_dbg(CD_OPEN, "all seems well, opening the device\n"); /* all seems well, we can open the device */ ret = cdo->open(cdi, 0); /* open for data */ - cdinfo(CD_OPEN, "opening the device gave me %d.\n", ret); + cd_dbg(CD_OPEN, "opening the device gave me %d\n", ret); /* After all this careful checking, we shouldn't have problems opening the device, but we don't want the device locked if this somehow fails... */ if (ret) { - cdinfo(CD_OPEN, "open device failed.\n"); + cd_dbg(CD_OPEN, "open device failed\n"); goto clean_up_and_return; } if (CDROM_CAN(CDC_LOCK) && (cdi->options & CDO_LOCK)) { cdo->lock_door(cdi, 1); - cdinfo(CD_OPEN, "door locked.\n"); + cd_dbg(CD_OPEN, "door locked\n"); } - cdinfo(CD_OPEN, "device opened successfully.\n"); + cd_dbg(CD_OPEN, "device opened successfully\n"); return ret; /* Something failed. Try to unlock the drive, because some drivers @@ -1123,14 +1130,70 @@ int open_for_data(struct cdrom_device_info * cdi) This ensures that the drive gets unlocked after a mount fails. This is a goto to avoid bloating the driver with redundant code. */ clean_up_and_return: - cdinfo(CD_OPEN, "open failed.\n"); + cd_dbg(CD_OPEN, "open failed\n"); if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { cdo->lock_door(cdi, 0); - cdinfo(CD_OPEN, "door unlocked.\n"); + cd_dbg(CD_OPEN, "door unlocked\n"); } return ret; } +/* We use the open-option O_NONBLOCK to indicate that the + * purpose of opening is only for subsequent ioctl() calls; no device + * integrity checks are performed. + * + * We hope that all cd-player programs will adopt this convention. It + * is in their own interest: device control becomes a lot easier + * this way.
+ */ +int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode) +{ + int ret; + + cd_dbg(CD_OPEN, "entering cdrom_open\n"); + + /* open is event synchronization point, check events first */ + check_disk_change(bdev); + + /* if this was a O_NONBLOCK open and we should honor the flags, + * do a quick open without drive/disc integrity checks. */ + cdi->use_count++; + if ((mode & FMODE_NDELAY) && (cdi->options & CDO_USE_FFLAGS)) { + ret = cdi->ops->open(cdi, 1); + } else { + ret = open_for_data(cdi); + if (ret) + goto err; + cdrom_mmc3_profile(cdi); + if (mode & FMODE_WRITE) { + ret = -EROFS; + if (cdrom_open_write(cdi)) + goto err_release; + if (!CDROM_CAN(CDC_RAM)) + goto err_release; + ret = 0; + cdi->media_written = 0; + } + } + + if (ret) + goto err; + + cd_dbg(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", + cdi->name, cdi->use_count); + return 0; +err_release: + if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { + cdi->ops->lock_door(cdi, 0); + cd_dbg(CD_OPEN, "door unlocked\n"); + } + cdi->ops->release(cdi); +err: + cdi->use_count--; + return ret; +} + /* This code is similar to that in open_for_data. The routine is called whenever an audio play operation is requested. */ @@ -1139,21 +1202,21 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi, { int ret; tracktype tracks; - cdinfo(CD_OPEN, "entering check_for_audio_disc\n"); + cd_dbg(CD_OPEN, "entering check_for_audio_disc\n"); if (!(cdi->options & CDO_CHECK_TYPE)) return 0; if (cdo->drive_status != NULL) { ret = cdo->drive_status(cdi, CDSL_CURRENT); - cdinfo(CD_OPEN, "drive_status=%d\n", ret); + cd_dbg(CD_OPEN, "drive_status=%d\n", ret); if (ret == CDS_TRAY_OPEN) { - cdinfo(CD_OPEN, "the tray is open...\n"); + cd_dbg(CD_OPEN, "the tray is open...\n"); /* can/may i close it? */ if (CDROM_CAN(CDC_CLOSE_TRAY) && cdi->options & CDO_AUTO_CLOSE) { - cdinfo(CD_OPEN, "trying to close the tray.\n"); + cd_dbg(CD_OPEN, "trying to close the tray\n"); ret=cdo->tray_move(cdi,0); if (ret) { - cdinfo(CD_OPEN, "bummer. tried to close tray but failed.\n"); + cd_dbg(CD_OPEN, "bummer. tried to close tray but failed.\n"); /* Ignore the error from the low level driver. We don't care why it couldn't close the tray. We only care @@ -1162,20 +1225,20 @@ static int check_for_audio_disc(struct cdrom_device_info * cdi, return -ENOMEDIUM; } } else { - cdinfo(CD_OPEN, "bummer. this driver can't close the tray.\n"); + cd_dbg(CD_OPEN, "bummer. this driver can't close the tray.\n"); return -ENOMEDIUM; } /* Ok, the door should be closed now.. Check again */ ret = cdo->drive_status(cdi, CDSL_CURRENT); if ((ret == CDS_NO_DISC) || (ret==CDS_TRAY_OPEN)) { - cdinfo(CD_OPEN, "bummer. the tray is still not closed.\n"); + cd_dbg(CD_OPEN, "bummer. the tray is still not closed.\n"); return -ENOMEDIUM; } if (ret!=CDS_DISC_OK) { - cdinfo(CD_OPEN, "bummer. disc isn't ready.\n"); + cd_dbg(CD_OPEN, "bummer. 
disc isn't ready.\n"); return -EIO; } - cdinfo(CD_OPEN, "the tray is now closed.\n"); + cd_dbg(CD_OPEN, "the tray is now closed\n"); } } cdrom_count_tracks(cdi, &tracks); @@ -1193,17 +1256,18 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode) struct cdrom_device_ops *cdo = cdi->ops; int opened_for_data; - cdinfo(CD_CLOSE, "entering cdrom_release\n"); + cd_dbg(CD_CLOSE, "entering cdrom_release\n"); if (cdi->use_count > 0) cdi->use_count--; if (cdi->use_count == 0) { - cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name); + cd_dbg(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", + cdi->name); cdrom_dvd_rw_close_write(cdi); if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) { - cdinfo(CD_CLOSE, "Unlocking door!\n"); + cd_dbg(CD_CLOSE, "Unlocking door!\n"); cdo->lock_door(cdi, 0); } } @@ -1262,7 +1326,7 @@ static int cdrom_slot_status(struct cdrom_device_info *cdi, int slot) struct cdrom_changer_info *info; int ret; - cdinfo(CD_CHANGER, "entering cdrom_slot_status()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_slot_status()\n"); if (cdi->sanyo_slot) return CDS_NO_INFO; @@ -1292,7 +1356,7 @@ int cdrom_number_of_slots(struct cdrom_device_info *cdi) int nslots = 1; struct cdrom_changer_info *info; - cdinfo(CD_CHANGER, "entering cdrom_number_of_slots()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_number_of_slots()\n"); /* cdrom_read_mech_status requires a valid value for capacity: */ cdi->capacity = 0; @@ -1313,7 +1377,7 @@ static int cdrom_load_unload(struct cdrom_device_info *cdi, int slot) { struct packet_command cgc; - cdinfo(CD_CHANGER, "entering cdrom_load_unload()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_load_unload()\n"); if (cdi->sanyo_slot && slot < 0) return 0; @@ -1342,7 +1406,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot) int curslot; int ret; - cdinfo(CD_CHANGER, "entering cdrom_select_disc()\n"); + cd_dbg(CD_CHANGER, "entering cdrom_select_disc()\n"); if (!CDROM_CAN(CDC_SELECT_DISC)) return -EDRIVE_CANT_DO_THIS; @@ -1476,51 +1540,6 @@ int cdrom_media_changed(struct cdrom_device_info *cdi) return media_changed(cdi, 0); } -/* badly broken, I know. Is due for a fixup anytime. 
*/ -static void cdrom_count_tracks(struct cdrom_device_info *cdi, tracktype* tracks) -{ - struct cdrom_tochdr header; - struct cdrom_tocentry entry; - int ret, i; - tracks->data=0; - tracks->audio=0; - tracks->cdi=0; - tracks->xa=0; - tracks->error=0; - cdinfo(CD_COUNT_TRACKS, "entering cdrom_count_tracks\n"); - /* Grab the TOC header so we can see how many tracks there are */ - if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCHDR, &header))) { - if (ret == -ENOMEDIUM) - tracks->error = CDS_NO_DISC; - else - tracks->error = CDS_NO_INFO; - return; - } - /* check what type of tracks are on this disc */ - entry.cdte_format = CDROM_MSF; - for (i = header.cdth_trk0; i <= header.cdth_trk1; i++) { - entry.cdte_track = i; - if (cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &entry)) { - tracks->error=CDS_NO_INFO; - return; - } - if (entry.cdte_ctrl & CDROM_DATA_TRACK) { - if (entry.cdte_format == 0x10) - tracks->cdi++; - else if (entry.cdte_format == 0x20) - tracks->xa++; - else - tracks->data++; - } else - tracks->audio++; - cdinfo(CD_COUNT_TRACKS, "track %d: format=%d, ctrl=%d\n", - i, entry.cdte_format, entry.cdte_ctrl); - } - cdinfo(CD_COUNT_TRACKS, "disc has %d tracks: %d=audio %d=data %d=Cd-I %d=XA\n", - header.cdth_trk1, tracks->audio, tracks->data, - tracks->cdi, tracks->xa); -} - /* Requests to the low-level drivers will /always/ be done in the following format convention: @@ -1632,7 +1651,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) switch (ai->type) { /* LU data send */ case DVD_LU_SEND_AGID: - cdinfo(CD_DVD, "entering DVD_LU_SEND_AGID\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_AGID\n"); cgc.quiet = 1; setup_report_key(&cgc, ai->lsa.agid, 0); @@ -1644,7 +1663,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_LU_SEND_KEY1: - cdinfo(CD_DVD, "entering DVD_LU_SEND_KEY1\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_KEY1\n"); setup_report_key(&cgc, ai->lsk.agid, 2); if ((ret = cdo->generic_packet(cdi, &cgc))) @@ -1655,7 +1674,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_LU_SEND_CHALLENGE: - cdinfo(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_CHALLENGE\n"); setup_report_key(&cgc, ai->lsc.agid, 1); if ((ret = cdo->generic_packet(cdi, &cgc))) @@ -1667,7 +1686,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Post-auth key */ case DVD_LU_SEND_TITLE_KEY: - cdinfo(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_TITLE_KEY\n"); cgc.quiet = 1; setup_report_key(&cgc, ai->lstk.agid, 4); cgc.cmd[5] = ai->lstk.lba; @@ -1686,7 +1705,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_LU_SEND_ASF: - cdinfo(CD_DVD, "entering DVD_LU_SEND_ASF\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_ASF\n"); setup_report_key(&cgc, ai->lsasf.agid, 5); if ((ret = cdo->generic_packet(cdi, &cgc))) @@ -1697,7 +1716,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* LU data receive (LU changes state) */ case DVD_HOST_SEND_CHALLENGE: - cdinfo(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n"); + cd_dbg(CD_DVD, "entering DVD_HOST_SEND_CHALLENGE\n"); setup_send_key(&cgc, ai->hsc.agid, 1); buf[1] = 0xe; copy_chal(&buf[4], ai->hsc.chal); @@ -1709,7 +1728,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; case DVD_HOST_SEND_KEY2: - cdinfo(CD_DVD, "entering DVD_HOST_SEND_KEY2\n"); + cd_dbg(CD_DVD, "entering 
DVD_HOST_SEND_KEY2\n"); setup_send_key(&cgc, ai->hsk.agid, 3); buf[1] = 0xa; copy_key(&buf[4], ai->hsk.key); @@ -1724,7 +1743,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Misc */ case DVD_INVALIDATE_AGID: cgc.quiet = 1; - cdinfo(CD_DVD, "entering DVD_INVALIDATE_AGID\n"); + cd_dbg(CD_DVD, "entering DVD_INVALIDATE_AGID\n"); setup_report_key(&cgc, ai->lsa.agid, 0x3f); if ((ret = cdo->generic_packet(cdi, &cgc))) return ret; @@ -1732,7 +1751,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Get region settings */ case DVD_LU_SEND_RPC_STATE: - cdinfo(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n"); + cd_dbg(CD_DVD, "entering DVD_LU_SEND_RPC_STATE\n"); setup_report_key(&cgc, 0, 8); memset(&rpc_state, 0, sizeof(rpc_state_t)); cgc.buffer = (char *) &rpc_state; @@ -1749,7 +1768,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) /* Set region settings */ case DVD_HOST_SEND_RPC_STATE: - cdinfo(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n"); + cd_dbg(CD_DVD, "entering DVD_HOST_SEND_RPC_STATE\n"); setup_send_key(&cgc, 0, 6); buf[1] = 6; buf[4] = ai->hrpcs.pdrc; @@ -1759,7 +1778,7 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai) break; default: - cdinfo(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type); + cd_dbg(CD_WARNING, "Invalid DVD key ioctl (%d)\n", ai->type); return -ENOTTY; } @@ -1891,7 +1910,8 @@ static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s, s->bca.len = buf[0] << 8 | buf[1]; if (s->bca.len < 12 || s->bca.len > 188) { - cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len); + cd_dbg(CD_WARNING, "Received invalid BCA length (%d)\n", + s->bca.len); ret = -EIO; goto out; } @@ -1927,14 +1947,13 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s, s->manufact.len = buf[0] << 8 | buf[1]; if (s->manufact.len < 0) { - cdinfo(CD_WARNING, "Received invalid manufacture info length" - " (%d)\n", s->manufact.len); + cd_dbg(CD_WARNING, "Received invalid manufacture info length (%d)\n", + s->manufact.len); ret = -EIO; } else { if (s->manufact.len > 2048) { - cdinfo(CD_WARNING, "Received invalid manufacture info " - "length (%d): truncating to 2048\n", - s->manufact.len); + cd_dbg(CD_WARNING, "Received invalid manufacture info length (%d): truncating to 2048\n", + s->manufact.len); s->manufact.len = 2048; } memcpy(s->manufact.value, &buf[4], s->manufact.len); @@ -1965,8 +1984,8 @@ static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s, return dvd_read_manufact(cdi, s, cgc); default: - cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n", - s->type); + cd_dbg(CD_WARNING, ": Invalid DVD structure read requested (%d)\n", + s->type); return -EINVAL; } } @@ -2255,7 +2274,7 @@ static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi, u8 requested_format; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMMULTISESSION\n"); if (!(cdi->ops->capability & CDC_MULTI_SESSION)) return -ENOSYS; @@ -2277,13 +2296,13 @@ static int cdrom_ioctl_multisession(struct cdrom_device_info *cdi, if (copy_to_user(argp, &ms_info, sizeof(ms_info))) return -EFAULT; - cdinfo(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); + cd_dbg(CD_DO_IOCTL, "CDROMMULTISESSION successful\n"); return 0; } static int cdrom_ioctl_eject(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMEJECT\n"); if (!CDROM_CAN(CDC_OPEN_TRAY)) 
return -ENOSYS; @@ -2300,7 +2319,7 @@ static int cdrom_ioctl_eject(struct cdrom_device_info *cdi) static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMCLOSETRAY\n"); if (!CDROM_CAN(CDC_CLOSE_TRAY)) return -ENOSYS; @@ -2310,7 +2329,7 @@ static int cdrom_ioctl_closetray(struct cdrom_device_info *cdi) static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMEJECT_SW\n"); if (!CDROM_CAN(CDC_OPEN_TRAY)) return -ENOSYS; @@ -2329,7 +2348,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, struct cdrom_changer_info *info; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_MEDIA_CHANGED\n"); if (!CDROM_CAN(CDC_MEDIA_CHANGED)) return -ENOSYS; @@ -2355,7 +2374,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_SET_OPTIONS\n"); /* * Options need to be in sync with capability. @@ -2383,7 +2402,7 @@ static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi, static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_CLEAR_OPTIONS\n"); cdi->options &= ~(int) arg; return cdi->options; @@ -2392,7 +2411,7 @@ static int cdrom_ioctl_clear_options(struct cdrom_device_info *cdi, static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_SELECT_SPEED\n"); if (!CDROM_CAN(CDC_SELECT_SPEED)) return -ENOSYS; @@ -2402,7 +2421,7 @@ static int cdrom_ioctl_select_speed(struct cdrom_device_info *cdi, static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_SELECT_DISC\n"); if (!CDROM_CAN(CDC_SELECT_DISC)) return -ENOSYS; @@ -2420,14 +2439,14 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi, if (cdi->ops->select_disc) return cdi->ops->select_disc(cdi, arg); - cdinfo(CD_CHANGER, "Using generic cdrom_select_disc()\n"); + cd_dbg(CD_CHANGER, "Using generic cdrom_select_disc()\n"); return cdrom_select_disc(cdi, arg); } static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, struct block_device *bdev) { - cdinfo(CD_DO_IOCTL, "entering CDROM_RESET\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_RESET\n"); if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -2440,7 +2459,7 @@ static int cdrom_ioctl_reset(struct cdrom_device_info *cdi, static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "%socking door.\n", arg ? "L" : "Unl"); + cd_dbg(CD_DO_IOCTL, "%socking door\n", arg ? "L" : "Unl"); if (!CDROM_CAN(CDC_LOCK)) return -EDRIVE_CANT_DO_THIS; @@ -2459,7 +2478,7 @@ static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi, static int cdrom_ioctl_debug(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "%sabling debug.\n", arg ? "En" : "Dis"); + cd_dbg(CD_DO_IOCTL, "%sabling debug\n", arg ? 
"En" : "Dis"); if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -2469,7 +2488,7 @@ static int cdrom_ioctl_debug(struct cdrom_device_info *cdi, static int cdrom_ioctl_get_capability(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_GET_CAPABILITY\n"); return (cdi->ops->capability & ~cdi->mask); } @@ -2485,7 +2504,7 @@ static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn mcn; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_GET_MCN\n"); if (!(cdi->ops->capability & CDC_MCN)) return -ENOSYS; @@ -2495,14 +2514,14 @@ static int cdrom_ioctl_get_mcn(struct cdrom_device_info *cdi, if (copy_to_user(argp, &mcn, sizeof(mcn))) return -EFAULT; - cdinfo(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); + cd_dbg(CD_DO_IOCTL, "CDROM_GET_MCN successful\n"); return 0; } static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi, unsigned long arg) { - cdinfo(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_DRIVE_STATUS\n"); if (!(cdi->ops->capability & CDC_DRIVE_STATUS)) return -ENOSYS; @@ -2535,7 +2554,7 @@ static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi) { tracktype tracks; - cdinfo(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_DISC_STATUS\n"); cdrom_count_tracks(cdi, &tracks); if (tracks.error) @@ -2557,13 +2576,13 @@ static int cdrom_ioctl_disc_status(struct cdrom_device_info *cdi) return CDS_DATA_1; /* Policy mode off */ - cdinfo(CD_WARNING,"This disc doesn't have any tracks I recognize!\n"); + cd_dbg(CD_WARNING, "This disc doesn't have any tracks I recognize!\n"); return CDS_NO_INFO; } static int cdrom_ioctl_changer_nslots(struct cdrom_device_info *cdi) { - cdinfo(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_CHANGER_NSLOTS\n"); return cdi->capacity; } @@ -2574,7 +2593,7 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi, u8 requested, back; int ret; - /* cdinfo(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ + /* cd_dbg(CD_DO_IOCTL,"entering CDROMSUBCHNL\n");*/ if (copy_from_user(&q, argp, sizeof(q))) return -EFAULT; @@ -2594,7 +2613,7 @@ static int cdrom_ioctl_get_subchnl(struct cdrom_device_info *cdi, if (copy_to_user(argp, &q, sizeof(q))) return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ + /* cd_dbg(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ return 0; } @@ -2604,7 +2623,7 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi, struct cdrom_tochdr header; int ret; - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ + /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCHDR\n"); */ if (copy_from_user(&header, argp, sizeof(header))) return -EFAULT; @@ -2615,7 +2634,7 @@ static int cdrom_ioctl_read_tochdr(struct cdrom_device_info *cdi, if (copy_to_user(argp, &header, sizeof(header))) return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ + /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCHDR successful\n"); */ return 0; } @@ -2626,7 +2645,7 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi, u8 requested_format; int ret; - /* cdinfo(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ + /* cd_dbg(CD_DO_IOCTL, "entering CDROMREADTOCENTRY\n"); */ if (copy_from_user(&entry, argp, sizeof(entry))) return -EFAULT; @@ -2643,7 +2662,7 @@ static int cdrom_ioctl_read_tocentry(struct cdrom_device_info *cdi, if (copy_to_user(argp, &entry, sizeof(entry))) 
return -EFAULT; - /* cdinfo(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ + /* cd_dbg(CD_DO_IOCTL, "CDROMREADTOCENTRY successful\n"); */ return 0; } @@ -2652,7 +2671,7 @@ static int cdrom_ioctl_play_msf(struct cdrom_device_info *cdi, { struct cdrom_msf msf; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2667,7 +2686,7 @@ static int cdrom_ioctl_play_trkind(struct cdrom_device_info *cdi, struct cdrom_ti ti; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYTRKIND\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2684,7 +2703,7 @@ static int cdrom_ioctl_volctrl(struct cdrom_device_info *cdi, { struct cdrom_volctrl volume; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMVOLCTRL\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2699,7 +2718,7 @@ static int cdrom_ioctl_volread(struct cdrom_device_info *cdi, struct cdrom_volctrl volume; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMVOLREAD\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2718,7 +2737,7 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, { int ret; - cdinfo(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); + cd_dbg(CD_DO_IOCTL, "doing audio ioctl (start/stop/pause/resume)\n"); if (!CDROM_CAN(CDC_PLAY_AUDIO)) return -ENOSYS; @@ -2729,103 +2748,6 @@ static int cdrom_ioctl_audioctl(struct cdrom_device_info *cdi, } /* - * Just about every imaginable ioctl is supported in the Uniform layer - * these days. - * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). - */ -int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, - fmode_t mode, unsigned int cmd, unsigned long arg) -{ - void __user *argp = (void __user *)arg; - int ret; - - /* - * Try the generic SCSI command ioctl's first. - */ - ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); - if (ret != -ENOTTY) - return ret; - - switch (cmd) { - case CDROMMULTISESSION: - return cdrom_ioctl_multisession(cdi, argp); - case CDROMEJECT: - return cdrom_ioctl_eject(cdi); - case CDROMCLOSETRAY: - return cdrom_ioctl_closetray(cdi); - case CDROMEJECT_SW: - return cdrom_ioctl_eject_sw(cdi, arg); - case CDROM_MEDIA_CHANGED: - return cdrom_ioctl_media_changed(cdi, arg); - case CDROM_SET_OPTIONS: - return cdrom_ioctl_set_options(cdi, arg); - case CDROM_CLEAR_OPTIONS: - return cdrom_ioctl_clear_options(cdi, arg); - case CDROM_SELECT_SPEED: - return cdrom_ioctl_select_speed(cdi, arg); - case CDROM_SELECT_DISC: - return cdrom_ioctl_select_disc(cdi, arg); - case CDROMRESET: - return cdrom_ioctl_reset(cdi, bdev); - case CDROM_LOCKDOOR: - return cdrom_ioctl_lock_door(cdi, arg); - case CDROM_DEBUG: - return cdrom_ioctl_debug(cdi, arg); - case CDROM_GET_CAPABILITY: - return cdrom_ioctl_get_capability(cdi); - case CDROM_GET_MCN: - return cdrom_ioctl_get_mcn(cdi, argp); - case CDROM_DRIVE_STATUS: - return cdrom_ioctl_drive_status(cdi, arg); - case CDROM_DISC_STATUS: - return cdrom_ioctl_disc_status(cdi); - case CDROM_CHANGER_NSLOTS: - return cdrom_ioctl_changer_nslots(cdi); - } - - /* - * Use the ioctls that are implemented through the generic_packet() - * interface. this may look at bit funny, but if -ENOTTY is - * returned that particular ioctl is not implemented and we - * let it go through the device specific ones. 
- */ - if (CDROM_CAN(CDC_GENERIC_PACKET)) { - ret = mmc_ioctl(cdi, cmd, arg); - if (ret != -ENOTTY) - return ret; - } - - /* - * Note: most of the cdinfo() calls are commented out here, - * because they fill up the sys log when CD players poll - * the drive. - */ - switch (cmd) { - case CDROMSUBCHNL: - return cdrom_ioctl_get_subchnl(cdi, argp); - case CDROMREADTOCHDR: - return cdrom_ioctl_read_tochdr(cdi, argp); - case CDROMREADTOCENTRY: - return cdrom_ioctl_read_tocentry(cdi, argp); - case CDROMPLAYMSF: - return cdrom_ioctl_play_msf(cdi, argp); - case CDROMPLAYTRKIND: - return cdrom_ioctl_play_trkind(cdi, argp); - case CDROMVOLCTRL: - return cdrom_ioctl_volctrl(cdi, argp); - case CDROMVOLREAD: - return cdrom_ioctl_volread(cdi, argp); - case CDROMSTART: - case CDROMSTOP: - case CDROMPAUSE: - case CDROMRESUME: - return cdrom_ioctl_audioctl(cdi, cmd); - } - - return -ENOSYS; -} - -/* * Required when we need to use READ_10 to issue other than 2048 block * reads */ @@ -2854,10 +2776,158 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size) return cdo->generic_packet(cdi, &cgc); } +static int cdrom_get_track_info(struct cdrom_device_info *cdi, + __u16 track, __u8 type, track_information *ti) +{ + struct cdrom_device_ops *cdo = cdi->ops; + struct packet_command cgc; + int ret, buflen; + + init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); + cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; + cgc.cmd[1] = type & 3; + cgc.cmd[4] = (track & 0xff00) >> 8; + cgc.cmd[5] = track & 0xff; + cgc.cmd[8] = 8; + cgc.quiet = 1; + + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; + + buflen = be16_to_cpu(ti->track_information_length) + + sizeof(ti->track_information_length); + + if (buflen > sizeof(track_information)) + buflen = sizeof(track_information); + + cgc.cmd[8] = cgc.buflen = buflen; + ret = cdo->generic_packet(cdi, &cgc); + if (ret) + return ret; + + /* return actual fill size */ + return buflen; +} + +/* return the last written block on the CD-R media. this is for the udf + file system. */ +int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written) +{ + struct cdrom_tocentry toc; + disc_information di; + track_information ti; + __u32 last_track; + int ret = -1, ti_size; + + if (!CDROM_CAN(CDC_GENERIC_PACKET)) + goto use_toc; + + ret = cdrom_get_disc_info(cdi, &di); + if (ret < (int)(offsetof(typeof(di), last_track_lsb) + + sizeof(di.last_track_lsb))) + goto use_toc; + + /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ + last_track = (di.last_track_msb << 8) | di.last_track_lsb; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + if (ti_size < (int)offsetof(typeof(ti), track_start)) + goto use_toc; + + /* if this track is blank, try the previous. */ + if (ti.blank) { + if (last_track == 1) + goto use_toc; + last_track--; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + } + + if (ti_size < (int)(offsetof(typeof(ti), track_size) + + sizeof(ti.track_size))) + goto use_toc; + + /* if last recorded field is valid, return it. 
*/ + if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address) + + sizeof(ti.last_rec_address))) { + *last_written = be32_to_cpu(ti.last_rec_address); + } else { + /* make it up instead */ + *last_written = be32_to_cpu(ti.track_start) + + be32_to_cpu(ti.track_size); + if (ti.free_blocks) + *last_written -= (be32_to_cpu(ti.free_blocks) + 7); + } + return 0; + + /* this is where we end up if the drive either can't do a + GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO or if + it doesn't give enough information or fails. then we return + the toc contents. */ +use_toc: + toc.cdte_format = CDROM_MSF; + toc.cdte_track = CDROM_LEADOUT; + if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc))) + return ret; + sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA); + *last_written = toc.cdte_addr.lba; + return 0; +} + +/* return the next writable block. also for udf file system. */ +static int cdrom_get_next_writable(struct cdrom_device_info *cdi, + long *next_writable) +{ + disc_information di; + track_information ti; + __u16 last_track; + int ret, ti_size; + + if (!CDROM_CAN(CDC_GENERIC_PACKET)) + goto use_last_written; + + ret = cdrom_get_disc_info(cdi, &di); + if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb) + + sizeof(di.last_track_lsb)) + goto use_last_written; + + /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ + last_track = (di.last_track_msb << 8) | di.last_track_lsb; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start)) + goto use_last_written; + + /* if this track is blank, try the previous. */ + if (ti.blank) { + if (last_track == 1) + goto use_last_written; + last_track--; + ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); + if (ti_size < 0) + goto use_last_written; + } + + /* if next recordable address field is valid, use it. */ + if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable) + + sizeof(ti.next_writable)) { + *next_writable = be32_to_cpu(ti.next_writable); + return 0; + } + +use_last_written: + ret = cdrom_get_last_written(cdi, next_writable); + if (ret) { + *next_writable = 0; + return ret; + } else { + *next_writable += 7; + return 0; + } +} + static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc, - int cmd) + void __user *arg, + struct packet_command *cgc, + int cmd) { struct request_sense sense; struct cdrom_msf msf; @@ -2876,7 +2946,8 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, blocksize = CD_FRAMESIZE_RAW0; break; } - IOCTL_IN(arg, struct cdrom_msf, msf); + if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf))) + return -EFAULT; lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0); /* FIXME: we need upper bound checking, too!! 
*/ if (lba < 0) @@ -2891,8 +2962,8 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, cgc->data_direction = CGC_DATA_READ; ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize); if (ret && sense.sense_key == 0x05 && - sense.asc == 0x20 && - sense.ascq == 0x00) { + sense.asc == 0x20 && + sense.ascq == 0x00) { /* * SCSI-II devices are not required to support * READ_CD, so let's try switching block size @@ -2913,12 +2984,14 @@ out: } static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { struct cdrom_read_audio ra; int lba; - IOCTL_IN(arg, struct cdrom_read_audio, ra); + if (copy_from_user(&ra, (struct cdrom_read_audio __user *)arg, + sizeof(ra))) + return -EFAULT; if (ra.addr_format == CDROM_MSF) lba = msf_to_lba(ra.addr.msf.minute, @@ -2937,12 +3010,13 @@ static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; struct cdrom_subchnl q; u_char requested, back; - IOCTL_IN(arg, struct cdrom_subchnl, q); + if (copy_from_user(&q, (struct cdrom_subchnl __user *)arg, sizeof(q))) + return -EFAULT; requested = q.cdsc_format; if (!((requested == CDROM_MSF) || (requested == CDROM_LBA))) @@ -2954,19 +3028,21 @@ static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi, back = q.cdsc_format; /* local copy */ sanitize_format(&q.cdsc_absaddr, &back, requested); sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); - IOCTL_OUT(arg, struct cdrom_subchnl, q); - /* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ + if (copy_to_user((struct cdrom_subchnl __user *)arg, &q, sizeof(q))) + return -EFAULT; + /* cd_dbg(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */ return 0; } static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc) + void __user *arg, + struct packet_command *cgc) { struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_msf msf; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); - IOCTL_IN(arg, struct cdrom_msf, msf); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYMSF\n"); + if (copy_from_user(&msf, (struct cdrom_msf __user *)arg, sizeof(msf))) + return -EFAULT; cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF; cgc->cmd[3] = msf.cdmsf_min0; cgc->cmd[4] = msf.cdmsf_sec0; @@ -2979,13 +3055,14 @@ static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc) + void __user *arg, + struct packet_command *cgc) { struct cdrom_device_ops *cdo = cdi->ops; struct cdrom_blk blk; - cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n"); - IOCTL_IN(arg, struct cdrom_blk, blk); + cd_dbg(CD_DO_IOCTL, "entering CDROMPLAYBLK\n"); + if (copy_from_user(&blk, (struct cdrom_blk __user *)arg, sizeof(blk))) + return -EFAULT; cgc->cmd[0] = GPCMD_PLAY_AUDIO_10; cgc->cmd[2] = (blk.from >> 24) & 0xff; cgc->cmd[3] = (blk.from >> 16) & 0xff; @@ -2998,9 +3075,9 @@ static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc, - unsigned int cmd) + void __user *arg, + struct packet_command *cgc, + unsigned int cmd) { struct cdrom_volctrl volctrl; unsigned char buffer[32]; @@ -3008,9 +3085,11 @@ static noinline int 
mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, unsigned short offset; int ret; - cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMVOLUME\n"); - IOCTL_IN(arg, struct cdrom_volctrl, volctrl); + if (copy_from_user(&volctrl, (struct cdrom_volctrl __user *)arg, + sizeof(volctrl))) + return -EFAULT; cgc->buffer = buffer; cgc->buflen = 24; @@ -3030,14 +3109,14 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, if (offset + 16 > cgc->buflen) { cgc->buflen = offset + 16; ret = cdrom_mode_sense(cdi, cgc, - GPMODE_AUDIO_CTL_PAGE, 0); + GPMODE_AUDIO_CTL_PAGE, 0); if (ret) return ret; } /* sanity check */ if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE || - buffer[offset + 1] < 14) + buffer[offset + 1] < 14) return -EINVAL; /* now we have the current volume settings. if it was only @@ -3047,7 +3126,9 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, volctrl.channel1 = buffer[offset+11]; volctrl.channel2 = buffer[offset+13]; volctrl.channel3 = buffer[offset+15]; - IOCTL_OUT(arg, struct cdrom_volctrl, volctrl); + if (copy_to_user((struct cdrom_volctrl __user *)arg, &volctrl, + sizeof(volctrl))) + return -EFAULT; return 0; } @@ -3069,11 +3150,11 @@ static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi, - struct packet_command *cgc, - int cmd) + struct packet_command *cgc, + int cmd) { struct cdrom_device_ops *cdo = cdi->ops; - cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n"); cgc->cmd[0] = GPCMD_START_STOP_UNIT; cgc->cmd[1] = 1; cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0; @@ -3082,11 +3163,11 @@ static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi, - struct packet_command *cgc, - int cmd) + struct packet_command *cgc, + int cmd) { struct cdrom_device_ops *cdo = cdi->ops; - cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n"); cgc->cmd[0] = GPCMD_PAUSE_RESUME; cgc->cmd[8] = (cmd == CDROMRESUME) ? 
1 : 0; cgc->data_direction = CGC_DATA_NONE; @@ -3094,8 +3175,8 @@ static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi, } static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi, - void __user *arg, - struct packet_command *cgc) + void __user *arg, + struct packet_command *cgc) { int ret; dvd_struct *s; @@ -3108,7 +3189,7 @@ static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi, if (!s) return -ENOMEM; - cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); + cd_dbg(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n"); if (copy_from_user(s, arg, size)) { kfree(s); return -EFAULT; @@ -3126,44 +3207,48 @@ out: } static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; dvd_authinfo ai; if (!CDROM_CAN(CDC_DVD)) return -ENOSYS; - cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n"); - IOCTL_IN(arg, dvd_authinfo, ai); + cd_dbg(CD_DO_IOCTL, "entering DVD_AUTH\n"); + if (copy_from_user(&ai, (dvd_authinfo __user *)arg, sizeof(ai))) + return -EFAULT; ret = dvd_do_auth(cdi, &ai); if (ret) return ret; - IOCTL_OUT(arg, dvd_authinfo, ai); + if (copy_to_user((dvd_authinfo __user *)arg, &ai, sizeof(ai))) + return -EFAULT; return 0; } static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; long next = 0; - cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n"); ret = cdrom_get_next_writable(cdi, &next); if (ret) return ret; - IOCTL_OUT(arg, long, next); + if (copy_to_user((long __user *)arg, &next, sizeof(next))) + return -EFAULT; return 0; } static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi, - void __user *arg) + void __user *arg) { int ret; long last = 0; - cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n"); + cd_dbg(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n"); ret = cdrom_get_last_written(cdi, &last); if (ret) return ret; - IOCTL_OUT(arg, long, last); + if (copy_to_user((long __user *)arg, &last, sizeof(last))) + return -EFAULT; return 0; } @@ -3212,181 +3297,101 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, return -ENOTTY; } -static int cdrom_get_track_info(struct cdrom_device_info *cdi, __u16 track, __u8 type, - track_information *ti) -{ - struct cdrom_device_ops *cdo = cdi->ops; - struct packet_command cgc; - int ret, buflen; - - init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ); - cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO; - cgc.cmd[1] = type & 3; - cgc.cmd[4] = (track & 0xff00) >> 8; - cgc.cmd[5] = track & 0xff; - cgc.cmd[8] = 8; - cgc.quiet = 1; - - if ((ret = cdo->generic_packet(cdi, &cgc))) - return ret; - - buflen = be16_to_cpu(ti->track_information_length) + - sizeof(ti->track_information_length); - - if (buflen > sizeof(track_information)) - buflen = sizeof(track_information); - - cgc.cmd[8] = cgc.buflen = buflen; - if ((ret = cdo->generic_packet(cdi, &cgc))) - return ret; - - /* return actual fill size */ - return buflen; -} - -/* requires CD R/RW */ -static int cdrom_get_disc_info(struct cdrom_device_info *cdi, disc_information *di) +/* + * Just about every imaginable ioctl is supported in the Uniform layer + * these days. + * ATAPI / SCSI specific code now mainly resides in mmc_ioctl(). 
+ */ +int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev, + fmode_t mode, unsigned int cmd, unsigned long arg) { - struct cdrom_device_ops *cdo = cdi->ops; - struct packet_command cgc; - int ret, buflen; - - /* set up command and get the disc info */ - init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ); - cgc.cmd[0] = GPCMD_READ_DISC_INFO; - cgc.cmd[8] = cgc.buflen = 2; - cgc.quiet = 1; - - if ((ret = cdo->generic_packet(cdi, &cgc))) - return ret; + void __user *argp = (void __user *)arg; + int ret; - /* not all drives have the same disc_info length, so requeue - * packet with the length the drive tells us it can supply + /* + * Try the generic SCSI command ioctl's first. */ - buflen = be16_to_cpu(di->disc_information_length) + - sizeof(di->disc_information_length); - - if (buflen > sizeof(disc_information)) - buflen = sizeof(disc_information); - - cgc.cmd[8] = cgc.buflen = buflen; - if ((ret = cdo->generic_packet(cdi, &cgc))) + ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp); + if (ret != -ENOTTY) return ret; - /* return actual fill size */ - return buflen; -} - -/* return the last written block on the CD-R media. this is for the udf - file system. */ -int cdrom_get_last_written(struct cdrom_device_info *cdi, long *last_written) -{ - struct cdrom_tocentry toc; - disc_information di; - track_information ti; - __u32 last_track; - int ret = -1, ti_size; - - if (!CDROM_CAN(CDC_GENERIC_PACKET)) - goto use_toc; - - ret = cdrom_get_disc_info(cdi, &di); - if (ret < (int)(offsetof(typeof(di), last_track_lsb) - + sizeof(di.last_track_lsb))) - goto use_toc; - - /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ - last_track = (di.last_track_msb << 8) | di.last_track_lsb; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - if (ti_size < (int)offsetof(typeof(ti), track_start)) - goto use_toc; - - /* if this track is blank, try the previous. */ - if (ti.blank) { - if (last_track==1) - goto use_toc; - last_track--; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - } - - if (ti_size < (int)(offsetof(typeof(ti), track_size) - + sizeof(ti.track_size))) - goto use_toc; - - /* if last recorded field is valid, return it. 
*/ - if (ti.lra_v && ti_size >= (int)(offsetof(typeof(ti), last_rec_address) - + sizeof(ti.last_rec_address))) { - *last_written = be32_to_cpu(ti.last_rec_address); - } else { - /* make it up instead */ - *last_written = be32_to_cpu(ti.track_start) + - be32_to_cpu(ti.track_size); - if (ti.free_blocks) - *last_written -= (be32_to_cpu(ti.free_blocks) + 7); + switch (cmd) { + case CDROMMULTISESSION: + return cdrom_ioctl_multisession(cdi, argp); + case CDROMEJECT: + return cdrom_ioctl_eject(cdi); + case CDROMCLOSETRAY: + return cdrom_ioctl_closetray(cdi); + case CDROMEJECT_SW: + return cdrom_ioctl_eject_sw(cdi, arg); + case CDROM_MEDIA_CHANGED: + return cdrom_ioctl_media_changed(cdi, arg); + case CDROM_SET_OPTIONS: + return cdrom_ioctl_set_options(cdi, arg); + case CDROM_CLEAR_OPTIONS: + return cdrom_ioctl_clear_options(cdi, arg); + case CDROM_SELECT_SPEED: + return cdrom_ioctl_select_speed(cdi, arg); + case CDROM_SELECT_DISC: + return cdrom_ioctl_select_disc(cdi, arg); + case CDROMRESET: + return cdrom_ioctl_reset(cdi, bdev); + case CDROM_LOCKDOOR: + return cdrom_ioctl_lock_door(cdi, arg); + case CDROM_DEBUG: + return cdrom_ioctl_debug(cdi, arg); + case CDROM_GET_CAPABILITY: + return cdrom_ioctl_get_capability(cdi); + case CDROM_GET_MCN: + return cdrom_ioctl_get_mcn(cdi, argp); + case CDROM_DRIVE_STATUS: + return cdrom_ioctl_drive_status(cdi, arg); + case CDROM_DISC_STATUS: + return cdrom_ioctl_disc_status(cdi); + case CDROM_CHANGER_NSLOTS: + return cdrom_ioctl_changer_nslots(cdi); } - return 0; - /* this is where we end up if the drive either can't do a - GPCMD_READ_DISC_INFO or GPCMD_READ_TRACK_RZONE_INFO or if - it doesn't give enough information or fails. then we return - the toc contents. */ -use_toc: - toc.cdte_format = CDROM_MSF; - toc.cdte_track = CDROM_LEADOUT; - if ((ret = cdi->ops->audio_ioctl(cdi, CDROMREADTOCENTRY, &toc))) - return ret; - sanitize_format(&toc.cdte_addr, &toc.cdte_format, CDROM_LBA); - *last_written = toc.cdte_addr.lba; - return 0; -} - -/* return the next writable block. also for udf file system. */ -static int cdrom_get_next_writable(struct cdrom_device_info *cdi, long *next_writable) -{ - disc_information di; - track_information ti; - __u16 last_track; - int ret, ti_size; - - if (!CDROM_CAN(CDC_GENERIC_PACKET)) - goto use_last_written; - - ret = cdrom_get_disc_info(cdi, &di); - if (ret < 0 || ret < offsetof(typeof(di), last_track_lsb) - + sizeof(di.last_track_lsb)) - goto use_last_written; - - /* if unit didn't return msb, it's zeroed by cdrom_get_disc_info */ - last_track = (di.last_track_msb << 8) | di.last_track_lsb; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - if (ti_size < 0 || ti_size < offsetof(typeof(ti), track_start)) - goto use_last_written; - - /* if this track is blank, try the previous. */ - if (ti.blank) { - if (last_track == 1) - goto use_last_written; - last_track--; - ti_size = cdrom_get_track_info(cdi, last_track, 1, &ti); - if (ti_size < 0) - goto use_last_written; + /* + * Use the ioctls that are implemented through the generic_packet() + * interface. this may look at bit funny, but if -ENOTTY is + * returned that particular ioctl is not implemented and we + * let it go through the device specific ones. + */ + if (CDROM_CAN(CDC_GENERIC_PACKET)) { + ret = mmc_ioctl(cdi, cmd, arg); + if (ret != -ENOTTY) + return ret; } - /* if next recordable address field is valid, use it. 
*/ - if (ti.nwa_v && ti_size >= offsetof(typeof(ti), next_writable) - + sizeof(ti.next_writable)) { - *next_writable = be32_to_cpu(ti.next_writable); - return 0; + /* + * Note: most of the cd_dbg() calls are commented out here, + * because they fill up the sys log when CD players poll + * the drive. + */ + switch (cmd) { + case CDROMSUBCHNL: + return cdrom_ioctl_get_subchnl(cdi, argp); + case CDROMREADTOCHDR: + return cdrom_ioctl_read_tochdr(cdi, argp); + case CDROMREADTOCENTRY: + return cdrom_ioctl_read_tocentry(cdi, argp); + case CDROMPLAYMSF: + return cdrom_ioctl_play_msf(cdi, argp); + case CDROMPLAYTRKIND: + return cdrom_ioctl_play_trkind(cdi, argp); + case CDROMVOLCTRL: + return cdrom_ioctl_volctrl(cdi, argp); + case CDROMVOLREAD: + return cdrom_ioctl_volread(cdi, argp); + case CDROMSTART: + case CDROMSTOP: + case CDROMPAUSE: + case CDROMRESUME: + return cdrom_ioctl_audioctl(cdi, cmd); } -use_last_written: - if ((ret = cdrom_get_last_written(cdi, next_writable))) { - *next_writable = 0; - return ret; - } else { - *next_writable += 7; - return 0; - } + return -ENOSYS; } EXPORT_SYMBOL(cdrom_get_last_written); diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index 51e75ad9642..584bc312640 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -602,7 +602,7 @@ static void gdrom_readdisk_dma(struct work_struct *work) spin_unlock(&gdrom_lock); block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; block_cnt = blk_rq_sectors(req)/GD_TO_BLK; - __raw_writel(virt_to_phys(req->buffer), GDROM_DMA_STARTADDR_REG); + __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG); __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); __raw_writel(1, GDROM_DMA_DIRECTION_REG); __raw_writel(1, GDROM_DMA_ENABLE_REG); diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index 8121b4c70ed..b29703324e9 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c @@ -730,6 +730,7 @@ static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg) agp_copy_info(agp_bridge, &kerninfo); + memset(&userinfo, 0, sizeof(userinfo)); userinfo.version.major = kerninfo.version.major; userinfo.version.minor = kerninfo.version.minor; userinfo.bridge_id = kerninfo.device->vendor | diff --git a/drivers/char/random.c b/drivers/char/random.c index 6b75713d953..06cea7ff3a7 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -902,6 +902,7 @@ void add_disk_randomness(struct gendisk *disk) add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool)); } +EXPORT_SYMBOL_GPL(add_disk_randomness); #endif /********************************************************************* @@ -995,8 +996,11 @@ retry: ibytes = min_t(size_t, ibytes, have_bytes - reserved); if (ibytes < min) ibytes = 0; - entropy_count = max_t(int, 0, - entropy_count - (ibytes << (ENTROPY_SHIFT + 3))); + if (have_bytes >= ibytes + reserved) + entropy_count -= ibytes << (ENTROPY_SHIFT + 3); + else + entropy_count = reserved << (ENTROPY_SHIFT + 3); + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) goto retry; diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c index b3ea223585b..61dcc8011ec 100644 --- a/drivers/char/tpm/tpm_ppi.c +++ b/drivers/char/tpm/tpm_ppi.c @@ -328,13 +328,11 @@ int tpm_add_ppi(struct kobject *parent) /* Cache TPM ACPI handle and version string */ acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, ppi_callback, NULL, NULL, 
&tpm_ppi_handle); - if (tpm_ppi_handle == NULL) - return -ENODEV; - - return sysfs_create_group(parent, &ppi_attr_grp); + return tpm_ppi_handle ? sysfs_create_group(parent, &ppi_attr_grp) : 0; } void tpm_remove_ppi(struct kobject *parent) { - sysfs_remove_group(parent, &ppi_attr_grp); + if (tpm_ppi_handle) + sysfs_remove_group(parent, &ppi_attr_grp); } diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 6f56d3a4f01..3a2196481b1 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -30,14 +30,7 @@ config COMMON_CLK_WM831X Supports the clocking subsystem of the WM831x/2x series of PMICs from Wolfson Microlectronics. -config COMMON_CLK_VERSATILE - bool "Clock driver for ARM Reference designs" - depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARM64 - ---help--- - Supports clocking on ARM Reference designs: - - Integrator/AP and Integrator/CP - - RealView PB1176, EB, PB11MP and PBX - - Versatile Express +source "drivers/clk/versatile/Kconfig" config COMMON_CLK_MAX77686 tristate "Clock driver for Maxim 77686 MFD" @@ -115,3 +108,5 @@ endmenu source "drivers/clk/bcm/Kconfig" source "drivers/clk/mvebu/Kconfig" + +source "drivers/clk/samsung/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 5f8a28735c9..17d7f13d19a 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -41,7 +41,7 @@ obj-$(CONFIG_PLAT_ORION) += mvebu/ obj-$(CONFIG_ARCH_MXS) += mxs/ obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/ obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/ -obj-$(CONFIG_PLAT_SAMSUNG) += samsung/ +obj-$(CONFIG_COMMON_CLK_SAMSUNG) += samsung/ obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += shmobile/ obj-$(CONFIG_ARCH_SIRF) += sirf/ obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/ diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile index 46c1d3d0d66..4998aee5926 100644 --- a/drivers/clk/at91/Makefile +++ b/drivers/clk/at91/Makefile @@ -2,8 +2,8 @@ # Makefile for at91 specific clk # -obj-y += pmc.o -obj-y += clk-main.o clk-pll.o clk-plldiv.o clk-master.o +obj-y += pmc.o sckc.o +obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o obj-y += clk-system.o clk-peripheral.o clk-programmable.o obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index 8e9e8cc0412..733306131b9 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -30,99 +30,546 @@ #define MAINF_LOOP_MIN_WAIT (USEC_PER_SEC / SLOW_CLOCK_FREQ) #define MAINF_LOOP_MAX_WAIT MAINFRDY_TIMEOUT -struct clk_main { +#define MOR_KEY_MASK (0xff << 16) + +struct clk_main_osc { struct clk_hw hw; struct at91_pmc *pmc; - unsigned long rate; unsigned int irq; wait_queue_head_t wait; }; -#define to_clk_main(hw) container_of(hw, struct clk_main, hw) +#define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw) + +struct clk_main_rc_osc { + struct clk_hw hw; + struct at91_pmc *pmc; + unsigned int irq; + wait_queue_head_t wait; + unsigned long frequency; + unsigned long accuracy; +}; + +#define to_clk_main_rc_osc(hw) container_of(hw, struct clk_main_rc_osc, hw) + +struct clk_rm9200_main { + struct clk_hw hw; + struct at91_pmc *pmc; +}; + +#define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw) -static irqreturn_t clk_main_irq_handler(int irq, void *dev_id) +struct clk_sam9x5_main { + struct clk_hw hw; + struct at91_pmc *pmc; + unsigned int irq; + wait_queue_head_t wait; + u8 parent; +}; + +#define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw) + +static irqreturn_t clk_main_osc_irq_handler(int 
irq, void *dev_id) { - struct clk_main *clkmain = (struct clk_main *)dev_id; + struct clk_main_osc *osc = dev_id; - wake_up(&clkmain->wait); - disable_irq_nosync(clkmain->irq); + wake_up(&osc->wait); + disable_irq_nosync(osc->irq); return IRQ_HANDLED; } -static int clk_main_prepare(struct clk_hw *hw) +static int clk_main_osc_prepare(struct clk_hw *hw) { - struct clk_main *clkmain = to_clk_main(hw); - struct at91_pmc *pmc = clkmain->pmc; - unsigned long halt_time, timeout; + struct clk_main_osc *osc = to_clk_main_osc(hw); + struct at91_pmc *pmc = osc->pmc; u32 tmp; + tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; + if (tmp & AT91_PMC_OSCBYPASS) + return 0; + + if (!(tmp & AT91_PMC_MOSCEN)) { + tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY; + pmc_write(pmc, AT91_CKGR_MOR, tmp); + } + while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) { - enable_irq(clkmain->irq); - wait_event(clkmain->wait, + enable_irq(osc->irq); + wait_event(osc->wait, pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS); } - if (clkmain->rate) - return 0; + return 0; +} + +static void clk_main_osc_unprepare(struct clk_hw *hw) +{ + struct clk_main_osc *osc = to_clk_main_osc(hw); + struct at91_pmc *pmc = osc->pmc; + u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); + + if (tmp & AT91_PMC_OSCBYPASS) + return; + + if (!(tmp & AT91_PMC_MOSCEN)) + return; + + tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN); + pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); +} + +static int clk_main_osc_is_prepared(struct clk_hw *hw) +{ + struct clk_main_osc *osc = to_clk_main_osc(hw); + struct at91_pmc *pmc = osc->pmc; + u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); + + if (tmp & AT91_PMC_OSCBYPASS) + return 1; + + return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) && + (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN)); +} + +static const struct clk_ops main_osc_ops = { + .prepare = clk_main_osc_prepare, + .unprepare = clk_main_osc_unprepare, + .is_prepared = clk_main_osc_is_prepared, +}; + +static struct clk * __init +at91_clk_register_main_osc(struct at91_pmc *pmc, + unsigned int irq, + const char *name, + const char *parent_name, + bool bypass) +{ + int ret; + struct clk_main_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!pmc || !irq || !name || !parent_name) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); + if (!osc) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &main_osc_ops; + init.parent_names = &parent_name; + init.num_parents = 1; + init.flags = CLK_IGNORE_UNUSED; + + osc->hw.init = &init; + osc->pmc = pmc; + osc->irq = irq; + + init_waitqueue_head(&osc->wait); + irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); + ret = request_irq(osc->irq, clk_main_osc_irq_handler, + IRQF_TRIGGER_HIGH, name, osc); + if (ret) + return ERR_PTR(ret); + + if (bypass) + pmc_write(pmc, AT91_CKGR_MOR, + (pmc_read(pmc, AT91_CKGR_MOR) & + ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) | + AT91_PMC_OSCBYPASS | AT91_PMC_KEY); + + clk = clk_register(NULL, &osc->hw); + if (IS_ERR(clk)) { + free_irq(irq, osc); + kfree(osc); + } + + return clk; +} + +void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np, + struct at91_pmc *pmc) +{ + struct clk *clk; + unsigned int irq; + const char *name = np->name; + const char *parent_name; + bool bypass; + + of_property_read_string(np, "clock-output-names", &name); + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + parent_name = of_clk_get_parent_name(np, 0); + + irq = irq_of_parse_and_map(np, 0); + if (!irq) + return; + + clk = at91_clk_register_main_osc(pmc, irq, name, 
parent_name, bypass); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id) +{ + struct clk_main_rc_osc *osc = dev_id; + + wake_up(&osc->wait); + disable_irq_nosync(osc->irq); + + return IRQ_HANDLED; +} + +static int clk_main_rc_osc_prepare(struct clk_hw *hw) +{ + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); + struct at91_pmc *pmc = osc->pmc; + u32 tmp; + + tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; + + if (!(tmp & AT91_PMC_MOSCRCEN)) { + tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY; + pmc_write(pmc, AT91_CKGR_MOR, tmp); + } + + while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) { + enable_irq(osc->irq); + wait_event(osc->wait, + pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS); + } + + return 0; +} + +static void clk_main_rc_osc_unprepare(struct clk_hw *hw) +{ + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); + struct at91_pmc *pmc = osc->pmc; + u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); + + if (!(tmp & AT91_PMC_MOSCRCEN)) + return; + + tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN); + pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); +} + +static int clk_main_rc_osc_is_prepared(struct clk_hw *hw) +{ + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); + struct at91_pmc *pmc = osc->pmc; + + return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) && + (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN)); +} + +static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); + + return osc->frequency; +} + +static unsigned long clk_main_rc_osc_recalc_accuracy(struct clk_hw *hw, + unsigned long parent_acc) +{ + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); + + return osc->accuracy; +} + +static const struct clk_ops main_rc_osc_ops = { + .prepare = clk_main_rc_osc_prepare, + .unprepare = clk_main_rc_osc_unprepare, + .is_prepared = clk_main_rc_osc_is_prepared, + .recalc_rate = clk_main_rc_osc_recalc_rate, + .recalc_accuracy = clk_main_rc_osc_recalc_accuracy, +}; + +static struct clk * __init +at91_clk_register_main_rc_osc(struct at91_pmc *pmc, + unsigned int irq, + const char *name, + u32 frequency, u32 accuracy) +{ + int ret; + struct clk_main_rc_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!pmc || !irq || !name || !frequency) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); + if (!osc) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &main_rc_osc_ops; + init.parent_names = NULL; + init.num_parents = 0; + init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED; + + osc->hw.init = &init; + osc->pmc = pmc; + osc->irq = irq; + osc->frequency = frequency; + osc->accuracy = accuracy; + + init_waitqueue_head(&osc->wait); + irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); + ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler, + IRQF_TRIGGER_HIGH, name, osc); + if (ret) + return ERR_PTR(ret); + + clk = clk_register(NULL, &osc->hw); + if (IS_ERR(clk)) { + free_irq(irq, osc); + kfree(osc); + } + + return clk; +} + +void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, + struct at91_pmc *pmc) +{ + struct clk *clk; + unsigned int irq; + u32 frequency = 0; + u32 accuracy = 0; + const char *name = np->name; + + of_property_read_string(np, "clock-output-names", &name); + of_property_read_u32(np, "clock-frequency", &frequency); + of_property_read_u32(np, "clock-accuracy", &accuracy); + + irq = 
irq_of_parse_and_map(np, 0); + if (!irq) + return; + + clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency, + accuracy); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + + +static int clk_main_probe_frequency(struct at91_pmc *pmc) +{ + unsigned long prep_time, timeout; + u32 tmp; timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT); do { - halt_time = jiffies; + prep_time = jiffies; tmp = pmc_read(pmc, AT91_CKGR_MCFR); if (tmp & AT91_PMC_MAINRDY) return 0; usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT); - } while (time_before(halt_time, timeout)); + } while (time_before(prep_time, timeout)); - return 0; + return -ETIMEDOUT; } -static int clk_main_is_prepared(struct clk_hw *hw) +static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc, + unsigned long parent_rate) { - struct clk_main *clkmain = to_clk_main(hw); + u32 tmp; + + if (parent_rate) + return parent_rate; + + tmp = pmc_read(pmc, AT91_CKGR_MCFR); + if (!(tmp & AT91_PMC_MAINRDY)) + return 0; - return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCS); + return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV; } -static unsigned long clk_main_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static int clk_rm9200_main_prepare(struct clk_hw *hw) { - u32 tmp; - struct clk_main *clkmain = to_clk_main(hw); + struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); + + return clk_main_probe_frequency(clkmain->pmc); +} + +static int clk_rm9200_main_is_prepared(struct clk_hw *hw) +{ + struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); + + return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY); +} + +static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); + + return clk_main_recalc_rate(clkmain->pmc, parent_rate); +} + +static const struct clk_ops rm9200_main_ops = { + .prepare = clk_rm9200_main_prepare, + .is_prepared = clk_rm9200_main_is_prepared, + .recalc_rate = clk_rm9200_main_recalc_rate, +}; + +static struct clk * __init +at91_clk_register_rm9200_main(struct at91_pmc *pmc, + const char *name, + const char *parent_name) +{ + struct clk_rm9200_main *clkmain; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!pmc || !name) + return ERR_PTR(-EINVAL); + + if (!parent_name) + return ERR_PTR(-EINVAL); + + clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL); + if (!clkmain) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &rm9200_main_ops; + init.parent_names = &parent_name; + init.num_parents = 1; + init.flags = 0; + + clkmain->hw.init = &init; + clkmain->pmc = pmc; + + clk = clk_register(NULL, &clkmain->hw); + if (IS_ERR(clk)) + kfree(clkmain); + + return clk; +} + +void __init of_at91rm9200_clk_main_setup(struct device_node *np, + struct at91_pmc *pmc) +{ + struct clk *clk; + const char *parent_name; + const char *name = np->name; + + parent_name = of_clk_get_parent_name(np, 0); + of_property_read_string(np, "clock-output-names", &name); + + clk = at91_clk_register_rm9200_main(pmc, name, parent_name); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id) +{ + struct clk_sam9x5_main *clkmain = dev_id; + + wake_up(&clkmain->wait); + disable_irq_nosync(clkmain->irq); + + return IRQ_HANDLED; +} + +static int clk_sam9x5_main_prepare(struct clk_hw *hw) +{ + struct clk_sam9x5_main *clkmain = 
to_clk_sam9x5_main(hw); struct at91_pmc *pmc = clkmain->pmc; - if (clkmain->rate) - return clkmain->rate; + while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) { + enable_irq(clkmain->irq); + wait_event(clkmain->wait, + pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); + } + + return clk_main_probe_frequency(pmc); +} - tmp = pmc_read(pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINF; - clkmain->rate = (tmp * parent_rate) / MAINF_DIV; +static int clk_sam9x5_main_is_prepared(struct clk_hw *hw) +{ + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); - return clkmain->rate; + return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); } -static const struct clk_ops main_ops = { - .prepare = clk_main_prepare, - .is_prepared = clk_main_is_prepared, - .recalc_rate = clk_main_recalc_rate, +static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); + + return clk_main_recalc_rate(clkmain->pmc, parent_rate); +} + +static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); + struct at91_pmc *pmc = clkmain->pmc; + u32 tmp; + + if (index > 1) + return -EINVAL; + + tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; + + if (index && !(tmp & AT91_PMC_MOSCSEL)) + pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); + else if (!index && (tmp & AT91_PMC_MOSCSEL)) + pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); + + while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) { + enable_irq(clkmain->irq); + wait_event(clkmain->wait, + pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); + } + + return 0; +} + +static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw) +{ + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); + + return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN); +} + +static const struct clk_ops sam9x5_main_ops = { + .prepare = clk_sam9x5_main_prepare, + .is_prepared = clk_sam9x5_main_is_prepared, + .recalc_rate = clk_sam9x5_main_recalc_rate, + .set_parent = clk_sam9x5_main_set_parent, + .get_parent = clk_sam9x5_main_get_parent, }; static struct clk * __init -at91_clk_register_main(struct at91_pmc *pmc, - unsigned int irq, - const char *name, - const char *parent_name, - unsigned long rate) +at91_clk_register_sam9x5_main(struct at91_pmc *pmc, + unsigned int irq, + const char *name, + const char **parent_names, + int num_parents) { int ret; - struct clk_main *clkmain; + struct clk_sam9x5_main *clkmain; struct clk *clk = NULL; struct clk_init_data init; if (!pmc || !irq || !name) return ERR_PTR(-EINVAL); - if (!rate && !parent_name) + if (!parent_names || !num_parents) return ERR_PTR(-EINVAL); clkmain = kzalloc(sizeof(*clkmain), GFP_KERNEL); @@ -130,19 +577,20 @@ at91_clk_register_main(struct at91_pmc *pmc, return ERR_PTR(-ENOMEM); init.name = name; - init.ops = &main_ops; - init.parent_names = parent_name ? &parent_name : NULL; - init.num_parents = parent_name ? 1 : 0; - init.flags = parent_name ? 
0 : CLK_IS_ROOT; + init.ops = &sam9x5_main_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = CLK_SET_PARENT_GATE; clkmain->hw.init = &init; - clkmain->rate = rate; clkmain->pmc = pmc; clkmain->irq = irq; + clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & + AT91_PMC_MOSCEN); init_waitqueue_head(&clkmain->wait); irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN); - ret = request_irq(clkmain->irq, clk_main_irq_handler, - IRQF_TRIGGER_HIGH, "clk-main", clkmain); + ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler, + IRQF_TRIGGER_HIGH, name, clkmain); if (ret) return ERR_PTR(ret); @@ -155,33 +603,36 @@ at91_clk_register_main(struct at91_pmc *pmc, return clk; } - - -static void __init -of_at91_clk_main_setup(struct device_node *np, struct at91_pmc *pmc) +void __init of_at91sam9x5_clk_main_setup(struct device_node *np, + struct at91_pmc *pmc) { struct clk *clk; + const char *parent_names[2]; + int num_parents; unsigned int irq; - const char *parent_name; const char *name = np->name; - u32 rate = 0; + int i; + + num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (num_parents <= 0 || num_parents > 2) + return; + + for (i = 0; i < num_parents; ++i) { + parent_names[i] = of_clk_get_parent_name(np, i); + if (!parent_names[i]) + return; + } - parent_name = of_clk_get_parent_name(np, 0); of_property_read_string(np, "clock-output-names", &name); - of_property_read_u32(np, "clock-frequency", &rate); + irq = irq_of_parse_and_map(np, 0); if (!irq) return; - clk = at91_clk_register_main(pmc, irq, name, parent_name, rate); + clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names, + num_parents); if (IS_ERR(clk)) return; of_clk_add_provider(np, of_clk_src_simple_get, clk); } - -void __init of_at91rm9200_clk_main_setup(struct device_node *np, - struct at91_pmc *pmc) -{ - of_at91_clk_main_setup(np, pmc); -} diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c new file mode 100644 index 00000000000..0300c46ee24 --- /dev/null +++ b/drivers/clk/at91/clk-slow.c @@ -0,0 +1,467 @@ +/* + * drivers/clk/at91/clk-slow.c + * + * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/clk/at91_pmc.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/sched.h> +#include <linux/wait.h> + +#include "pmc.h" +#include "sckc.h" + +#define SLOW_CLOCK_FREQ 32768 +#define SLOWCK_SW_CYCLES 5 +#define SLOWCK_SW_TIME_USEC ((SLOWCK_SW_CYCLES * USEC_PER_SEC) / \ + SLOW_CLOCK_FREQ) + +#define AT91_SCKC_CR 0x00 +#define AT91_SCKC_RCEN (1 << 0) +#define AT91_SCKC_OSC32EN (1 << 1) +#define AT91_SCKC_OSC32BYP (1 << 2) +#define AT91_SCKC_OSCSEL (1 << 3) + +struct clk_slow_osc { + struct clk_hw hw; + void __iomem *sckcr; + unsigned long startup_usec; +}; + +#define to_clk_slow_osc(hw) container_of(hw, struct clk_slow_osc, hw) + +struct clk_slow_rc_osc { + struct clk_hw hw; + void __iomem *sckcr; + unsigned long frequency; + unsigned long accuracy; + unsigned long startup_usec; +}; + +#define to_clk_slow_rc_osc(hw) container_of(hw, struct clk_slow_rc_osc, hw) + +struct clk_sam9260_slow { + struct clk_hw hw; + struct at91_pmc *pmc; +}; + +#define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw) + +struct clk_sam9x5_slow { + struct clk_hw hw; + void __iomem *sckcr; + u8 parent; +}; + +#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw) + + +static int clk_slow_osc_prepare(struct clk_hw *hw) +{ + struct clk_slow_osc *osc = to_clk_slow_osc(hw); + void __iomem *sckcr = osc->sckcr; + u32 tmp = readl(sckcr); + + if (tmp & AT91_SCKC_OSC32BYP) + return 0; + + writel(tmp | AT91_SCKC_OSC32EN, sckcr); + + usleep_range(osc->startup_usec, osc->startup_usec + 1); + + return 0; +} + +static void clk_slow_osc_unprepare(struct clk_hw *hw) +{ + struct clk_slow_osc *osc = to_clk_slow_osc(hw); + void __iomem *sckcr = osc->sckcr; + u32 tmp = readl(sckcr); + + if (tmp & AT91_SCKC_OSC32BYP) + return; + + writel(tmp & ~AT91_SCKC_OSC32EN, sckcr); +} + +static int clk_slow_osc_is_prepared(struct clk_hw *hw) +{ + struct clk_slow_osc *osc = to_clk_slow_osc(hw); + void __iomem *sckcr = osc->sckcr; + u32 tmp = readl(sckcr); + + if (tmp & AT91_SCKC_OSC32BYP) + return 1; + + return !!(tmp & AT91_SCKC_OSC32EN); +} + +static const struct clk_ops slow_osc_ops = { + .prepare = clk_slow_osc_prepare, + .unprepare = clk_slow_osc_unprepare, + .is_prepared = clk_slow_osc_is_prepared, +}; + +static struct clk * __init +at91_clk_register_slow_osc(void __iomem *sckcr, + const char *name, + const char *parent_name, + unsigned long startup, + bool bypass) +{ + struct clk_slow_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!sckcr || !name || !parent_name) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); + if (!osc) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &slow_osc_ops; + init.parent_names = &parent_name; + init.num_parents = 1; + init.flags = CLK_IGNORE_UNUSED; + + osc->hw.init = &init; + osc->sckcr = sckcr; + osc->startup_usec = startup; + + if (bypass) + writel((readl(sckcr) & ~AT91_SCKC_OSC32EN) | AT91_SCKC_OSC32BYP, + sckcr); + + clk = clk_register(NULL, &osc->hw); + if (IS_ERR(clk)) + kfree(osc); + + return clk; +} + +void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np, + void __iomem *sckcr) +{ + struct clk *clk; + const char *parent_name; + const char *name = np->name; + u32 startup; + bool bypass; + + parent_name = of_clk_get_parent_name(np, 0); + 
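+	/*
+	 * Descriptive note (added commentary, not in the original patch):
+	 * all three DT properties below are optional. "clock-output-names"
+	 * overrides the node name, "atmel,startup-time-usec" is the crystal
+	 * stabilization delay applied in clk_slow_osc_prepare(), and
+	 * "atmel,osc-bypass" marks an externally provided slow clock with
+	 * the on-chip oscillator bypassed.
+	 */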
of_property_read_string(np, "clock-output-names", &name); + of_property_read_u32(np, "atmel,startup-time-usec", &startup); + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + clk = at91_clk_register_slow_osc(sckcr, name, parent_name, startup, + bypass); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static unsigned long clk_slow_rc_osc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + + return osc->frequency; +} + +static unsigned long clk_slow_rc_osc_recalc_accuracy(struct clk_hw *hw, + unsigned long parent_acc) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + + return osc->accuracy; +} + +static int clk_slow_rc_osc_prepare(struct clk_hw *hw) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + void __iomem *sckcr = osc->sckcr; + + writel(readl(sckcr) | AT91_SCKC_RCEN, sckcr); + + usleep_range(osc->startup_usec, osc->startup_usec + 1); + + return 0; +} + +static void clk_slow_rc_osc_unprepare(struct clk_hw *hw) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + void __iomem *sckcr = osc->sckcr; + + writel(readl(sckcr) & ~AT91_SCKC_RCEN, sckcr); +} + +static int clk_slow_rc_osc_is_prepared(struct clk_hw *hw) +{ + struct clk_slow_rc_osc *osc = to_clk_slow_rc_osc(hw); + + return !!(readl(osc->sckcr) & AT91_SCKC_RCEN); +} + +static const struct clk_ops slow_rc_osc_ops = { + .prepare = clk_slow_rc_osc_prepare, + .unprepare = clk_slow_rc_osc_unprepare, + .is_prepared = clk_slow_rc_osc_is_prepared, + .recalc_rate = clk_slow_rc_osc_recalc_rate, + .recalc_accuracy = clk_slow_rc_osc_recalc_accuracy, +}; + +static struct clk * __init +at91_clk_register_slow_rc_osc(void __iomem *sckcr, + const char *name, + unsigned long frequency, + unsigned long accuracy, + unsigned long startup) +{ + struct clk_slow_rc_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!sckcr || !name) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); + if (!osc) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &slow_rc_osc_ops; + init.parent_names = NULL; + init.num_parents = 0; + init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED; + + osc->hw.init = &init; + osc->sckcr = sckcr; + osc->frequency = frequency; + osc->accuracy = accuracy; + osc->startup_usec = startup; + + clk = clk_register(NULL, &osc->hw); + if (IS_ERR(clk)) + kfree(osc); + + return clk; +} + +void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np, + void __iomem *sckcr) +{ + struct clk *clk; + u32 frequency = 0; + u32 accuracy = 0; + u32 startup = 0; + const char *name = np->name; + + of_property_read_string(np, "clock-output-names", &name); + of_property_read_u32(np, "clock-frequency", &frequency); + of_property_read_u32(np, "clock-accuracy", &accuracy); + of_property_read_u32(np, "atmel,startup-time-usec", &startup); + + clk = at91_clk_register_slow_rc_osc(sckcr, name, frequency, accuracy, + startup); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static int clk_sam9x5_slow_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw); + void __iomem *sckcr = slowck->sckcr; + u32 tmp; + + if (index > 1) + return -EINVAL; + + tmp = readl(sckcr); + + if ((!index && !(tmp & AT91_SCKC_OSCSEL)) || + (index && (tmp & AT91_SCKC_OSCSEL))) + return 0; + + if (index) + tmp |= AT91_SCKC_OSCSEL; + else + tmp &= ~AT91_SCKC_OSCSEL; + + writel(tmp, sckcr); + + 
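+	/*
+	 * Descriptive note (added commentary, not in the original patch):
+	 * after flipping OSCSEL, give the switch SLOWCK_SW_CYCLES (5)
+	 * periods of the 32768 Hz slow clock (~152 us, see
+	 * SLOWCK_SW_TIME_USEC above) to settle before reporting the
+	 * reparent as complete.
+	 */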
usleep_range(SLOWCK_SW_TIME_USEC, SLOWCK_SW_TIME_USEC + 1); + + return 0; +} + +static u8 clk_sam9x5_slow_get_parent(struct clk_hw *hw) +{ + struct clk_sam9x5_slow *slowck = to_clk_sam9x5_slow(hw); + + return !!(readl(slowck->sckcr) & AT91_SCKC_OSCSEL); +} + +static const struct clk_ops sam9x5_slow_ops = { + .set_parent = clk_sam9x5_slow_set_parent, + .get_parent = clk_sam9x5_slow_get_parent, +}; + +static struct clk * __init +at91_clk_register_sam9x5_slow(void __iomem *sckcr, + const char *name, + const char **parent_names, + int num_parents) +{ + struct clk_sam9x5_slow *slowck; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!sckcr || !name || !parent_names || !num_parents) + return ERR_PTR(-EINVAL); + + slowck = kzalloc(sizeof(*slowck), GFP_KERNEL); + if (!slowck) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &sam9x5_slow_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = 0; + + slowck->hw.init = &init; + slowck->sckcr = sckcr; + slowck->parent = !!(readl(sckcr) & AT91_SCKC_OSCSEL); + + clk = clk_register(NULL, &slowck->hw); + if (IS_ERR(clk)) + kfree(slowck); + + return clk; +} + +void __init of_at91sam9x5_clk_slow_setup(struct device_node *np, + void __iomem *sckcr) +{ + struct clk *clk; + const char *parent_names[2]; + int num_parents; + const char *name = np->name; + int i; + + num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (num_parents <= 0 || num_parents > 2) + return; + + for (i = 0; i < num_parents; ++i) { + parent_names[i] = of_clk_get_parent_name(np, i); + if (!parent_names[i]) + return; + } + + of_property_read_string(np, "clock-output-names", &name); + + clk = at91_clk_register_sam9x5_slow(sckcr, name, parent_names, + num_parents); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} + +static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw) +{ + struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw); + + return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL); +} + +static const struct clk_ops sam9260_slow_ops = { + .get_parent = clk_sam9260_slow_get_parent, +}; + +static struct clk * __init +at91_clk_register_sam9260_slow(struct at91_pmc *pmc, + const char *name, + const char **parent_names, + int num_parents) +{ + struct clk_sam9260_slow *slowck; + struct clk *clk = NULL; + struct clk_init_data init; + + if (!pmc || !name) + return ERR_PTR(-EINVAL); + + if (!parent_names || !num_parents) + return ERR_PTR(-EINVAL); + + slowck = kzalloc(sizeof(*slowck), GFP_KERNEL); + if (!slowck) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &sam9260_slow_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = 0; + + slowck->hw.init = &init; + slowck->pmc = pmc; + + clk = clk_register(NULL, &slowck->hw); + if (IS_ERR(clk)) + kfree(slowck); + + return clk; +} + +void __init of_at91sam9260_clk_slow_setup(struct device_node *np, + struct at91_pmc *pmc) +{ + struct clk *clk; + const char *parent_names[2]; + int num_parents; + const char *name = np->name; + int i; + + num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + if (num_parents <= 0 || num_parents > 1) + return; + + for (i = 0; i < num_parents; ++i) { + parent_names[i] = of_clk_get_parent_name(np, i); + if (!parent_names[i]) + return; + } + + of_property_read_string(np, "clock-output-names", &name); + + clk = at91_clk_register_sam9260_slow(pmc, name, parent_names, + num_parents); + if (IS_ERR(clk)) + return; + + 
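+	/*
+	 * Descriptive note (added commentary, not in the original patch):
+	 * expose the registered clock to DT consumers; of_clk_src_simple_get
+	 * hands back this single clk for any phandle referencing the node.
+	 */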
of_clk_add_provider(np, of_clk_src_simple_get, clk); +} diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 6a61477a57e..524196bb35a 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -229,11 +229,28 @@ out_free_pmc: } static const struct of_device_id pmc_clk_ids[] __initconst = { + /* Slow oscillator */ + { + .compatible = "atmel,at91sam9260-clk-slow", + .data = of_at91sam9260_clk_slow_setup, + }, /* Main clock */ { + .compatible = "atmel,at91rm9200-clk-main-osc", + .data = of_at91rm9200_clk_main_osc_setup, + }, + { + .compatible = "atmel,at91sam9x5-clk-main-rc-osc", + .data = of_at91sam9x5_clk_main_rc_osc_setup, + }, + { .compatible = "atmel,at91rm9200-clk-main", .data = of_at91rm9200_clk_main_setup, }, + { + .compatible = "atmel,at91sam9x5-clk-main", + .data = of_at91sam9x5_clk_main_setup, + }, /* PLL clocks */ { .compatible = "atmel,at91rm9200-clk-pll", diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index 441350983cc..6c762597611 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -58,8 +58,17 @@ static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value) int of_at91_get_clk_range(struct device_node *np, const char *propname, struct clk_range *range); +extern void __init of_at91sam9260_clk_slow_setup(struct device_node *np, + struct at91_pmc *pmc); + +extern void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np, + struct at91_pmc *pmc); +extern void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, + struct at91_pmc *pmc); extern void __init of_at91rm9200_clk_main_setup(struct device_node *np, struct at91_pmc *pmc); +extern void __init of_at91sam9x5_clk_main_setup(struct device_node *np, + struct at91_pmc *pmc); extern void __init of_at91rm9200_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc); diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c new file mode 100644 index 00000000000..1184d76a7ab --- /dev/null +++ b/drivers/clk/at91/sckc.c @@ -0,0 +1,57 @@ +/* + * drivers/clk/at91/sckc.c + * + * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ + +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> + +#include "sckc.h" + +static const struct of_device_id sckc_clk_ids[] __initconst = { + /* Slow clock */ + { + .compatible = "atmel,at91sam9x5-clk-slow-osc", + .data = of_at91sam9x5_clk_slow_osc_setup, + }, + { + .compatible = "atmel,at91sam9x5-clk-slow-rc-osc", + .data = of_at91sam9x5_clk_slow_rc_osc_setup, + }, + { + .compatible = "atmel,at91sam9x5-clk-slow", + .data = of_at91sam9x5_clk_slow_setup, + }, + { /*sentinel*/ } +}; + +static void __init of_at91sam9x5_sckc_setup(struct device_node *np) +{ + struct device_node *childnp; + void (*clk_setup)(struct device_node *, void __iomem *); + const struct of_device_id *clk_id; + void __iomem *regbase = of_iomap(np, 0); + + if (!regbase) + return; + + for_each_child_of_node(np, childnp) { + clk_id = of_match_node(sckc_clk_ids, childnp); + if (!clk_id) + continue; + clk_setup = clk_id->data; + clk_setup(childnp, regbase); + } +} +CLK_OF_DECLARE(at91sam9x5_clk_sckc, "atmel,at91sam9x5-sckc", + of_at91sam9x5_sckc_setup); diff --git a/drivers/clk/at91/sckc.h b/drivers/clk/at91/sckc.h new file mode 100644 index 00000000000..836fcf59820 --- /dev/null +++ b/drivers/clk/at91/sckc.h @@ -0,0 +1,22 @@ +/* + * drivers/clk/at91/sckc.h + * + * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __AT91_SCKC_H_ +#define __AT91_SCKC_H_ + +extern void __init of_at91sam9x5_clk_slow_osc_setup(struct device_node *np, + void __iomem *sckcr); +extern void __init of_at91sam9x5_clk_slow_rc_osc_setup(struct device_node *np, + void __iomem *sckcr); +extern void __init of_at91sam9x5_clk_slow_setup(struct device_node *np, + void __iomem *sckcr); + +#endif /* __AT91_SCKC_H_ */ diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c index c7607feb18d..54a06526f64 100644 --- a/drivers/clk/bcm/clk-kona-setup.c +++ b/drivers/clk/bcm/clk-kona-setup.c @@ -27,7 +27,7 @@ LIST_HEAD(ccu_list); /* The list of set up CCUs */ static bool clk_requires_trigger(struct kona_clk *bcm_clk) { - struct peri_clk_data *peri = bcm_clk->peri; + struct peri_clk_data *peri = bcm_clk->u.peri; struct bcm_clk_sel *sel; struct bcm_clk_div *div; @@ -63,7 +63,7 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) u32 limit; BUG_ON(bcm_clk->type != bcm_clk_peri); - peri = bcm_clk->peri; + peri = bcm_clk->u.peri; name = bcm_clk->name; range = bcm_clk->ccu->range; @@ -81,19 +81,19 @@ static bool peri_clk_data_offsets_valid(struct kona_clk *bcm_clk) div = &peri->div; if (divider_exists(div)) { - if (div->offset > limit) { + if (div->u.s.offset > limit) { pr_err("%s: bad divider offset for %s (%u > %u)\n", - __func__, name, div->offset, limit); + __func__, name, div->u.s.offset, limit); return false; } } div = &peri->pre_div; if (divider_exists(div)) { - if (div->offset > limit) { + if (div->u.s.offset > limit) { pr_err("%s: bad pre-divider offset for %s " "(%u > %u)\n", - __func__, name, div->offset, limit); + __func__, name, div->u.s.offset, limit); return false; } } @@ -249,21 +249,22 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name, { if (divider_is_fixed(div)) { /* Any fixed divider value but 0 is OK */ - if 
(div->fixed == 0) { + if (div->u.fixed == 0) { pr_err("%s: bad %s fixed value 0 for %s\n", __func__, field_name, clock_name); return false; } return true; } - if (!bitfield_valid(div->shift, div->width, field_name, clock_name)) + if (!bitfield_valid(div->u.s.shift, div->u.s.width, + field_name, clock_name)) return false; if (divider_has_fraction(div)) - if (div->frac_width > div->width) { + if (div->u.s.frac_width > div->u.s.width) { pr_warn("%s: bad %s fraction width for %s (%u > %u)\n", __func__, field_name, clock_name, - div->frac_width, div->width); + div->u.s.frac_width, div->u.s.width); return false; } @@ -278,7 +279,7 @@ static bool div_valid(struct bcm_clk_div *div, const char *field_name, */ static bool kona_dividers_valid(struct kona_clk *bcm_clk) { - struct peri_clk_data *peri = bcm_clk->peri; + struct peri_clk_data *peri = bcm_clk->u.peri; struct bcm_clk_div *div; struct bcm_clk_div *pre_div; u32 limit; @@ -295,7 +296,7 @@ static bool kona_dividers_valid(struct kona_clk *bcm_clk) limit = BITS_PER_BYTE * sizeof(u32); - return div->frac_width + pre_div->frac_width <= limit; + return div->u.s.frac_width + pre_div->u.s.frac_width <= limit; } @@ -328,7 +329,7 @@ peri_clk_data_valid(struct kona_clk *bcm_clk) if (!peri_clk_data_offsets_valid(bcm_clk)) return false; - peri = bcm_clk->peri; + peri = bcm_clk->u.peri; name = bcm_clk->name; gate = &peri->gate; if (gate_exists(gate) && !gate_valid(gate, "gate", name)) @@ -588,12 +589,12 @@ static void bcm_clk_teardown(struct kona_clk *bcm_clk) { switch (bcm_clk->type) { case bcm_clk_peri: - peri_clk_teardown(bcm_clk->data, &bcm_clk->init_data); + peri_clk_teardown(bcm_clk->u.data, &bcm_clk->init_data); break; default: break; } - bcm_clk->data = NULL; + bcm_clk->u.data = NULL; bcm_clk->type = bcm_clk_none; } @@ -644,7 +645,7 @@ struct clk *kona_clk_setup(struct ccu_data *ccu, const char *name, break; } bcm_clk->type = type; - bcm_clk->data = data; + bcm_clk->u.data = data; /* Make sure everything makes sense before we set it up */ if (!kona_clk_valid(bcm_clk)) { diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c index e3d339e0830..db11a87449f 100644 --- a/drivers/clk/bcm/clk-kona.c +++ b/drivers/clk/bcm/clk-kona.c @@ -61,7 +61,7 @@ u64 do_div_round_closest(u64 dividend, unsigned long divisor) /* Convert a divider into the scaled divisor value it represents. 
*/ static inline u64 scaled_div_value(struct bcm_clk_div *div, u32 reg_div) { - return (u64)reg_div + ((u64)1 << div->frac_width); + return (u64)reg_div + ((u64)1 << div->u.s.frac_width); } /* @@ -77,7 +77,7 @@ u64 scaled_div_build(struct bcm_clk_div *div, u32 div_value, u32 billionths) BUG_ON(billionths >= BILLION); combined = (u64)div_value * BILLION + billionths; - combined <<= div->frac_width; + combined <<= div->u.s.frac_width; return do_div_round_closest(combined, BILLION); } @@ -87,7 +87,7 @@ static inline u64 scaled_div_min(struct bcm_clk_div *div) { if (divider_is_fixed(div)) - return (u64)div->fixed; + return (u64)div->u.fixed; return scaled_div_value(div, 0); } @@ -98,9 +98,9 @@ u64 scaled_div_max(struct bcm_clk_div *div) u32 reg_div; if (divider_is_fixed(div)) - return (u64)div->fixed; + return (u64)div->u.fixed; - reg_div = ((u32)1 << div->width) - 1; + reg_div = ((u32)1 << div->u.s.width) - 1; return scaled_div_value(div, reg_div); } @@ -115,7 +115,7 @@ divider(struct bcm_clk_div *div, u64 scaled_div) BUG_ON(scaled_div < scaled_div_min(div)); BUG_ON(scaled_div > scaled_div_max(div)); - return (u32)(scaled_div - ((u64)1 << div->frac_width)); + return (u32)(scaled_div - ((u64)1 << div->u.s.frac_width)); } /* Return a rate scaled for use when dividing by a scaled divisor. */ @@ -125,7 +125,7 @@ scale_rate(struct bcm_clk_div *div, u32 rate) if (divider_is_fixed(div)) return (u64)rate; - return (u64)rate << div->frac_width; + return (u64)rate << div->u.s.frac_width; } /* CCU access */ @@ -398,14 +398,14 @@ static u64 divider_read_scaled(struct ccu_data *ccu, struct bcm_clk_div *div) u32 reg_div; if (divider_is_fixed(div)) - return (u64)div->fixed; + return (u64)div->u.fixed; flags = ccu_lock(ccu); - reg_val = __ccu_read(ccu, div->offset); + reg_val = __ccu_read(ccu, div->u.s.offset); ccu_unlock(ccu, flags); /* Extract the full divider field from the register value */ - reg_div = bitfield_extract(reg_val, div->shift, div->width); + reg_div = bitfield_extract(reg_val, div->u.s.shift, div->u.s.width); /* Return the scaled divisor value it represents */ return scaled_div_value(div, reg_div); @@ -433,16 +433,17 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, * state was defined in the device tree, we just find out * what its current value is rather than updating it. 
*/ - if (div->scaled_div == BAD_SCALED_DIV_VALUE) { - reg_val = __ccu_read(ccu, div->offset); - reg_div = bitfield_extract(reg_val, div->shift, div->width); - div->scaled_div = scaled_div_value(div, reg_div); + if (div->u.s.scaled_div == BAD_SCALED_DIV_VALUE) { + reg_val = __ccu_read(ccu, div->u.s.offset); + reg_div = bitfield_extract(reg_val, div->u.s.shift, + div->u.s.width); + div->u.s.scaled_div = scaled_div_value(div, reg_div); return 0; } /* Convert the scaled divisor to the value we need to record */ - reg_div = divider(div, div->scaled_div); + reg_div = divider(div, div->u.s.scaled_div); /* Clock needs to be enabled before changing the rate */ enabled = __is_clk_gate_enabled(ccu, gate); @@ -452,9 +453,10 @@ static int __div_commit(struct ccu_data *ccu, struct bcm_clk_gate *gate, } /* Replace the divider value and record the result */ - reg_val = __ccu_read(ccu, div->offset); - reg_val = bitfield_replace(reg_val, div->shift, div->width, reg_div); - __ccu_write(ccu, div->offset, reg_val); + reg_val = __ccu_read(ccu, div->u.s.offset); + reg_val = bitfield_replace(reg_val, div->u.s.shift, div->u.s.width, + reg_div); + __ccu_write(ccu, div->u.s.offset, reg_val); /* If the trigger fails we still want to disable the gate */ if (!__clk_trigger(ccu, trig)) @@ -490,11 +492,11 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, BUG_ON(divider_is_fixed(div)); - previous = div->scaled_div; + previous = div->u.s.scaled_div; if (previous == scaled_div) return 0; /* No change */ - div->scaled_div = scaled_div; + div->u.s.scaled_div = scaled_div; flags = ccu_lock(ccu); __ccu_write_enable(ccu); @@ -505,7 +507,7 @@ static int divider_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, ccu_unlock(ccu, flags); if (ret) - div->scaled_div = previous; /* Revert the change */ + div->u.s.scaled_div = previous; /* Revert the change */ return ret; @@ -802,7 +804,7 @@ static int selector_write(struct ccu_data *ccu, struct bcm_clk_gate *gate, static int kona_peri_clk_enable(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; return clk_gate(bcm_clk->ccu, bcm_clk->name, gate, true); } @@ -810,7 +812,7 @@ static int kona_peri_clk_enable(struct clk_hw *hw) static void kona_peri_clk_disable(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; (void)clk_gate(bcm_clk->ccu, bcm_clk->name, gate, false); } @@ -818,7 +820,7 @@ static void kona_peri_clk_disable(struct clk_hw *hw) static int kona_peri_clk_is_enabled(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_gate *gate = &bcm_clk->peri->gate; + struct bcm_clk_gate *gate = &bcm_clk->u.peri->gate; return is_clk_gate_enabled(bcm_clk->ccu, gate) ? 
1 : 0; } @@ -827,7 +829,7 @@ static unsigned long kona_peri_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; return clk_recalc_rate(bcm_clk->ccu, &data->div, &data->pre_div, parent_rate); @@ -837,20 +839,20 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct bcm_clk_div *div = &bcm_clk->peri->div; + struct bcm_clk_div *div = &bcm_clk->u.peri->div; if (!divider_exists(div)) return __clk_get_rate(hw->clk); /* Quietly avoid a zero rate */ - return round_rate(bcm_clk->ccu, div, &bcm_clk->peri->pre_div, + return round_rate(bcm_clk->ccu, div, &bcm_clk->u.peri->pre_div, rate ? rate : 1, *parent_rate, NULL); } static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; struct bcm_clk_sel *sel = &data->sel; struct bcm_clk_trig *trig; int ret; @@ -884,7 +886,7 @@ static int kona_peri_clk_set_parent(struct clk_hw *hw, u8 index) static u8 kona_peri_clk_get_parent(struct clk_hw *hw) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; u8 index; index = selector_read_index(bcm_clk->ccu, &data->sel); @@ -897,7 +899,7 @@ static int kona_peri_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct kona_clk *bcm_clk = to_kona_clk(hw); - struct peri_clk_data *data = bcm_clk->peri; + struct peri_clk_data *data = bcm_clk->u.peri; struct bcm_clk_div *div = &data->div; u64 scaled_div = 0; int ret; @@ -958,7 +960,7 @@ struct clk_ops kona_peri_clk_ops = { static bool __peri_clk_init(struct kona_clk *bcm_clk) { struct ccu_data *ccu = bcm_clk->ccu; - struct peri_clk_data *peri = bcm_clk->peri; + struct peri_clk_data *peri = bcm_clk->u.peri; const char *name = bcm_clk->name; struct bcm_clk_trig *trig; diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h index 5e139adc3dc..dee690951bb 100644 --- a/drivers/clk/bcm/clk-kona.h +++ b/drivers/clk/bcm/clk-kona.h @@ -57,7 +57,7 @@ #define divider_exists(div) FLAG_TEST(div, DIV, EXISTS) #define divider_is_fixed(div) FLAG_TEST(div, DIV, FIXED) #define divider_has_fraction(div) (!divider_is_fixed(div) && \ - (div)->frac_width > 0) + (div)->u.s.frac_width > 0) #define selector_exists(sel) ((sel)->width != 0) #define trigger_exists(trig) FLAG_TEST(trig, TRIG, EXISTS) @@ -244,9 +244,9 @@ struct bcm_clk_div { u32 frac_width; /* field fraction width */ u64 scaled_div; /* scaled divider value */ - }; + } s; u32 fixed; /* non-zero fixed divider value */ - }; + } u; u32 flags; /* BCM_CLK_DIV_FLAGS_* below */ }; @@ -263,28 +263,28 @@ struct bcm_clk_div { /* A fixed (non-zero) divider */ #define FIXED_DIVIDER(_value) \ { \ - .fixed = (_value), \ + .u.fixed = (_value), \ .flags = FLAG(DIV, EXISTS)|FLAG(DIV, FIXED), \ } /* A divider with an integral divisor */ #define DIVIDER(_offset, _shift, _width) \ { \ - .offset = (_offset), \ - .shift = (_shift), \ - .width = (_width), \ - .scaled_div = BAD_SCALED_DIV_VALUE, \ + .u.s.offset = (_offset), \ + .u.s.shift = (_shift), \ + .u.s.width = (_width), \ + .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ .flags = FLAG(DIV, EXISTS), \ } /* A divider whose divisor has an integer and fractional part */ #define 
FRAC_DIVIDER(_offset, _shift, _width, _frac_width) \ { \ - .offset = (_offset), \ - .shift = (_shift), \ - .width = (_width), \ - .frac_width = (_frac_width), \ - .scaled_div = BAD_SCALED_DIV_VALUE, \ + .u.s.offset = (_offset), \ + .u.s.shift = (_shift), \ + .u.s.width = (_width), \ + .u.s.frac_width = (_frac_width), \ + .u.s.scaled_div = BAD_SCALED_DIV_VALUE, \ .flags = FLAG(DIV, EXISTS), \ } @@ -380,7 +380,7 @@ struct kona_clk { union { void *data; struct peri_clk_data *peri; - }; + } u; }; #define to_kona_clk(_hw) \ container_of(_hw, struct kona_clk, hw) diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index ec22112e569..3fbee454022 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -144,6 +144,37 @@ static bool _is_valid_div(struct clk_divider *divider, unsigned int div) return true; } +static int _round_up_table(const struct clk_div_table *table, int div) +{ + const struct clk_div_table *clkt; + int up = INT_MAX; + + for (clkt = table; clkt->div; clkt++) { + if (clkt->div == div) + return clkt->div; + else if (clkt->div < div) + continue; + + if ((clkt->div - div) < (up - div)) + up = clkt->div; + } + + return up; +} + +static int _div_round_up(struct clk_divider *divider, + unsigned long parent_rate, unsigned long rate) +{ + int div = DIV_ROUND_UP(parent_rate, rate); + + if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) + div = __roundup_pow_of_two(div); + if (divider->table) + div = _round_up_table(divider->table, div); + + return div; +} + static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate) { @@ -159,7 +190,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { parent_rate = *best_parent_rate; - bestdiv = DIV_ROUND_UP(parent_rate, rate); + bestdiv = _div_round_up(divider, parent_rate, rate); bestdiv = bestdiv == 0 ? 1 : bestdiv; bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; return bestdiv; @@ -219,6 +250,10 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, u32 val; div = DIV_ROUND_UP(parent_rate, rate); + + if (!_is_valid_div(divider, div)) + return -EINVAL; + value = _get_val(divider, div); if (value > div_mask(divider)) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index dff0373f53c..7cf2c093cc5 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1984,9 +1984,28 @@ struct clk *__clk_register(struct device *dev, struct clk_hw *hw) } EXPORT_SYMBOL_GPL(__clk_register); -static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) +/** + * clk_register - allocate a new clock, register it and return an opaque cookie + * @dev: device that is registering this clock + * @hw: link to hardware-specific clock data + * + * clk_register is the primary interface for populating the clock tree with new + * clock nodes. It returns a pointer to the newly allocated struct clk which + * cannot be dereferenced by driver code but may be used in conjuction with the + * rest of the clock API. In the event of an error clk_register will return an + * error code; drivers must test for an error code after calling clk_register. 
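+ *
+ * A minimal caller sketch (illustrative only, added here as commentary:
+ * my_hw is a driver-owned struct clk_hw and my_clk_ops a driver-defined
+ * clk_ops table, neither of which is part of this interface):
+ *
+ *	static const struct clk_init_data init = {
+ *		.name = "my_clk",
+ *		.ops = &my_clk_ops,
+ *		.num_parents = 0,
+ *		.flags = CLK_IS_ROOT,
+ *	};
+ *	struct clk *clk;
+ *
+ *	my_hw.init = &init;
+ *	clk = clk_register(dev, &my_hw);
+ *	if (IS_ERR(clk))
+ *		return PTR_ERR(clk);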
+ */ +struct clk *clk_register(struct device *dev, struct clk_hw *hw) { int i, ret; + struct clk *clk; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) { + pr_err("%s: could not allocate clk\n", __func__); + ret = -ENOMEM; + goto fail_out; + } clk->name = kstrdup(hw->init->name, GFP_KERNEL); if (!clk->name) { @@ -2026,7 +2045,7 @@ static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk) ret = __clk_init(dev, clk); if (!ret) - return 0; + return clk; fail_parent_names_copy: while (--i >= 0) @@ -2035,36 +2054,6 @@ fail_parent_names_copy: fail_parent_names: kfree(clk->name); fail_name: - return ret; -} - -/** - * clk_register - allocate a new clock, register it and return an opaque cookie - * @dev: device that is registering this clock - * @hw: link to hardware-specific clock data - * - * clk_register is the primary interface for populating the clock tree with new - * clock nodes. It returns a pointer to the newly allocated struct clk which - * cannot be dereferenced by driver code but may be used in conjuction with the - * rest of the clock API. In the event of an error clk_register will return an - * error code; drivers must test for an error code after calling clk_register. - */ -struct clk *clk_register(struct device *dev, struct clk_hw *hw) -{ - int ret; - struct clk *clk; - - clk = kzalloc(sizeof(*clk), GFP_KERNEL); - if (!clk) { - pr_err("%s: could not allocate clk\n", __func__); - ret = -ENOMEM; - goto fail_out; - } - - ret = _clk_register(dev, hw, clk); - if (!ret) - return clk; - kfree(clk); fail_out: return ERR_PTR(ret); @@ -2151,9 +2140,10 @@ void clk_unregister(struct clk *clk) if (!hlist_empty(&clk->children)) { struct clk *child; + struct hlist_node *t; /* Reparent all children to the orphan list. */ - hlist_for_each_entry(child, &clk->children, child_node) + hlist_for_each_entry_safe(child, t, &clk->children, child_node) clk_set_parent(child, NULL); } @@ -2173,7 +2163,7 @@ EXPORT_SYMBOL_GPL(clk_unregister); static void devm_clk_release(struct device *dev, void *res) { - clk_unregister(res); + clk_unregister(*(struct clk **)res); } /** @@ -2188,18 +2178,18 @@ static void devm_clk_release(struct device *dev, void *res) struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw) { struct clk *clk; - int ret; + struct clk **clkp; - clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL); - if (!clk) + clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL); + if (!clkp) return ERR_PTR(-ENOMEM); - ret = _clk_register(dev, hw, clk); - if (!ret) { - devres_add(dev, clk); + clk = clk_register(dev, hw); + if (!IS_ERR(clk)) { + *clkp = clk; + devres_add(dev, clkp); } else { - devres_free(clk); - clk = ERR_PTR(ret); + devres_free(clkp); } return clk; diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig new file mode 100644 index 00000000000..84196ecdaa1 --- /dev/null +++ b/drivers/clk/samsung/Kconfig @@ -0,0 +1,26 @@ +config COMMON_CLK_SAMSUNG + bool + select COMMON_CLK + +config S3C2410_COMMON_CLK + bool + select COMMON_CLK_SAMSUNG + help + Build the s3c2410 clock driver based on the common clock framework. + +config S3C2410_COMMON_DCLK + bool + select COMMON_CLK_SAMSUNG + select REGMAP_MMIO + help + Temporary symbol to build the dclk driver based on the common clock + framework. 
+ +config S3C2412_COMMON_CLK + bool + select COMMON_CLK_SAMSUNG + +config S3C2443_COMMON_CLK + bool + select COMMON_CLK_SAMSUNG + diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index 8eb4799237f..69e81773164 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -3,9 +3,16 @@ # obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o +obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o +obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o +obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o +obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o +obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o +obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o +obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c new file mode 100644 index 00000000000..7a17bd40d1d --- /dev/null +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -0,0 +1,780 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common Clock Framework support for Exynos3250 SoC. + */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/syscore_ops.h> + +#include <dt-bindings/clock/exynos3250.h> + +#include "clk.h" +#include "clk-pll.h" + +#define SRC_LEFTBUS 0x4200 +#define DIV_LEFTBUS 0x4500 +#define GATE_IP_LEFTBUS 0x4800 +#define SRC_RIGHTBUS 0x8200 +#define DIV_RIGHTBUS 0x8500 +#define GATE_IP_RIGHTBUS 0x8800 +#define GATE_IP_PERIR 0x8960 +#define MPLL_LOCK 0xc010 +#define MPLL_CON0 0xc110 +#define VPLL_LOCK 0xc020 +#define VPLL_CON0 0xc120 +#define UPLL_LOCK 0xc030 +#define UPLL_CON0 0xc130 +#define SRC_TOP0 0xc210 +#define SRC_TOP1 0xc214 +#define SRC_CAM 0xc220 +#define SRC_MFC 0xc228 +#define SRC_G3D 0xc22c +#define SRC_LCD 0xc234 +#define SRC_ISP 0xc238 +#define SRC_FSYS 0xc240 +#define SRC_PERIL0 0xc250 +#define SRC_PERIL1 0xc254 +#define SRC_MASK_TOP 0xc310 +#define SRC_MASK_CAM 0xc320 +#define SRC_MASK_LCD 0xc334 +#define SRC_MASK_ISP 0xc338 +#define SRC_MASK_FSYS 0xc340 +#define SRC_MASK_PERIL0 0xc350 +#define SRC_MASK_PERIL1 0xc354 +#define DIV_TOP 0xc510 +#define DIV_CAM 0xc520 +#define DIV_MFC 0xc528 +#define DIV_G3D 0xc52c +#define DIV_LCD 0xc534 +#define DIV_ISP 0xc538 +#define DIV_FSYS0 0xc540 +#define DIV_FSYS1 0xc544 +#define DIV_FSYS2 0xc548 +#define DIV_PERIL0 0xc550 +#define DIV_PERIL1 0xc554 +#define DIV_PERIL3 0xc55c +#define DIV_PERIL4 0xc560 +#define DIV_PERIL5 0xc564 +#define DIV_CAM1 0xc568 +#define CLKDIV2_RATIO 0xc580 +#define GATE_SCLK_CAM 0xc820 +#define GATE_SCLK_MFC 0xc828 +#define GATE_SCLK_G3D 0xc82c +#define GATE_SCLK_LCD 0xc834 +#define GATE_SCLK_ISP_TOP 0xc838 +#define GATE_SCLK_FSYS 0xc840 +#define GATE_SCLK_PERIL 0xc850 +#define GATE_IP_CAM 0xc920 +#define GATE_IP_MFC 0xc928 +#define GATE_IP_G3D 0xc92c +#define GATE_IP_LCD 0xc934 +#define GATE_IP_ISP 0xc938 +#define GATE_IP_FSYS 0xc940 +#define GATE_IP_PERIL 0xc950 +#define GATE_BLOCK 0xc970 +#define APLL_LOCK 0x14000 
+#define APLL_CON0 0x14100 +#define SRC_CPU 0x14200 +#define DIV_CPU0 0x14500 +#define DIV_CPU1 0x14504 + +/* list of PLLs to be registered */ +enum exynos3250_plls { + apll, mpll, vpll, upll, + nr_plls +}; + +static void __iomem *reg_base; + +/* + * Support for CMU save/restore across system suspends + */ +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *exynos3250_clk_regs; + +static unsigned long exynos3250_cmu_clk_regs[] __initdata = { + SRC_LEFTBUS, + DIV_LEFTBUS, + GATE_IP_LEFTBUS, + SRC_RIGHTBUS, + DIV_RIGHTBUS, + GATE_IP_RIGHTBUS, + GATE_IP_PERIR, + MPLL_LOCK, + MPLL_CON0, + VPLL_LOCK, + VPLL_CON0, + UPLL_LOCK, + UPLL_CON0, + SRC_TOP0, + SRC_TOP1, + SRC_CAM, + SRC_MFC, + SRC_G3D, + SRC_LCD, + SRC_ISP, + SRC_FSYS, + SRC_PERIL0, + SRC_PERIL1, + SRC_MASK_TOP, + SRC_MASK_CAM, + SRC_MASK_LCD, + SRC_MASK_ISP, + SRC_MASK_FSYS, + SRC_MASK_PERIL0, + SRC_MASK_PERIL1, + DIV_TOP, + DIV_CAM, + DIV_MFC, + DIV_G3D, + DIV_LCD, + DIV_ISP, + DIV_FSYS0, + DIV_FSYS1, + DIV_FSYS2, + DIV_PERIL0, + DIV_PERIL1, + DIV_PERIL3, + DIV_PERIL4, + DIV_PERIL5, + DIV_CAM1, + CLKDIV2_RATIO, + GATE_SCLK_CAM, + GATE_SCLK_MFC, + GATE_SCLK_G3D, + GATE_SCLK_LCD, + GATE_SCLK_ISP_TOP, + GATE_SCLK_FSYS, + GATE_SCLK_PERIL, + GATE_IP_CAM, + GATE_IP_MFC, + GATE_IP_G3D, + GATE_IP_LCD, + GATE_IP_ISP, + GATE_IP_FSYS, + GATE_IP_PERIL, + GATE_BLOCK, + APLL_LOCK, + SRC_CPU, + DIV_CPU0, + DIV_CPU1, +}; + +static int exynos3250_clk_suspend(void) +{ + samsung_clk_save(reg_base, exynos3250_clk_regs, + ARRAY_SIZE(exynos3250_cmu_clk_regs)); + return 0; +} + +static void exynos3250_clk_resume(void) +{ + samsung_clk_restore(reg_base, exynos3250_clk_regs, + ARRAY_SIZE(exynos3250_cmu_clk_regs)); +} + +static struct syscore_ops exynos3250_clk_syscore_ops = { + .suspend = exynos3250_clk_suspend, + .resume = exynos3250_clk_resume, +}; + +static void exynos3250_clk_sleep_init(void) +{ + exynos3250_clk_regs = + samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs, + ARRAY_SIZE(exynos3250_cmu_clk_regs)); + if (!exynos3250_clk_regs) { + pr_warn("%s: Failed to allocate sleep save data\n", __func__); + goto err; + } + + register_syscore_ops(&exynos3250_clk_syscore_ops); + return; +err: + kfree(exynos3250_clk_regs); +} +#else +static inline void exynos3250_clk_sleep_init(void) { } +#endif + +/* list of all parent clock list */ +PNAME(mout_vpllsrc_p) = { "fin_pll", }; + +PNAME(mout_apll_p) = { "fin_pll", "fout_apll", }; +PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; +PNAME(mout_vpll_p) = { "fin_pll", "fout_vpll", }; +PNAME(mout_upll_p) = { "fin_pll", "fout_upll", }; + +PNAME(mout_mpll_user_p) = { "fin_pll", "div_mpll_pre", }; +PNAME(mout_epll_user_p) = { "fin_pll", "mout_epll", }; +PNAME(mout_core_p) = { "mout_apll", "mout_mpll_user_c", }; +PNAME(mout_hpm_p) = { "mout_apll", "mout_mpll_user_c", }; + +PNAME(mout_ebi_p) = { "div_aclk_200", "div_aclk_160", }; +PNAME(mout_ebi_1_p) = { "mout_ebi", "mout_vpll", }; + +PNAME(mout_gdl_p) = { "mout_mpll_user_l", }; +PNAME(mout_gdr_p) = { "mout_mpll_user_r", }; + +PNAME(mout_aclk_400_mcuisp_sub_p) + = { "fin_pll", "div_aclk_400_mcuisp", }; +PNAME(mout_aclk_266_0_p) = { "div_mpll_pre", "mout_vpll", }; +PNAME(mout_aclk_266_1_p) = { "mout_epll_user", }; +PNAME(mout_aclk_266_p) = { "mout_aclk_266_0", "mout_aclk_266_1", }; +PNAME(mout_aclk_266_sub_p) = { "fin_pll", "div_aclk_266", }; + +PNAME(group_div_mpll_pre_p) = { "div_mpll_pre", }; +PNAME(group_epll_vpll_p) = { "mout_epll_user", "mout_vpll" }; +PNAME(group_sclk_p) = { "xxti", "xusbxti", + "none", "none", + "none", "none", "div_mpll_pre", + 
"mout_epll_user", "mout_vpll", }; +PNAME(group_sclk_audio_p) = { "audiocdclk", "none", + "none", "none", + "xxti", "xusbxti", + "div_mpll_pre", "mout_epll_user", + "mout_vpll", }; +PNAME(group_sclk_cam_blk_p) = { "xxti", "xusbxti", + "none", "none", "none", + "none", "div_mpll_pre", + "mout_epll_user", "mout_vpll", + "div_cam_blk_320", }; +PNAME(group_sclk_fimd0_p) = { "xxti", "xusbxti", + "m_bitclkhsdiv4_2l", "none", + "none", "none", "div_mpll_pre", + "mout_epll_user", "mout_vpll", + "none", "none", "none", + "div_lcd_blk_145", }; + +PNAME(mout_mfc_p) = { "mout_mfc_0", "mout_mfc_1" }; +PNAME(mout_g3d_p) = { "mout_g3d_0", "mout_g3d_1" }; + +static struct samsung_fixed_factor_clock fixed_factor_clks[] __initdata = { + FFACTOR(0, "sclk_mpll_1600", "mout_mpll", 1, 1, 0), + FFACTOR(0, "sclk_mpll_mif", "mout_mpll", 1, 2, 0), + FFACTOR(0, "sclk_bpll", "fout_bpll", 1, 2, 0), + FFACTOR(0, "div_cam_blk_320", "sclk_mpll_1600", 1, 5, 0), + FFACTOR(0, "div_lcd_blk_145", "sclk_mpll_1600", 1, 11, 0), + + /* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */ + FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0), +}; + +static struct samsung_mux_clock mux_clks[] __initdata = { + /* + * NOTE: Following table is sorted by register address in ascending + * order and then bitfield shift in descending order, as it is done + * in the User's Manual. When adding new entries, please make sure + * that the order is preserved, to avoid merge conflicts and make + * further work with defined data easier. + */ + + /* SRC_LEFTBUS */ + MUX(CLK_MOUT_MPLL_USER_L, "mout_mpll_user_l", mout_mpll_user_p, + SRC_LEFTBUS, 4, 1), + MUX(CLK_MOUT_GDL, "mout_gdl", mout_gdl_p, SRC_LEFTBUS, 0, 1), + + /* SRC_RIGHTBUS */ + MUX(CLK_MOUT_MPLL_USER_R, "mout_mpll_user_r", mout_mpll_user_p, + SRC_RIGHTBUS, 4, 1), + MUX(CLK_MOUT_GDR, "mout_gdr", mout_gdr_p, SRC_RIGHTBUS, 0, 1), + + /* SRC_TOP0 */ + MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1), + MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_div_mpll_pre_p,SRC_TOP0, 24, 1), + MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_div_mpll_pre_p, SRC_TOP0, 20, 1), + MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_div_mpll_pre_p, SRC_TOP0, 16, 1), + MUX(CLK_MOUT_ACLK_266_1, "mout_aclk_266_1", mout_aclk_266_1_p, SRC_TOP0, 14, 1), + MUX(CLK_MOUT_ACLK_266_0, "mout_aclk_266_0", mout_aclk_266_0_p, SRC_TOP0, 13, 1), + MUX(CLK_MOUT_ACLK_266, "mout_aclk_266", mout_aclk_266_p, SRC_TOP0, 12, 1), + MUX(CLK_MOUT_VPLL, "mout_vpll", mout_vpll_p, SRC_TOP0, 8, 1), + MUX(CLK_MOUT_EPLL_USER, "mout_epll_user", mout_epll_user_p, SRC_TOP0, 4, 1), + MUX(CLK_MOUT_EBI_1, "mout_ebi_1", mout_ebi_1_p, SRC_TOP0, 0, 1), + + /* SRC_TOP1 */ + MUX(CLK_MOUT_UPLL, "mout_upll", mout_upll_p, SRC_TOP1, 28, 1), + MUX(CLK_MOUT_ACLK_400_MCUISP_SUB, "mout_aclk_400_mcuisp_sub", mout_aclk_400_mcuisp_sub_p, + SRC_TOP1, 24, 1), + MUX(CLK_MOUT_ACLK_266_SUB, "mout_aclk_266_sub", mout_aclk_266_sub_p, SRC_TOP1, 20, 1), + MUX(CLK_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_TOP1, 12, 1), + MUX(CLK_MOUT_ACLK_400_MCUISP, "mout_aclk_400_mcuisp", group_div_mpll_pre_p, SRC_TOP1, 8, 1), + MUX(CLK_MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1), + + /* SRC_CAM */ + MUX(CLK_MOUT_CAM1, "mout_cam1", group_sclk_p, SRC_CAM, 20, 4), + MUX(CLK_MOUT_CAM_BLK, "mout_cam_blk", group_sclk_cam_blk_p, SRC_CAM, 0, 4), + + /* SRC_MFC */ + MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1), + MUX(CLK_MOUT_MFC_1, "mout_mfc_1", group_epll_vpll_p, SRC_MFC, 4, 1), + MUX(CLK_MOUT_MFC_0, "mout_mfc_0", group_div_mpll_pre_p, SRC_MFC, 0, 
1), + + /* SRC_G3D */ + MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1), + MUX(CLK_MOUT_G3D_1, "mout_g3d_1", group_epll_vpll_p, SRC_G3D, 4, 1), + MUX(CLK_MOUT_G3D_0, "mout_g3d_0", group_div_mpll_pre_p, SRC_G3D, 0, 1), + + /* SRC_LCD */ + MUX(CLK_MOUT_MIPI0, "mout_mipi0", group_sclk_p, SRC_LCD, 12, 4), + MUX(CLK_MOUT_FIMD0, "mout_fimd0", group_sclk_fimd0_p, SRC_LCD, 0, 4), + + /* SRC_ISP */ + MUX(CLK_MOUT_UART_ISP, "mout_uart_isp", group_sclk_p, SRC_ISP, 12, 4), + MUX(CLK_MOUT_SPI1_ISP, "mout_spi1_isp", group_sclk_p, SRC_ISP, 8, 4), + MUX(CLK_MOUT_SPI0_ISP, "mout_spi0_isp", group_sclk_p, SRC_ISP, 4, 4), + + /* SRC_FSYS */ + MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4), + MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 3), + MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 3), + + /* SRC_PERIL0 */ + MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4), + MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4), + + /* SRC_PERIL1 */ + MUX(CLK_MOUT_SPI1, "mout_spi1", group_sclk_p, SRC_PERIL1, 20, 4), + MUX(CLK_MOUT_SPI0, "mout_spi0", group_sclk_p, SRC_PERIL1, 16, 4), + MUX(CLK_MOUT_AUDIO, "mout_audio", group_sclk_audio_p, SRC_PERIL1, 4, 4), + + /* SRC_CPU */ + MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p, + SRC_CPU, 24, 1), + MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1), + MUX(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1), + MUX(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1), +}; + +static struct samsung_div_clock div_clks[] __initdata = { + /* + * NOTE: Following table is sorted by register address in ascending + * order and then bitfield shift in descending order, as it is done + * in the User's Manual. When adding new entries, please make sure + * that the order is preserved, to avoid merge conflicts and make + * further work with defined data easier. 
+ */ + + /* DIV_LEFTBUS */ + DIV(CLK_DIV_GPL, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3), + DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 4), + + /* DIV_RIGHTBUS */ + DIV(CLK_DIV_GPR, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3), + DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 4), + + /* DIV_TOP */ + DIV(CLK_DIV_MPLL_PRE, "div_mpll_pre", "sclk_mpll_mif", DIV_TOP, 28, 2), + DIV(CLK_DIV_ACLK_400_MCUISP, "div_aclk_400_mcuisp", + "mout_aclk_400_mcuisp", DIV_TOP, 24, 3), + DIV(CLK_DIV_EBI, "div_ebi", "mout_ebi_1", DIV_TOP, 16, 3), + DIV(CLK_DIV_ACLK_200, "div_aclk_200", "mout_aclk_200", DIV_TOP, 12, 3), + DIV(CLK_DIV_ACLK_160, "div_aclk_160", "mout_aclk_160", DIV_TOP, 8, 3), + DIV(CLK_DIV_ACLK_100, "div_aclk_100", "mout_aclk_100", DIV_TOP, 4, 4), + DIV(CLK_DIV_ACLK_266, "div_aclk_266", "mout_aclk_266", DIV_TOP, 0, 3), + + /* DIV_CAM */ + DIV(CLK_DIV_CAM1, "div_cam1", "mout_cam1", DIV_CAM, 20, 4), + DIV(CLK_DIV_CAM_BLK, "div_cam_blk", "mout_cam_blk", DIV_CAM, 0, 4), + + /* DIV_MFC */ + DIV(CLK_DIV_MFC, "div_mfc", "mout_mfc", DIV_MFC, 0, 4), + + /* DIV_G3D */ + DIV(CLK_DIV_G3D, "div_g3d", "mout_g3d", DIV_G3D, 0, 4), + + /* DIV_LCD */ + DIV_F(CLK_DIV_MIPI0_PRE, "div_mipi0_pre", "div_mipi0", DIV_LCD, 20, 4, + CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_MIPI0, "div_mipi0", "mout_mipi0", DIV_LCD, 16, 4), + DIV(CLK_DIV_FIMD0, "div_fimd0", "mout_fimd0", DIV_LCD, 0, 4), + + /* DIV_ISP */ + DIV(CLK_DIV_UART_ISP, "div_uart_isp", "mout_uart_isp", DIV_ISP, 28, 4), + DIV_F(CLK_DIV_SPI1_ISP_PRE, "div_spi1_isp_pre", "div_spi1_isp", + DIV_ISP, 20, 8, CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4), + DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp", + DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 0, 4), + + /* DIV_FSYS0 */ + DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8, + CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_TSADC, "div_tsadc", "mout_tsadc", DIV_FSYS0, 0, 4), + + /* DIV_FSYS1 */ + DIV_F(CLK_DIV_MMC1_PRE, "div_mmc1_pre", "div_mmc1", DIV_FSYS1, 24, 8, + CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_MMC1, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4), + DIV_F(CLK_DIV_MMC0_PRE, "div_mmc0_pre", "div_mmc0", DIV_FSYS1, 8, 8, + CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4), + + /* DIV_PERIL0 */ + DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4), + DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4), + + /* DIV_PERIL1 */ + DIV_F(CLK_DIV_SPI1_PRE, "div_spi1_pre", "div_spi1", DIV_PERIL1, 24, 8, + CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_SPI1, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4), + DIV_F(CLK_DIV_SPI0_PRE, "div_spi0_pre", "div_spi0", DIV_PERIL1, 8, 8, + CLK_SET_RATE_PARENT, 0), + DIV(CLK_DIV_SPI0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4), + + /* DIV_PERIL4 */ + DIV(CLK_DIV_PCM, "div_pcm", "div_audio", DIV_PERIL4, 20, 8), + DIV(CLK_DIV_AUDIO, "div_audio", "mout_audio", DIV_PERIL4, 16, 4), + + /* DIV_PERIL5 */ + DIV(CLK_DIV_I2S, "div_i2s", "div_audio", DIV_PERIL5, 8, 6), + + /* DIV_CPU0 */ + DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3), + DIV(CLK_DIV_APLL, "div_apll", "mout_apll", DIV_CPU0, 24, 3), + DIV(CLK_DIV_PCLK_DBG, "div_pclk_dbg", "div_core2", DIV_CPU0, 20, 3), + DIV(CLK_DIV_ATB, "div_atb", "div_core2", DIV_CPU0, 16, 3), + DIV(CLK_DIV_COREM, "div_corem", "div_core2", DIV_CPU0, 4, 3), + DIV(CLK_DIV_CORE, "div_core", "mout_core", DIV_CPU0, 0, 3), + + /* DIV_CPU1 */ + 
DIV(CLK_DIV_HPM, "div_hpm", "div_copy", DIV_CPU1, 4, 3), + DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3), +}; + +static struct samsung_gate_clock gate_clks[] __initdata = { + /* + * NOTE: Following table is sorted by register address in ascending + * order and then bitfield shift in descending order, as it is done + * in the User's Manual. When adding new entries, please make sure + * that the order is preserved, to avoid merge conflicts and make + * further work with defined data easier. + */ + + /* GATE_IP_LEFTBUS */ + GATE(CLK_ASYNC_G3D, "async_g3d", "div_aclk_100", GATE_IP_LEFTBUS, 6, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_ASYNC_MFCL, "async_mfcl", "div_aclk_100", GATE_IP_LEFTBUS, 4, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PPMULEFT, "ppmuleft", "div_aclk_100", GATE_IP_LEFTBUS, 1, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_GPIO_LEFT, "gpio_left", "div_aclk_100", GATE_IP_LEFTBUS, 0, + CLK_IGNORE_UNUSED, 0), + + /* GATE_IP_RIGHTBUS */ + GATE(CLK_ASYNC_ISPMX, "async_ispmx", "div_aclk_100", + GATE_IP_RIGHTBUS, 9, CLK_IGNORE_UNUSED, 0), + GATE(CLK_ASYNC_FSYSD, "async_fsysd", "div_aclk_100", + GATE_IP_RIGHTBUS, 5, CLK_IGNORE_UNUSED, 0), + GATE(CLK_ASYNC_LCD0X, "async_lcd0x", "div_aclk_100", + GATE_IP_RIGHTBUS, 3, CLK_IGNORE_UNUSED, 0), + GATE(CLK_ASYNC_CAMX, "async_camx", "div_aclk_100", GATE_IP_RIGHTBUS, 2, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PPMURIGHT, "ppmuright", "div_aclk_100", GATE_IP_RIGHTBUS, 1, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_GPIO_RIGHT, "gpio_right", "div_aclk_100", GATE_IP_RIGHTBUS, 0, + CLK_IGNORE_UNUSED, 0), + + /* GATE_IP_PERIR */ + GATE(CLK_MONOCNT, "monocnt", "div_aclk_100", GATE_IP_PERIR, 22, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC6, "tzpc6", "div_aclk_100", GATE_IP_PERIR, 21, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PROVISIONKEY1, "provisionkey1", "div_aclk_100", + GATE_IP_PERIR, 20, CLK_IGNORE_UNUSED, 0), + GATE(CLK_PROVISIONKEY0, "provisionkey0", "div_aclk_100", + GATE_IP_PERIR, 19, CLK_IGNORE_UNUSED, 0), + GATE(CLK_CMU_ISPPART, "cmu_isppart", "div_aclk_100", GATE_IP_PERIR, 18, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TMU_APBIF, "tmu_apbif", "div_aclk_100", + GATE_IP_PERIR, 17, 0, 0), + GATE(CLK_KEYIF, "keyif", "div_aclk_100", GATE_IP_PERIR, 16, 0, 0), + GATE(CLK_RTC, "rtc", "div_aclk_100", GATE_IP_PERIR, 15, 0, 0), + GATE(CLK_WDT, "wdt", "div_aclk_100", GATE_IP_PERIR, 14, 0, 0), + GATE(CLK_MCT, "mct", "div_aclk_100", GATE_IP_PERIR, 13, 0, 0), + GATE(CLK_SECKEY, "seckey", "div_aclk_100", GATE_IP_PERIR, 12, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC5, "tzpc5", "div_aclk_100", GATE_IP_PERIR, 10, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC4, "tzpc4", "div_aclk_100", GATE_IP_PERIR, 9, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC3, "tzpc3", "div_aclk_100", GATE_IP_PERIR, 8, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC2, "tzpc2", "div_aclk_100", GATE_IP_PERIR, 7, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC1, "tzpc1", "div_aclk_100", GATE_IP_PERIR, 6, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC0, "tzpc0", "div_aclk_100", GATE_IP_PERIR, 5, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_CMU_COREPART, "cmu_corepart", "div_aclk_100", GATE_IP_PERIR, 4, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_CMU_TOPPART, "cmu_toppart", "div_aclk_100", GATE_IP_PERIR, 3, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PMU_APBIF, "pmu_apbif", "div_aclk_100", GATE_IP_PERIR, 2, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_SYSREG, "sysreg", "div_aclk_100", GATE_IP_PERIR, 1, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_CHIP_ID, "chip_id", "div_aclk_100", GATE_IP_PERIR, 0, + CLK_IGNORE_UNUSED, 0), + + /* GATE_SCLK_CAM */ + GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_cam_blk", + 
GATE_SCLK_CAM, 8, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_M2MSCALER, "sclk_m2mscaler", "div_cam_blk", + GATE_SCLK_CAM, 2, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_GSCALER1, "sclk_gscaler1", "div_cam_blk", + GATE_SCLK_CAM, 1, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_GSCALER0, "sclk_gscaler0", "div_cam_blk", + GATE_SCLK_CAM, 0, CLK_SET_RATE_PARENT, 0), + + /* GATE_SCLK_MFC */ + GATE(CLK_SCLK_MFC, "sclk_mfc", "div_mfc", + GATE_SCLK_MFC, 0, CLK_SET_RATE_PARENT, 0), + + /* GATE_SCLK_G3D */ + GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d", + GATE_SCLK_G3D, 0, CLK_SET_RATE_PARENT, 0), + + /* GATE_SCLK_LCD */ + GATE(CLK_SCLK_MIPIDPHY2L, "sclk_mipidphy2l", "div_mipi0", + GATE_SCLK_LCD, 4, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi0_pre", + GATE_SCLK_LCD, 3, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0", + GATE_SCLK_LCD, 0, CLK_SET_RATE_PARENT, 0), + + /* GATE_SCLK_ISP_TOP */ + GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1", + GATE_SCLK_ISP_TOP, 4, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp", + GATE_SCLK_ISP_TOP, 3, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp", + GATE_SCLK_ISP_TOP, 2, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp", + GATE_SCLK_ISP_TOP, 1, CLK_SET_RATE_PARENT, 0), + + /* GATE_SCLK_FSYS */ + GATE(CLK_SCLK_UPLL, "sclk_upll", "mout_upll", GATE_SCLK_FSYS, 10, 0, 0), + GATE(CLK_SCLK_TSADC, "sclk_tsadc", "div_tsadc_pre", + GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi", + GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre", + GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre", + GATE_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0), + + /* GATE_SCLK_PERIL */ + GATE(CLK_SCLK_I2S, "sclk_i2s", "div_i2s", + GATE_SCLK_PERIL, 18, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_PCM, "sclk_pcm", "div_pcm", + GATE_SCLK_PERIL, 16, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi1_pre", + GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre", + GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1", + GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0", + GATE_SCLK_PERIL, 0, CLK_SET_RATE_PARENT, 0), + + /* GATE_IP_CAM */ + GATE(CLK_QEJPEG, "qejpeg", "div_cam_blk_320", GATE_IP_CAM, 19, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PIXELASYNCM1, "pixelasyncm1", "div_cam_blk_320", + GATE_IP_CAM, 18, CLK_IGNORE_UNUSED, 0), + GATE(CLK_PIXELASYNCM0, "pixelasyncm0", "div_cam_blk_320", + GATE_IP_CAM, 17, CLK_IGNORE_UNUSED, 0), + GATE(CLK_PPMUCAMIF, "ppmucamif", "div_cam_blk_320", + GATE_IP_CAM, 16, CLK_IGNORE_UNUSED, 0), + GATE(CLK_QEM2MSCALER, "qem2mscaler", "div_cam_blk_320", + GATE_IP_CAM, 14, CLK_IGNORE_UNUSED, 0), + GATE(CLK_QEGSCALER1, "qegscaler1", "div_cam_blk_320", + GATE_IP_CAM, 13, CLK_IGNORE_UNUSED, 0), + GATE(CLK_QEGSCALER0, "qegscaler0", "div_cam_blk_320", + GATE_IP_CAM, 12, CLK_IGNORE_UNUSED, 0), + GATE(CLK_SMMUJPEG, "smmujpeg", "div_cam_blk_320", + GATE_IP_CAM, 11, 0, 0), + GATE(CLK_SMMUM2M2SCALER, "smmum2m2scaler", "div_cam_blk_320", + GATE_IP_CAM, 9, 0, 0), + GATE(CLK_SMMUGSCALER1, "smmugscaler1", "div_cam_blk_320", + GATE_IP_CAM, 8, 0, 0), + GATE(CLK_SMMUGSCALER0, "smmugscaler0", "div_cam_blk_320", + GATE_IP_CAM, 7, 0, 0), + GATE(CLK_JPEG, "jpeg", "div_cam_blk_320", GATE_IP_CAM, 6, 0, 0), + 
GATE(CLK_M2MSCALER, "m2mscaler", "div_cam_blk_320", + GATE_IP_CAM, 2, 0, 0), + GATE(CLK_GSCALER1, "gscaler1", "div_cam_blk_320", GATE_IP_CAM, 1, 0, 0), + GATE(CLK_GSCALER0, "gscaler0", "div_cam_blk_320", GATE_IP_CAM, 0, 0, 0), + + /* GATE_IP_MFC */ + GATE(CLK_QEMFC, "qemfc", "div_aclk_200", GATE_IP_MFC, 5, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PPMUMFC_L, "ppmumfc_l", "div_aclk_200", GATE_IP_MFC, 3, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_SMMUMFC_L, "smmumfc_l", "div_aclk_200", GATE_IP_MFC, 1, 0, 0), + GATE(CLK_MFC, "mfc", "div_aclk_200", GATE_IP_MFC, 0, 0, 0), + + /* GATE_IP_G3D */ + GATE(CLK_SMMUG3D, "smmug3d", "div_aclk_200", GATE_IP_G3D, 3, 0, 0), + GATE(CLK_QEG3D, "qeg3d", "div_aclk_200", GATE_IP_G3D, 2, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PPMUG3D, "ppmug3d", "div_aclk_200", GATE_IP_G3D, 1, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_G3D, "g3d", "div_aclk_200", GATE_IP_G3D, 0, 0, 0), + + /* GATE_IP_LCD */ + GATE(CLK_QE_CH1_LCD, "qe_ch1_lcd", "div_aclk_160", GATE_IP_LCD, 7, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_QE_CH0_LCD, "qe_ch0_lcd", "div_aclk_160", GATE_IP_LCD, 6, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_PPMULCD0, "ppmulcd0", "div_aclk_160", GATE_IP_LCD, 5, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_SMMUFIMD0, "smmufimd0", "div_aclk_160", GATE_IP_LCD, 4, 0, 0), + GATE(CLK_DSIM0, "dsim0", "div_aclk_160", GATE_IP_LCD, 3, 0, 0), + GATE(CLK_SMIES, "smies", "div_aclk_160", GATE_IP_LCD, 2, 0, 0), + GATE(CLK_FIMD0, "fimd0", "div_aclk_160", GATE_IP_LCD, 0, 0, 0), + + /* GATE_IP_ISP */ + GATE(CLK_CAM1, "cam1", "mout_aclk_266_sub", GATE_IP_ISP, 5, 0, 0), + GATE(CLK_UART_ISP_TOP, "uart_isp_top", "mout_aclk_266_sub", + GATE_IP_ISP, 3, 0, 0), + GATE(CLK_SPI1_ISP_TOP, "spi1_isp_top", "mout_aclk_266_sub", + GATE_IP_ISP, 2, 0, 0), + GATE(CLK_SPI0_ISP_TOP, "spi0_isp_top", "mout_aclk_266_sub", + GATE_IP_ISP, 1, 0, 0), + + /* GATE_IP_FSYS */ + GATE(CLK_TSADC, "tsadc", "div_aclk_200", GATE_IP_FSYS, 20, 0, 0), + GATE(CLK_PPMUFILE, "ppmufile", "div_aclk_200", GATE_IP_FSYS, 17, + CLK_IGNORE_UNUSED, 0), + GATE(CLK_USBOTG, "usbotg", "div_aclk_200", GATE_IP_FSYS, 13, 0, 0), + GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0), + GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0), + GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0), + GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0), + GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0), + GATE(CLK_PDMA0, "pdma0", "div_aclk_200", GATE_IP_FSYS, 0, 0, 0), + + /* GATE_IP_PERIL */ + GATE(CLK_PWM, "pwm", "div_aclk_100", GATE_IP_PERIL, 24, 0, 0), + GATE(CLK_PCM, "pcm", "div_aclk_100", GATE_IP_PERIL, 23, 0, 0), + GATE(CLK_I2S, "i2s", "div_aclk_100", GATE_IP_PERIL, 21, 0, 0), + GATE(CLK_SPI1, "spi1", "div_aclk_100", GATE_IP_PERIL, 17, 0, 0), + GATE(CLK_SPI0, "spi0", "div_aclk_100", GATE_IP_PERIL, 16, 0, 0), + GATE(CLK_I2C7, "i2c7", "div_aclk_100", GATE_IP_PERIL, 13, 0, 0), + GATE(CLK_I2C6, "i2c6", "div_aclk_100", GATE_IP_PERIL, 12, 0, 0), + GATE(CLK_I2C5, "i2c5", "div_aclk_100", GATE_IP_PERIL, 11, 0, 0), + GATE(CLK_I2C4, "i2c4", "div_aclk_100", GATE_IP_PERIL, 10, 0, 0), + GATE(CLK_I2C3, "i2c3", "div_aclk_100", GATE_IP_PERIL, 9, 0, 0), + GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0), + GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0), + GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0), + GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0), + GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0), +}; + +/* APLL & MPLL & BPLL & UPLL */ +static struct 
samsung_pll_rate_table exynos3250_pll_rates[] = { + PLL_35XX_RATE(1200000000, 400, 4, 1), + PLL_35XX_RATE(1100000000, 275, 3, 1), + PLL_35XX_RATE(1066000000, 533, 6, 1), + PLL_35XX_RATE(1000000000, 250, 3, 1), + PLL_35XX_RATE( 960000000, 320, 4, 1), + PLL_35XX_RATE( 900000000, 300, 4, 1), + PLL_35XX_RATE( 850000000, 425, 6, 1), + PLL_35XX_RATE( 800000000, 200, 3, 1), + PLL_35XX_RATE( 700000000, 175, 3, 1), + PLL_35XX_RATE( 667000000, 667, 12, 1), + PLL_35XX_RATE( 600000000, 400, 4, 2), + PLL_35XX_RATE( 533000000, 533, 6, 2), + PLL_35XX_RATE( 520000000, 260, 3, 2), + PLL_35XX_RATE( 500000000, 250, 3, 2), + PLL_35XX_RATE( 400000000, 200, 3, 2), + PLL_35XX_RATE( 200000000, 200, 3, 3), + PLL_35XX_RATE( 100000000, 200, 3, 4), + { /* sentinel */ } +}; + +/* VPLL */ +static struct samsung_pll_rate_table exynos3250_vpll_rates[] = { + PLL_36XX_RATE(600000000, 100, 2, 1, 0), + PLL_36XX_RATE(533000000, 266, 3, 2, 32768), + PLL_36XX_RATE(519230987, 173, 2, 2, 5046), + PLL_36XX_RATE(500000000, 250, 3, 2, 0), + PLL_36XX_RATE(445500000, 148, 2, 2, 32768), + PLL_36XX_RATE(445055007, 148, 2, 2, 23047), + PLL_36XX_RATE(400000000, 200, 3, 2, 0), + PLL_36XX_RATE(371250000, 123, 2, 2, 49152), + PLL_36XX_RATE(370878997, 185, 3, 2, 28803), + PLL_36XX_RATE(340000000, 170, 3, 2, 0), + PLL_36XX_RATE(335000015, 111, 2, 2, 43691), + PLL_36XX_RATE(333000000, 111, 2, 2, 0), + PLL_36XX_RATE(330000000, 110, 2, 2, 0), + PLL_36XX_RATE(320000015, 106, 2, 2, 43691), + PLL_36XX_RATE(300000000, 100, 2, 2, 0), + PLL_36XX_RATE(275000000, 275, 3, 3, 0), + PLL_36XX_RATE(222750000, 148, 2, 3, 32768), + PLL_36XX_RATE(222528007, 148, 2, 3, 23069), + PLL_36XX_RATE(160000000, 160, 3, 3, 0), + PLL_36XX_RATE(148500000, 99, 2, 3, 0), + PLL_36XX_RATE(148352005, 98, 2, 3, 59070), + PLL_36XX_RATE(108000000, 144, 2, 4, 0), + PLL_36XX_RATE( 74250000, 99, 2, 4, 0), + PLL_36XX_RATE( 74176002, 98, 3, 4, 59070), + PLL_36XX_RATE( 54054000, 216, 3, 5, 14156), + PLL_36XX_RATE( 54000000, 144, 2, 5, 0), + { /* sentinel */ } +}; + +static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = { + [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", + APLL_LOCK, APLL_CON0, NULL), + [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", + MPLL_LOCK, MPLL_CON0, NULL), + [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll", + VPLL_LOCK, VPLL_CON0, NULL), + [upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll", + UPLL_LOCK, UPLL_CON0, NULL), +}; + +static void __init exynos3250_cmu_init(struct device_node *np) +{ + struct samsung_clk_provider *ctx; + + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + + ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + + samsung_clk_register_fixed_factor(ctx, fixed_factor_clks, + ARRAY_SIZE(fixed_factor_clks)); + + exynos3250_plls[apll].rate_table = exynos3250_pll_rates; + exynos3250_plls[mpll].rate_table = exynos3250_pll_rates; + exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates; + exynos3250_plls[upll].rate_table = exynos3250_pll_rates; + + samsung_clk_register_pll(ctx, exynos3250_plls, + ARRAY_SIZE(exynos3250_plls), reg_base); + + samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks)); + samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks)); + samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks)); + + exynos3250_clk_sleep_init(); +} +CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init); diff --git 
a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index b4f96721017..c4df294bb7f 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -428,7 +428,7 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata /* fixed rate clocks generated inside the soc */ static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = { FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000), - FRATE(0, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000), + FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000), FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000), }; @@ -903,7 +903,7 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { GATE(CLK_AUDSS, "audss", "sclk_epll", E4X12_GATE_IP_MAUDIO, 0, 0, 0), GATE(CLK_MDNIE0, "mdnie0", "aclk160", GATE_IP_LCD0, 2, 0, 0), GATE(CLK_ROTATOR, "rotator", "aclk200", E4X12_GATE_IP_IMAGE, 1, 0, 0), - GATE(CLK_MDMA2, "mdma2", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0), + GATE(CLK_MDMA, "mdma", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0), GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0, 0), GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), @@ -1043,7 +1043,7 @@ static unsigned long exynos4_get_xom(void) return xom; } -static void __init exynos4_clk_register_finpll(void) +static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx) { struct samsung_fixed_rate_clock fclk; struct clk *clk; @@ -1066,7 +1066,7 @@ static void __init exynos4_clk_register_finpll(void) fclk.parent_name = NULL; fclk.flags = CLK_IS_ROOT; fclk.fixed_rate = finpll_f; - samsung_clk_register_fixed_rate(&fclk, 1); + samsung_clk_register_fixed_rate(ctx, &fclk, 1); } @@ -1176,22 +1176,25 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = { static void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc soc) { + struct samsung_clk_provider *ctx; exynos4_soc = soc; reg_base = of_iomap(np, 0); if (!reg_base) panic("%s: failed to map registers\n", __func__); - samsung_clk_init(np, reg_base, CLK_NR_CLKS); + ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); - samsung_clk_of_register_fixed_ext(exynos4_fixed_rate_ext_clks, + samsung_clk_of_register_fixed_ext(ctx, exynos4_fixed_rate_ext_clks, ARRAY_SIZE(exynos4_fixed_rate_ext_clks), ext_clk_match); - exynos4_clk_register_finpll(); + exynos4_clk_register_finpll(ctx); if (exynos4_soc == EXYNOS4210) { - samsung_clk_register_mux(exynos4210_mux_early, + samsung_clk_register_mux(ctx, exynos4210_mux_early, ARRAY_SIZE(exynos4210_mux_early)); if (_get_rate("fin_pll") == 24000000) { @@ -1205,7 +1208,7 @@ static void __init exynos4_clk_init(struct device_node *np, exynos4210_plls[vpll].rate_table = exynos4210_vpll_rates; - samsung_clk_register_pll(exynos4210_plls, + samsung_clk_register_pll(ctx, exynos4210_plls, ARRAY_SIZE(exynos4210_plls), reg_base); } else { if (_get_rate("fin_pll") == 24000000) { @@ -1217,42 +1220,42 @@ static void __init exynos4_clk_init(struct device_node *np, exynos4x12_vpll_rates; } - samsung_clk_register_pll(exynos4x12_plls, + samsung_clk_register_pll(ctx, exynos4x12_plls, ARRAY_SIZE(exynos4x12_plls), reg_base); } - samsung_clk_register_fixed_rate(exynos4_fixed_rate_clks, + samsung_clk_register_fixed_rate(ctx, exynos4_fixed_rate_clks, ARRAY_SIZE(exynos4_fixed_rate_clks)); - samsung_clk_register_mux(exynos4_mux_clks, + samsung_clk_register_mux(ctx, exynos4_mux_clks, 
ARRAY_SIZE(exynos4_mux_clks)); - samsung_clk_register_div(exynos4_div_clks, + samsung_clk_register_div(ctx, exynos4_div_clks, ARRAY_SIZE(exynos4_div_clks)); - samsung_clk_register_gate(exynos4_gate_clks, + samsung_clk_register_gate(ctx, exynos4_gate_clks, ARRAY_SIZE(exynos4_gate_clks)); if (exynos4_soc == EXYNOS4210) { - samsung_clk_register_fixed_rate(exynos4210_fixed_rate_clks, + samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks, ARRAY_SIZE(exynos4210_fixed_rate_clks)); - samsung_clk_register_mux(exynos4210_mux_clks, + samsung_clk_register_mux(ctx, exynos4210_mux_clks, ARRAY_SIZE(exynos4210_mux_clks)); - samsung_clk_register_div(exynos4210_div_clks, + samsung_clk_register_div(ctx, exynos4210_div_clks, ARRAY_SIZE(exynos4210_div_clks)); - samsung_clk_register_gate(exynos4210_gate_clks, + samsung_clk_register_gate(ctx, exynos4210_gate_clks, ARRAY_SIZE(exynos4210_gate_clks)); - samsung_clk_register_alias(exynos4210_aliases, + samsung_clk_register_alias(ctx, exynos4210_aliases, ARRAY_SIZE(exynos4210_aliases)); } else { - samsung_clk_register_mux(exynos4x12_mux_clks, + samsung_clk_register_mux(ctx, exynos4x12_mux_clks, ARRAY_SIZE(exynos4x12_mux_clks)); - samsung_clk_register_div(exynos4x12_div_clks, + samsung_clk_register_div(ctx, exynos4x12_div_clks, ARRAY_SIZE(exynos4x12_div_clks)); - samsung_clk_register_gate(exynos4x12_gate_clks, + samsung_clk_register_gate(ctx, exynos4x12_gate_clks, ARRAY_SIZE(exynos4x12_gate_clks)); - samsung_clk_register_alias(exynos4x12_aliases, + samsung_clk_register_alias(ctx, exynos4x12_aliases, ARRAY_SIZE(exynos4x12_aliases)); } - samsung_clk_register_alias(exynos4_aliases, + samsung_clk_register_alias(ctx, exynos4_aliases, ARRAY_SIZE(exynos4_aliases)); exynos4_clk_sleep_init(); diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c index e7ee4420da8..1fad4c5e3f5 100644 --- a/drivers/clk/samsung/clk-exynos5250.c +++ b/drivers/clk/samsung/clk-exynos5250.c @@ -24,10 +24,14 @@ #define APLL_CON0 0x100 #define SRC_CPU 0x200 #define DIV_CPU0 0x500 +#define PWR_CTRL1 0x1020 +#define PWR_CTRL2 0x1024 #define MPLL_LOCK 0x4000 #define MPLL_CON0 0x4100 #define SRC_CORE1 0x4204 #define GATE_IP_ACP 0x8800 +#define GATE_IP_ISP0 0xc800 +#define GATE_IP_ISP1 0xc804 #define CPLL_LOCK 0x10020 #define EPLL_LOCK 0x10030 #define VPLL_LOCK 0x10040 @@ -37,6 +41,7 @@ #define VPLL_CON0 0x10140 #define GPLL_CON0 0x10150 #define SRC_TOP0 0x10210 +#define SRC_TOP1 0x10214 #define SRC_TOP2 0x10218 #define SRC_TOP3 0x1021c #define SRC_GSCL 0x10220 @@ -71,6 +76,7 @@ #define GATE_IP_GSCL 0x10920 #define GATE_IP_DISP1 0x10928 #define GATE_IP_MFC 0x1092c +#define GATE_IP_G3D 0x10930 #define GATE_IP_GEN 0x10934 #define GATE_IP_FSYS 0x10944 #define GATE_IP_PERIC 0x10950 @@ -80,6 +86,23 @@ #define SRC_CDREX 0x20200 #define PLL_DIV2_SEL 0x20a24 +/* Below definitions are used for PWR_CTRL settings */ +#define PWR_CTRL1_CORE2_DOWN_RATIO (7 << 28) +#define PWR_CTRL1_CORE1_DOWN_RATIO (7 << 16) +#define PWR_CTRL1_DIV2_DOWN_EN (1 << 9) +#define PWR_CTRL1_DIV1_DOWN_EN (1 << 8) +#define PWR_CTRL1_USE_CORE1_WFE (1 << 5) +#define PWR_CTRL1_USE_CORE0_WFE (1 << 4) +#define PWR_CTRL1_USE_CORE1_WFI (1 << 1) +#define PWR_CTRL1_USE_CORE0_WFI (1 << 0) + +#define PWR_CTRL2_DIV2_UP_EN (1 << 25) +#define PWR_CTRL2_DIV1_UP_EN (1 << 24) +#define PWR_CTRL2_DUR_STANDBY2_VAL (1 << 16) +#define PWR_CTRL2_DUR_STANDBY1_VAL (1 << 8) +#define PWR_CTRL2_CORE2_UP_RATIO (1 << 4) +#define PWR_CTRL2_CORE1_UP_RATIO (1 << 0) + /* list of PLLs to be registered */ enum exynos5250_plls { apll,
mpll, cpll, epll, vpll, gpll, bpll, @@ -98,8 +121,11 @@ static struct samsung_clk_reg_dump *exynos5250_save; static unsigned long exynos5250_clk_regs[] __initdata = { SRC_CPU, DIV_CPU0, + PWR_CTRL1, + PWR_CTRL2, SRC_CORE1, SRC_TOP0, + SRC_TOP1, SRC_TOP2, SRC_TOP3, SRC_GSCL, @@ -133,6 +159,7 @@ static unsigned long exynos5250_clk_regs[] __initdata = { DIV_PERIC5, GATE_IP_GSCL, GATE_IP_MFC, + GATE_IP_G3D, GATE_IP_GEN, GATE_IP_FSYS, GATE_IP_PERIC, @@ -141,6 +168,8 @@ static unsigned long exynos5250_clk_regs[] __initdata = { PLL_DIV2_SEL, GATE_IP_DISP1, GATE_IP_ACP, + GATE_IP_ISP0, + GATE_IP_ISP1, }; static int exynos5250_clk_suspend(void) @@ -189,13 +218,16 @@ PNAME(mout_vpllsrc_p) = { "fin_pll", "sclk_hdmi27m" }; PNAME(mout_vpll_p) = { "mout_vpllsrc", "fout_vpll" }; PNAME(mout_cpll_p) = { "fin_pll", "fout_cpll" }; PNAME(mout_epll_p) = { "fin_pll", "fout_epll" }; +PNAME(mout_gpll_p) = { "fin_pll", "fout_gpll" }; PNAME(mout_mpll_user_p) = { "fin_pll", "mout_mpll" }; PNAME(mout_bpll_user_p) = { "fin_pll", "mout_bpll" }; PNAME(mout_aclk166_p) = { "mout_cpll", "mout_mpll_user" }; PNAME(mout_aclk200_p) = { "mout_mpll_user", "mout_bpll_user" }; +PNAME(mout_aclk400_p) = { "mout_aclk400_g3d_mid", "mout_gpll" }; PNAME(mout_aclk200_sub_p) = { "fin_pll", "div_aclk200" }; PNAME(mout_aclk266_sub_p) = { "fin_pll", "div_aclk266" }; PNAME(mout_aclk333_sub_p) = { "fin_pll", "div_aclk333" }; +PNAME(mout_aclk400_isp_sub_p) = { "fin_pll", "div_aclk400_isp" }; PNAME(mout_hdmi_p) = { "div_hdmi_pixel", "sclk_hdmiphy" }; PNAME(mout_usb3_p) = { "mout_mpll_user", "mout_cpll" }; PNAME(mout_group1_p) = { "fin_pll", "fin_pll", "sclk_hdmi27m", @@ -273,15 +305,23 @@ static struct samsung_mux_clock exynos5250_mux_clks[] __initdata = { MUX(0, "mout_aclk166", mout_aclk166_p, SRC_TOP0, 8, 1), MUX(0, "mout_aclk200", mout_aclk200_p, SRC_TOP0, 12, 1), MUX(0, "mout_aclk333", mout_aclk166_p, SRC_TOP0, 16, 1), + MUX(0, "mout_aclk400_g3d_mid", mout_aclk200_p, SRC_TOP0, 20, 1), + + MUX(0, "mout_aclk400_isp", mout_aclk200_p, SRC_TOP1, 24, 1), + MUX(0, "mout_aclk400_g3d", mout_aclk400_p, SRC_TOP1, 28, 1), MUX(0, "mout_cpll", mout_cpll_p, SRC_TOP2, 8, 1), MUX(0, "mout_epll", mout_epll_p, SRC_TOP2, 12, 1), MUX(0, "mout_vpll", mout_vpll_p, SRC_TOP2, 16, 1), MUX(0, "mout_mpll_user", mout_mpll_user_p, SRC_TOP2, 20, 1), MUX(0, "mout_bpll_user", mout_bpll_user_p, SRC_TOP2, 24, 1), + MUX(CLK_MOUT_GPLL, "mout_gpll", mout_gpll_p, SRC_TOP2, 28, 1), MUX(0, "mout_aclk200_disp1_sub", mout_aclk200_sub_p, SRC_TOP3, 4, 1), MUX(0, "mout_aclk266_gscl_sub", mout_aclk266_sub_p, SRC_TOP3, 8, 1), + MUX(0, "mout_aclk_266_isp_sub", mout_aclk266_sub_p, SRC_TOP3, 16, 1), + MUX(0, "mout_aclk_400_isp_sub", mout_aclk400_isp_sub_p, + SRC_TOP3, 20, 1), MUX(0, "mout_aclk333_sub", mout_aclk333_sub_p, SRC_TOP3, 24, 1), MUX(0, "mout_cam_bayer", mout_group1_p, SRC_GSCL, 12, 4), @@ -351,7 +391,10 @@ static struct samsung_div_clock exynos5250_div_clks[] __initdata = { DIV(0, "div_aclk200", "mout_aclk200", DIV_TOP0, 12, 3), DIV(0, "div_aclk266", "mout_mpll_user", DIV_TOP0, 16, 3), DIV(0, "div_aclk333", "mout_aclk333", DIV_TOP0, 20, 3), + DIV(0, "div_aclk400_g3d", "mout_aclk400_g3d", DIV_TOP0, + 24, 3), + DIV(0, "div_aclk400_isp", "mout_aclk400_isp", DIV_TOP1, 20, 3), DIV(0, "div_aclk66_pre", "mout_mpll_user", DIV_TOP1, 24, 3), DIV(0, "div_cam_bayer", "mout_cam_bayer", DIV_GSCL, 12, 4), @@ -428,6 +471,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { * CMU_ACP */ GATE(CLK_MDMA0, "mdma0", "div_aclk266", GATE_IP_ACP, 1, 0, 0), + GATE(CLK_SSS, "sss", 
"div_aclk266", GATE_IP_ACP, 2, 0, 0), GATE(CLK_G2D, "g2d", "div_aclk200", GATE_IP_ACP, 3, 0, 0), GATE(CLK_SMMU_MDMA0, "smmu_mdma0", "div_aclk266", GATE_IP_ACP, 5, 0, 0), @@ -533,7 +577,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { 0), GATE(CLK_SMMU_MFCL, "smmu_mfcl", "mout_aclk333_sub", GATE_IP_MFC, 2, 0, 0), - + GATE(CLK_G3D, "g3d", "div_aclk400_g3d", GATE_IP_G3D, 0, + CLK_SET_RATE_PARENT, 0), GATE(CLK_ROTATOR, "rotator", "div_aclk266", GATE_IP_GEN, 1, 0, 0), GATE(CLK_JPEG, "jpeg", "div_aclk166", GATE_IP_GEN, 2, 0, 0), GATE(CLK_MDMA1, "mdma1", "div_aclk266", GATE_IP_GEN, 4, 0, 0), @@ -615,6 +660,31 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = { GATE(CLK_WDT, "wdt", "div_aclk66", GATE_IP_PERIS, 19, 0, 0), GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0), GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0), + GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub", + GATE_IP_DISP1, 2, 0, 0), + GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub", + GATE_IP_DISP1, 8, 0, 0), + GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0), + GATE(CLK_SMMU_FIMC_ISP, "smmu_fimc_isp", "mout_aclk_266_isp_sub", + GATE_IP_ISP0, 8, 0, 0), + GATE(CLK_SMMU_FIMC_DRC, "smmu_fimc_drc", "mout_aclk_266_isp_sub", + GATE_IP_ISP0, 9, 0, 0), + GATE(CLK_SMMU_FIMC_FD, "smmu_fimc_fd", "mout_aclk_266_isp_sub", + GATE_IP_ISP0, 10, 0, 0), + GATE(CLK_SMMU_FIMC_SCC, "smmu_fimc_scc", "mout_aclk_266_isp_sub", + GATE_IP_ISP0, 11, 0, 0), + GATE(CLK_SMMU_FIMC_SCP, "smmu_fimc_scp", "mout_aclk_266_isp_sub", + GATE_IP_ISP0, 12, 0, 0), + GATE(CLK_SMMU_FIMC_MCU, "smmu_fimc_mcu", "mout_aclk_400_isp_sub", + GATE_IP_ISP0, 13, 0, 0), + GATE(CLK_SMMU_FIMC_ODC, "smmu_fimc_odc", "mout_aclk_266_isp_sub", + GATE_IP_ISP1, 4, 0, 0), + GATE(CLK_SMMU_FIMC_DIS0, "smmu_fimc_dis0", "mout_aclk_266_isp_sub", + GATE_IP_ISP1, 5, 0, 0), + GATE(CLK_SMMU_FIMC_DIS1, "smmu_fimc_dis1", "mout_aclk_266_isp_sub", + GATE_IP_ISP1, 6, 0, 0), + GATE(CLK_SMMU_FIMC_3DNR, "smmu_fimc_3dnr", "mout_aclk_266_isp_sub", + GATE_IP_ISP1, 7, 0, 0), }; static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = { @@ -686,6 +756,9 @@ static struct of_device_id ext_clk_match[] __initdata = { /* register exynox5250 clocks */ static void __init exynos5250_clk_init(struct device_node *np) { + struct samsung_clk_provider *ctx; + unsigned int tmp; + if (np) { reg_base = of_iomap(np, 0); if (!reg_base) @@ -694,11 +767,13 @@ static void __init exynos5250_clk_init(struct device_node *np) panic("%s: unable to determine soc\n", __func__); } - samsung_clk_init(np, reg_base, CLK_NR_CLKS); - samsung_clk_of_register_fixed_ext(exynos5250_fixed_rate_ext_clks, + ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + samsung_clk_of_register_fixed_ext(ctx, exynos5250_fixed_rate_ext_clks, ARRAY_SIZE(exynos5250_fixed_rate_ext_clks), ext_clk_match); - samsung_clk_register_mux(exynos5250_pll_pmux_clks, + samsung_clk_register_mux(ctx, exynos5250_pll_pmux_clks, ARRAY_SIZE(exynos5250_pll_pmux_clks)); if (_get_rate("fin_pll") == 24 * MHZ) { @@ -709,19 +784,40 @@ static void __init exynos5250_clk_init(struct device_node *np) if (_get_rate("mout_vpllsrc") == 24 * MHZ) exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl; - samsung_clk_register_pll(exynos5250_plls, ARRAY_SIZE(exynos5250_plls), - reg_base); - samsung_clk_register_fixed_rate(exynos5250_fixed_rate_clks, + samsung_clk_register_pll(ctx, exynos5250_plls, + ARRAY_SIZE(exynos5250_plls), + 
reg_base); + samsung_clk_register_fixed_rate(ctx, exynos5250_fixed_rate_clks, ARRAY_SIZE(exynos5250_fixed_rate_clks)); - samsung_clk_register_fixed_factor(exynos5250_fixed_factor_clks, + samsung_clk_register_fixed_factor(ctx, exynos5250_fixed_factor_clks, ARRAY_SIZE(exynos5250_fixed_factor_clks)); - samsung_clk_register_mux(exynos5250_mux_clks, + samsung_clk_register_mux(ctx, exynos5250_mux_clks, ARRAY_SIZE(exynos5250_mux_clks)); - samsung_clk_register_div(exynos5250_div_clks, + samsung_clk_register_div(ctx, exynos5250_div_clks, ARRAY_SIZE(exynos5250_div_clks)); - samsung_clk_register_gate(exynos5250_gate_clks, + samsung_clk_register_gate(ctx, exynos5250_gate_clks, ARRAY_SIZE(exynos5250_gate_clks)); + /* + * Enable arm clock down (in idle) and set arm divider + * ratios in WFI/WFE state. + */ + tmp = (PWR_CTRL1_CORE2_DOWN_RATIO | PWR_CTRL1_CORE1_DOWN_RATIO | + PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN | + PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE | + PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI); + __raw_writel(tmp, reg_base + PWR_CTRL1); + + /* + * Enable arm clock up (on exiting idle). Set arm divider + * ratios when not in idle along with the standby duration + * ratios. + */ + tmp = (PWR_CTRL2_DIV2_UP_EN | PWR_CTRL2_DIV1_UP_EN | + PWR_CTRL2_DUR_STANDBY2_VAL | PWR_CTRL2_DUR_STANDBY1_VAL | + PWR_CTRL2_CORE2_UP_RATIO | PWR_CTRL2_CORE1_UP_RATIO); + __raw_writel(tmp, reg_base + PWR_CTRL2); + exynos5250_clk_sleep_init(); pr_info("Exynos5250: clock setup completed, armclk=%ld\n", diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c new file mode 100644 index 00000000000..64596ba58df --- /dev/null +++ b/drivers/clk/samsung/clk-exynos5260.c @@ -0,0 +1,1980 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Rahul Sharma <rahul.sharma@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common Clock Framework support for Exynos5260 SoC. + */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> + +#include "clk-exynos5260.h" +#include "clk.h" +#include "clk-pll.h" + +#include <dt-bindings/clock/exynos5260-clk.h> + +static LIST_HEAD(clock_reg_cache_list); + +struct exynos5260_clock_reg_cache { + struct list_head node; + void __iomem *reg_base; + struct samsung_clk_reg_dump *rdump; + unsigned int rd_num; +}; + +struct exynos5260_cmu_info { + /* list of pll clocks and respective count */ + struct samsung_pll_clock *pll_clks; + unsigned int nr_pll_clks; + /* list of mux clocks and respective count */ + struct samsung_mux_clock *mux_clks; + unsigned int nr_mux_clks; + /* list of div clocks and respective count */ + struct samsung_div_clock *div_clks; + unsigned int nr_div_clks; + /* list of gate clocks and respective count */ + struct samsung_gate_clock *gate_clks; + unsigned int nr_gate_clks; + /* list of fixed clocks and respective count */ + struct samsung_fixed_rate_clock *fixed_clks; + unsigned int nr_fixed_clks; + /* total number of clocks with IDs assigned */ + unsigned int nr_clk_ids; + + /* list and number of clock registers */ + unsigned long *clk_regs; + unsigned int nr_clk_regs; +}; + +/* + * Applicable for all 2550 Type PLLs of Exynos5260, listed below: + * DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
+ */ +static struct samsung_pll_rate_table pll2550_24mhz_tbl[] __initdata = { + PLL_35XX_RATE(1700000000, 425, 6, 0), + PLL_35XX_RATE(1600000000, 200, 3, 0), + PLL_35XX_RATE(1500000000, 250, 4, 0), + PLL_35XX_RATE(1400000000, 175, 3, 0), + PLL_35XX_RATE(1300000000, 325, 6, 0), + PLL_35XX_RATE(1200000000, 400, 4, 1), + PLL_35XX_RATE(1100000000, 275, 3, 1), + PLL_35XX_RATE(1000000000, 250, 3, 1), + PLL_35XX_RATE(933000000, 311, 4, 1), + PLL_35XX_RATE(900000000, 300, 4, 1), + PLL_35XX_RATE(800000000, 200, 3, 1), + PLL_35XX_RATE(733000000, 733, 12, 1), + PLL_35XX_RATE(700000000, 175, 3, 1), + PLL_35XX_RATE(667000000, 667, 12, 1), + PLL_35XX_RATE(633000000, 211, 4, 1), + PLL_35XX_RATE(620000000, 310, 3, 2), + PLL_35XX_RATE(600000000, 400, 4, 2), + PLL_35XX_RATE(543000000, 362, 4, 2), + PLL_35XX_RATE(533000000, 533, 6, 2), + PLL_35XX_RATE(500000000, 250, 3, 2), + PLL_35XX_RATE(450000000, 300, 4, 2), + PLL_35XX_RATE(400000000, 200, 3, 2), + PLL_35XX_RATE(350000000, 175, 3, 2), + PLL_35XX_RATE(300000000, 400, 4, 3), + PLL_35XX_RATE(266000000, 266, 3, 3), + PLL_35XX_RATE(200000000, 200, 3, 3), + PLL_35XX_RATE(160000000, 160, 3, 3), +}; + +/* + * Applicable for 2650 Type PLL for AUD_PLL. + */ +static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = { + PLL_36XX_RATE(1600000000, 200, 3, 0, 0), + PLL_36XX_RATE(1200000000, 100, 2, 0, 0), + PLL_36XX_RATE(1000000000, 250, 3, 1, 0), + PLL_36XX_RATE(800000000, 200, 3, 1, 0), + PLL_36XX_RATE(600000000, 100, 2, 1, 0), + PLL_36XX_RATE(532000000, 266, 3, 2, 0), + PLL_36XX_RATE(480000000, 160, 2, 2, 0), + PLL_36XX_RATE(432000000, 144, 2, 2, 0), + PLL_36XX_RATE(400000000, 200, 3, 2, 0), + PLL_36XX_RATE(394073130, 459, 7, 2, 49282), + PLL_36XX_RATE(333000000, 111, 2, 2, 0), + PLL_36XX_RATE(300000000, 100, 2, 2, 0), + PLL_36XX_RATE(266000000, 266, 3, 3, 0), + PLL_36XX_RATE(200000000, 200, 3, 3, 0), + PLL_36XX_RATE(166000000, 166, 3, 3, 0), + PLL_36XX_RATE(133000000, 266, 3, 4, 0), + PLL_36XX_RATE(100000000, 200, 3, 4, 0), + PLL_36XX_RATE(66000000, 176, 2, 5, 0), +}; + +#ifdef CONFIG_PM_SLEEP + +static int exynos5260_clk_suspend(void) +{ + struct exynos5260_clock_reg_cache *cache; + + list_for_each_entry(cache, &clock_reg_cache_list, node) + samsung_clk_save(cache->reg_base, cache->rdump, + cache->rd_num); + + return 0; +} + +static void exynos5260_clk_resume(void) +{ + struct exynos5260_clock_reg_cache *cache; + + list_for_each_entry(cache, &clock_reg_cache_list, node) + samsung_clk_restore(cache->reg_base, cache->rdump, + cache->rd_num); +} + +static struct syscore_ops exynos5260_clk_syscore_ops = { + .suspend = exynos5260_clk_suspend, + .resume = exynos5260_clk_resume, +}; + +static void exynos5260_clk_sleep_init(void __iomem *reg_base, + unsigned long *rdump, + unsigned long nr_rdump) +{ + struct exynos5260_clock_reg_cache *reg_cache; + + reg_cache = kzalloc(sizeof(struct exynos5260_clock_reg_cache), + GFP_KERNEL); + if (!reg_cache) + panic("could not allocate register cache.\n"); + + reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump); + + if (!reg_cache->rdump) + panic("could not allocate register dump storage.\n"); + + if (list_empty(&clock_reg_cache_list)) + register_syscore_ops(&exynos5260_clk_syscore_ops); + + reg_cache->rd_num = nr_rdump; + reg_cache->reg_base = reg_base; + list_add_tail(&reg_cache->node, &clock_reg_cache_list); +} + +#else +static void exynos5260_clk_sleep_init(void __iomem *reg_base, + unsigned long *rdump, + unsigned long nr_rdump) { } +#endif + +/* + * Common function which registers plls, muxes, dividers and
gates + * for each CMU. It also adds the CMU register list to the register cache. + */ + +void __init exynos5260_cmu_register_one(struct device_node *np, + struct exynos5260_cmu_info *cmu) +{ + void __iomem *reg_base; + struct samsung_clk_provider *ctx; + + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + + ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids); + if (!ctx) + panic("%s: unable to allocate ctx\n", __func__); + + if (cmu->pll_clks) + samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks, + reg_base); + if (cmu->mux_clks) + samsung_clk_register_mux(ctx, cmu->mux_clks, + cmu->nr_mux_clks); + if (cmu->div_clks) + samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks); + if (cmu->gate_clks) + samsung_clk_register_gate(ctx, cmu->gate_clks, + cmu->nr_gate_clks); + if (cmu->fixed_clks) + samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks, + cmu->nr_fixed_clks); + if (cmu->clk_regs) + exynos5260_clk_sleep_init(reg_base, cmu->clk_regs, + cmu->nr_clk_regs); +} + + +/* CMU_AUD */ + +static unsigned long aud_clk_regs[] __initdata = { + MUX_SEL_AUD, + DIV_AUD0, + DIV_AUD1, + EN_ACLK_AUD, + EN_PCLK_AUD, + EN_SCLK_AUD, + EN_IP_AUD, +}; + +PNAME(mout_aud_pll_user_p) = {"fin_pll", "fout_aud_pll"}; +PNAME(mout_sclk_aud_i2s_p) = {"mout_aud_pll_user", "ioclk_i2s_cdclk"}; +PNAME(mout_sclk_aud_pcm_p) = {"mout_aud_pll_user", "ioclk_pcm_extclk"}; + +struct samsung_mux_clock aud_mux_clks[] __initdata = { + MUX(AUD_MOUT_AUD_PLL_USER, "mout_aud_pll_user", mout_aud_pll_user_p, + MUX_SEL_AUD, 0, 1), + MUX(AUD_MOUT_SCLK_AUD_I2S, "mout_sclk_aud_i2s", mout_sclk_aud_i2s_p, + MUX_SEL_AUD, 4, 1), + MUX(AUD_MOUT_SCLK_AUD_PCM, "mout_sclk_aud_pcm", mout_sclk_aud_pcm_p, + MUX_SEL_AUD, 8, 1), +}; + +struct samsung_div_clock aud_div_clks[] __initdata = { + DIV(AUD_DOUT_ACLK_AUD_131, "dout_aclk_aud_131", "mout_aud_pll_user", + DIV_AUD0, 0, 4), + + DIV(AUD_DOUT_SCLK_AUD_I2S, "dout_sclk_aud_i2s", "mout_sclk_aud_i2s", + DIV_AUD1, 0, 4), + DIV(AUD_DOUT_SCLK_AUD_PCM, "dout_sclk_aud_pcm", "mout_sclk_aud_pcm", + DIV_AUD1, 4, 8), + DIV(AUD_DOUT_SCLK_AUD_UART, "dout_sclk_aud_uart", "mout_aud_pll_user", + DIV_AUD1, 12, 4), +}; + +struct samsung_gate_clock aud_gate_clks[] __initdata = { + GATE(AUD_SCLK_I2S, "sclk_aud_i2s", "dout_sclk_aud_i2s", + EN_SCLK_AUD, 0, CLK_SET_RATE_PARENT, 0), + GATE(AUD_SCLK_PCM, "sclk_aud_pcm", "dout_sclk_aud_pcm", + EN_SCLK_AUD, 1, CLK_SET_RATE_PARENT, 0), + GATE(AUD_SCLK_AUD_UART, "sclk_aud_uart", "dout_sclk_aud_uart", + EN_SCLK_AUD, 2, CLK_SET_RATE_PARENT, 0), + + GATE(AUD_CLK_SRAMC, "clk_sramc", "dout_aclk_aud_131", EN_IP_AUD, + 0, 0, 0), + GATE(AUD_CLK_DMAC, "clk_dmac", "dout_aclk_aud_131", + EN_IP_AUD, 1, 0, 0), + GATE(AUD_CLK_I2S, "clk_i2s", "dout_aclk_aud_131", EN_IP_AUD, 2, 0, 0), + GATE(AUD_CLK_PCM, "clk_pcm", "dout_aclk_aud_131", EN_IP_AUD, 3, 0, 0), + GATE(AUD_CLK_AUD_UART, "clk_aud_uart", "dout_aclk_aud_131", + EN_IP_AUD, 4, 0, 0), +}; + +static void __init exynos5260_clk_aud_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.mux_clks = aud_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks); + cmu.div_clks = aud_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(aud_div_clks); + cmu.gate_clks = aud_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(aud_gate_clks); + cmu.nr_clk_ids = AUD_NR_CLK; + cmu.clk_regs = aud_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud", + exynos5260_clk_aud_init); +
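/*
 * Every CMU block in this file follows the same shape as CMU_AUD above:
 * static PNAME/MUX/DIV/GATE tables, an __init function that fills a
 * struct exynos5260_cmu_info, and a CLK_OF_DECLARE() hook that binds it
 * to the CMU's compatible string. The minimal sketch below illustrates
 * that pattern with a hypothetical CMU; the FOO_* clock ids, the foo_*
 * names, the register offsets and the compatible string are invented
 * for illustration and are not part of this driver.
 */
#if 0	/* illustrative sketch only */
#define MUX_SEL_FOO	0x0200
#define DIV_FOO		0x0500
#define EN_IP_FOO	0x0800

/* registers to save/restore across suspend, as in aud_clk_regs[] */
static unsigned long foo_clk_regs[] __initdata = {
	MUX_SEL_FOO,
	DIV_FOO,
	EN_IP_FOO,
};

PNAME(mout_aclk_foo_user_p) = {"fin_pll", "dout_aclk_foo"};

static struct samsung_mux_clock foo_mux_clks[] __initdata = {
	MUX(FOO_MOUT_ACLK_FOO_USER, "mout_aclk_foo_user",
			mout_aclk_foo_user_p, MUX_SEL_FOO, 0, 1),
};

static struct samsung_div_clock foo_div_clks[] __initdata = {
	DIV(FOO_DOUT_PCLK_FOO, "dout_pclk_foo", "mout_aclk_foo_user",
			DIV_FOO, 0, 3),
};

static struct samsung_gate_clock foo_gate_clks[] __initdata = {
	GATE(FOO_CLK_FOO, "clk_foo", "dout_pclk_foo", EN_IP_FOO, 0, 0, 0),
};

static void __init exynos5260_clk_foo_init(struct device_node *np)
{
	struct exynos5260_cmu_info cmu = {0};

	cmu.mux_clks = foo_mux_clks;
	cmu.nr_mux_clks = ARRAY_SIZE(foo_mux_clks);
	cmu.div_clks = foo_div_clks;
	cmu.nr_div_clks = ARRAY_SIZE(foo_div_clks);
	cmu.gate_clks = foo_gate_clks;
	cmu.nr_gate_clks = ARRAY_SIZE(foo_gate_clks);
	cmu.nr_clk_ids = FOO_NR_CLK;	/* hypothetical id count */
	cmu.clk_regs = foo_clk_regs;
	cmu.nr_clk_regs = ARRAY_SIZE(foo_clk_regs);

	/* registers everything and hooks up suspend/resume save/restore */
	exynos5260_cmu_register_one(np, &cmu);
}

CLK_OF_DECLARE(exynos5260_clk_foo, "samsung,exynos5260-clock-foo",
		exynos5260_clk_foo_init);
#endif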
+ +/* CMU_DISP */ + +static unsigned long disp_clk_regs[] __initdata = { + MUX_SEL_DISP0, + MUX_SEL_DISP1, + MUX_SEL_DISP2, + MUX_SEL_DISP3, + MUX_SEL_DISP4, + DIV_DISP, + EN_ACLK_DISP, + EN_PCLK_DISP, + EN_SCLK_DISP0, + EN_SCLK_DISP1, + EN_IP_DISP, + EN_IP_DISP_BUS, +}; + +PNAME(mout_phyclk_dptx_phy_ch3_txd_clk_user_p) = {"fin_pll", + "phyclk_dptx_phy_ch3_txd_clk"}; +PNAME(mout_phyclk_dptx_phy_ch2_txd_clk_user_p) = {"fin_pll", + "phyclk_dptx_phy_ch2_txd_clk"}; +PNAME(mout_phyclk_dptx_phy_ch1_txd_clk_user_p) = {"fin_pll", + "phyclk_dptx_phy_ch1_txd_clk"}; +PNAME(mout_phyclk_dptx_phy_ch0_txd_clk_user_p) = {"fin_pll", + "phyclk_dptx_phy_ch0_txd_clk"}; +PNAME(mout_aclk_disp_222_user_p) = {"fin_pll", "dout_aclk_disp_222"}; +PNAME(mout_sclk_disp_pixel_user_p) = {"fin_pll", "dout_sclk_disp_pixel"}; +PNAME(mout_aclk_disp_333_user_p) = {"fin_pll", "dout_aclk_disp_333"}; +PNAME(mout_phyclk_hdmi_phy_tmds_clko_user_p) = {"fin_pll", + "phyclk_hdmi_phy_tmds_clko"}; +PNAME(mout_phyclk_hdmi_phy_ref_clko_user_p) = {"fin_pll", + "phyclk_hdmi_phy_ref_clko"}; +PNAME(mout_phyclk_hdmi_phy_pixel_clko_user_p) = {"fin_pll", + "phyclk_hdmi_phy_pixel_clko"}; +PNAME(mout_phyclk_hdmi_link_o_tmds_clkhi_user_p) = {"fin_pll", + "phyclk_hdmi_link_o_tmds_clkhi"}; +PNAME(mout_phyclk_mipi_dphy_4l_m_txbyte_clkhs_p) = {"fin_pll", + "phyclk_mipi_dphy_4l_m_txbyte_clkhs"}; +PNAME(mout_phyclk_dptx_phy_o_ref_clk_24m_user_p) = {"fin_pll", + "phyclk_dptx_phy_o_ref_clk_24m"}; +PNAME(mout_phyclk_dptx_phy_clk_div2_user_p) = {"fin_pll", + "phyclk_dptx_phy_clk_div2"}; +PNAME(mout_sclk_hdmi_pixel_p) = {"mout_sclk_disp_pixel_user", + "mout_aclk_disp_222_user"}; +PNAME(mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p) = {"fin_pll", + "phyclk_mipi_dphy_4l_m_rxclkesc0"}; +PNAME(mout_sclk_hdmi_spdif_p) = {"fin_pll", "ioclk_spdif_extclk", + "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"}; + +struct samsung_mux_clock disp_mux_clks[] __initdata = { + MUX(DISP_MOUT_ACLK_DISP_333_USER, "mout_aclk_disp_333_user", + mout_aclk_disp_333_user_p, + MUX_SEL_DISP0, 0, 1), + MUX(DISP_MOUT_SCLK_DISP_PIXEL_USER, "mout_sclk_disp_pixel_user", + mout_sclk_disp_pixel_user_p, + MUX_SEL_DISP0, 4, 1), + MUX(DISP_MOUT_ACLK_DISP_222_USER, "mout_aclk_disp_222_user", + mout_aclk_disp_222_user_p, + MUX_SEL_DISP0, 8, 1), + MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH0_TXD_CLK_USER, + "mout_phyclk_dptx_phy_ch0_txd_clk_user", + mout_phyclk_dptx_phy_ch0_txd_clk_user_p, + MUX_SEL_DISP0, 16, 1), + MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH1_TXD_CLK_USER, + "mout_phyclk_dptx_phy_ch1_txd_clk_user", + mout_phyclk_dptx_phy_ch1_txd_clk_user_p, + MUX_SEL_DISP0, 20, 1), + MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH2_TXD_CLK_USER, + "mout_phyclk_dptx_phy_ch2_txd_clk_user", + mout_phyclk_dptx_phy_ch2_txd_clk_user_p, + MUX_SEL_DISP0, 24, 1), + MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CH3_TXD_CLK_USER, + "mout_phyclk_dptx_phy_ch3_txd_clk_user", + mout_phyclk_dptx_phy_ch3_txd_clk_user_p, + MUX_SEL_DISP0, 28, 1), + + MUX(DISP_MOUT_PHYCLK_DPTX_PHY_CLK_DIV2_USER, + "mout_phyclk_dptx_phy_clk_div2_user", + mout_phyclk_dptx_phy_clk_div2_user_p, + MUX_SEL_DISP1, 0, 1), + MUX(DISP_MOUT_PHYCLK_DPTX_PHY_O_REF_CLK_24M_USER, + "mout_phyclk_dptx_phy_o_ref_clk_24m_user", + mout_phyclk_dptx_phy_o_ref_clk_24m_user_p, + MUX_SEL_DISP1, 4, 1), + MUX(DISP_MOUT_PHYCLK_MIPI_DPHY_4L_M_TXBYTE_CLKHS, + "mout_phyclk_mipi_dphy_4l_m_txbyte_clkhs", + mout_phyclk_mipi_dphy_4l_m_txbyte_clkhs_p, + MUX_SEL_DISP1, 8, 1), + MUX(DISP_MOUT_PHYCLK_HDMI_LINK_O_TMDS_CLKHI_USER, + "mout_phyclk_hdmi_link_o_tmds_clkhi_user", + mout_phyclk_hdmi_link_o_tmds_clkhi_user_p, + 
MUX_SEL_DISP1, 16, 1), + MUX(DISP_MOUT_HDMI_PHY_PIXEL, + "mout_phyclk_hdmi_phy_pixel_clko_user", + mout_phyclk_hdmi_phy_pixel_clko_user_p, + MUX_SEL_DISP1, 20, 1), + MUX(DISP_MOUT_PHYCLK_HDMI_PHY_REF_CLKO_USER, + "mout_phyclk_hdmi_phy_ref_clko_user", + mout_phyclk_hdmi_phy_ref_clko_user_p, + MUX_SEL_DISP1, 24, 1), + MUX(DISP_MOUT_PHYCLK_HDMI_PHY_TMDS_CLKO_USER, + "mout_phyclk_hdmi_phy_tmds_clko_user", + mout_phyclk_hdmi_phy_tmds_clko_user_p, + MUX_SEL_DISP1, 28, 1), + + MUX(DISP_MOUT_PHYCLK_MIPI_DPHY_4LMRXCLK_ESC0_USER, + "mout_phyclk_mipi_dphy_4lmrxclk_esc0_user", + mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p, + MUX_SEL_DISP2, 0, 1), + MUX(DISP_MOUT_SCLK_HDMI_PIXEL, "mout_sclk_hdmi_pixel", + mout_sclk_hdmi_pixel_p, + MUX_SEL_DISP2, 4, 1), + + MUX(DISP_MOUT_SCLK_HDMI_SPDIF, "mout_sclk_hdmi_spdif", + mout_sclk_hdmi_spdif_p, + MUX_SEL_DISP4, 4, 2), +}; + +struct samsung_div_clock disp_div_clks[] __initdata = { + DIV(DISP_DOUT_PCLK_DISP_111, "dout_pclk_disp_111", + "mout_aclk_disp_222_user", + DIV_DISP, 8, 4), + DIV(DISP_DOUT_SCLK_FIMD1_EXTCLKPLL, "dout_sclk_fimd1_extclkpll", + "mout_sclk_disp_pixel_user", + DIV_DISP, 12, 4), + DIV(DISP_DOUT_SCLK_HDMI_PHY_PIXEL_CLKI, + "dout_sclk_hdmi_phy_pixel_clki", + "mout_sclk_hdmi_pixel", + DIV_DISP, 16, 4), +}; + +struct samsung_gate_clock disp_gate_clks[] __initdata = { + GATE(DISP_MOUT_HDMI_PHY_PIXEL_USER, "sclk_hdmi_link_i_pixel", + "mout_phyclk_hdmi_phy_pixel_clko_user", + EN_SCLK_DISP0, 26, CLK_SET_RATE_PARENT, 0), + GATE(DISP_SCLK_PIXEL, "sclk_hdmi_phy_pixel_clki", + "dout_sclk_hdmi_phy_pixel_clki", + EN_SCLK_DISP0, 29, CLK_SET_RATE_PARENT, 0), + + GATE(DISP_CLK_DP, "clk_dptx_link", "mout_aclk_disp_222_user", + EN_IP_DISP, 4, 0, 0), + GATE(DISP_CLK_DPPHY, "clk_dptx_phy", "mout_aclk_disp_222_user", + EN_IP_DISP, 5, 0, 0), + GATE(DISP_CLK_DSIM1, "clk_dsim1", "mout_aclk_disp_222_user", + EN_IP_DISP, 6, 0, 0), + GATE(DISP_CLK_FIMD1, "clk_fimd1", "mout_aclk_disp_222_user", + EN_IP_DISP, 7, 0, 0), + GATE(DISP_CLK_HDMI, "clk_hdmi", "mout_aclk_disp_222_user", + EN_IP_DISP, 8, 0, 0), + GATE(DISP_CLK_HDMIPHY, "clk_hdmiphy", "mout_aclk_disp_222_user", + EN_IP_DISP, 9, 0, 0), + GATE(DISP_CLK_MIPIPHY, "clk_mipi_dphy", "mout_aclk_disp_222_user", + EN_IP_DISP, 10, 0, 0), + GATE(DISP_CLK_MIXER, "clk_mixer", "mout_aclk_disp_222_user", + EN_IP_DISP, 11, 0, 0), + GATE(DISP_CLK_PIXEL_DISP, "clk_pixel_disp", "mout_aclk_disp_222_user", + EN_IP_DISP, 12, CLK_IGNORE_UNUSED, 0), + GATE(DISP_CLK_PIXEL_MIXER, "clk_pixel_mixer", "mout_aclk_disp_222_user", + EN_IP_DISP, 13, CLK_IGNORE_UNUSED, 0), + GATE(DISP_CLK_SMMU_FIMD1M0, "clk_smmu3_fimd1m0", + "mout_aclk_disp_222_user", + EN_IP_DISP, 22, 0, 0), + GATE(DISP_CLK_SMMU_FIMD1M1, "clk_smmu3_fimd1m1", + "mout_aclk_disp_222_user", + EN_IP_DISP, 23, 0, 0), + GATE(DISP_CLK_SMMU_TV, "clk_smmu3_tv", "mout_aclk_disp_222_user", + EN_IP_DISP, 25, 0, 0), +}; + +static void __init exynos5260_clk_disp_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.mux_clks = disp_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks); + cmu.div_clks = disp_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(disp_div_clks); + cmu.gate_clks = disp_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(disp_gate_clks); + cmu.nr_clk_ids = DISP_NR_CLK; + cmu.clk_regs = disp_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp", + exynos5260_clk_disp_init); + + +/* CMU_EGL */ + +static unsigned long egl_clk_regs[] __initdata = { + 
EGL_PLL_LOCK, + EGL_PLL_CON0, + EGL_PLL_CON1, + EGL_PLL_FREQ_DET, + MUX_SEL_EGL, + MUX_ENABLE_EGL, + DIV_EGL, + DIV_EGL_PLL_FDET, + EN_ACLK_EGL, + EN_PCLK_EGL, + EN_SCLK_EGL, +}; + +PNAME(mout_egl_b_p) = {"mout_egl_pll", "dout_bus_pll"}; +PNAME(mout_egl_pll_p) = {"fin_pll", "fout_egl_pll"}; + +struct samsung_mux_clock egl_mux_clks[] __initdata = { + MUX(EGL_MOUT_EGL_PLL, "mout_egl_pll", mout_egl_pll_p, + MUX_SEL_EGL, 4, 1), + MUX(EGL_MOUT_EGL_B, "mout_egl_b", mout_egl_b_p, MUX_SEL_EGL, 16, 1), +}; + +struct samsung_div_clock egl_div_clks[] __initdata = { + DIV(EGL_DOUT_EGL1, "dout_egl1", "mout_egl_b", DIV_EGL, 0, 3), + DIV(EGL_DOUT_EGL2, "dout_egl2", "dout_egl1", DIV_EGL, 4, 3), + DIV(EGL_DOUT_ACLK_EGL, "dout_aclk_egl", "dout_egl2", DIV_EGL, 8, 3), + DIV(EGL_DOUT_PCLK_EGL, "dout_pclk_egl", "dout_egl_atclk", + DIV_EGL, 12, 3), + DIV(EGL_DOUT_EGL_ATCLK, "dout_egl_atclk", "dout_egl2", DIV_EGL, 16, 3), + DIV(EGL_DOUT_EGL_PCLK_DBG, "dout_egl_pclk_dbg", "dout_egl_atclk", + DIV_EGL, 20, 3), + DIV(EGL_DOUT_EGL_PLL, "dout_egl_pll", "mout_egl_b", DIV_EGL, 24, 3), +}; + +static struct samsung_pll_clock egl_pll_clks[] __initdata = { + PLL(pll_2550xx, EGL_FOUT_EGL_PLL, "fout_egl_pll", "fin_pll", + EGL_PLL_LOCK, EGL_PLL_CON0, + pll2550_24mhz_tbl), +}; + +static void __init exynos5260_clk_egl_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.pll_clks = egl_pll_clks; + cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks); + cmu.mux_clks = egl_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(egl_mux_clks); + cmu.div_clks = egl_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(egl_div_clks); + cmu.nr_clk_ids = EGL_NR_CLK; + cmu.clk_regs = egl_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl", + exynos5260_clk_egl_init); + + +/* CMU_FSYS */ + +static unsigned long fsys_clk_regs[] __initdata = { + MUX_SEL_FSYS0, + MUX_SEL_FSYS1, + EN_ACLK_FSYS, + EN_ACLK_FSYS_SECURE_RTIC, + EN_ACLK_FSYS_SECURE_SMMU_RTIC, + EN_SCLK_FSYS, + EN_IP_FSYS, + EN_IP_FSYS_SECURE_RTIC, + EN_IP_FSYS_SECURE_SMMU_RTIC, +}; + +PNAME(mout_phyclk_usbhost20_phyclk_user_p) = {"fin_pll", + "phyclk_usbhost20_phy_phyclock"}; +PNAME(mout_phyclk_usbhost20_freeclk_user_p) = {"fin_pll", + "phyclk_usbhost20_phy_freeclk"}; +PNAME(mout_phyclk_usbhost20_clk48mohci_user_p) = {"fin_pll", + "phyclk_usbhost20_phy_clk48mohci"}; +PNAME(mout_phyclk_usbdrd30_pipe_pclk_user_p) = {"fin_pll", + "phyclk_usbdrd30_udrd30_pipe_pclk"}; +PNAME(mout_phyclk_usbdrd30_phyclock_user_p) = {"fin_pll", + "phyclk_usbdrd30_udrd30_phyclock"}; + +struct samsung_mux_clock fsys_mux_clks[] __initdata = { + MUX(FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER, + "mout_phyclk_usbdrd30_phyclock_user", + mout_phyclk_usbdrd30_phyclock_user_p, + MUX_SEL_FSYS1, 0, 1), + MUX(FSYS_MOUT_PHYCLK_USBDRD30_PIPE_PCLK_USER, + "mout_phyclk_usbdrd30_pipe_pclk_user", + mout_phyclk_usbdrd30_pipe_pclk_user_p, + MUX_SEL_FSYS1, 4, 1), + MUX(FSYS_MOUT_PHYCLK_USBHOST20_CLK48MOHCI_USER, + "mout_phyclk_usbhost20_clk48mohci_user", + mout_phyclk_usbhost20_clk48mohci_user_p, + MUX_SEL_FSYS1, 8, 1), + MUX(FSYS_MOUT_PHYCLK_USBHOST20_FREECLK_USER, + "mout_phyclk_usbhost20_freeclk_user", + mout_phyclk_usbhost20_freeclk_user_p, + MUX_SEL_FSYS1, 12, 1), + MUX(FSYS_MOUT_PHYCLK_USBHOST20_PHYCLK_USER, + "mout_phyclk_usbhost20_phyclk_user", + mout_phyclk_usbhost20_phyclk_user_p, + MUX_SEL_FSYS1, 16, 1), +}; + +struct samsung_gate_clock fsys_gate_clks[] __initdata = { + GATE(FSYS_PHYCLK_USBHOST20, 
"phyclk_usbhost20_phyclock", + "mout_phyclk_usbdrd30_phyclock_user", + EN_SCLK_FSYS, 1, 0, 0), + GATE(FSYS_PHYCLK_USBDRD30, "phyclk_usbdrd30_udrd30_phyclock_g", + "mout_phyclk_usbdrd30_phyclock_user", + EN_SCLK_FSYS, 7, 0, 0), + + GATE(FSYS_CLK_MMC0, "clk_mmc0", "dout_aclk_fsys_200", + EN_IP_FSYS, 6, 0, 0), + GATE(FSYS_CLK_MMC1, "clk_mmc1", "dout_aclk_fsys_200", + EN_IP_FSYS, 7, 0, 0), + GATE(FSYS_CLK_MMC2, "clk_mmc2", "dout_aclk_fsys_200", + EN_IP_FSYS, 8, 0, 0), + GATE(FSYS_CLK_PDMA, "clk_pdma", "dout_aclk_fsys_200", + EN_IP_FSYS, 9, 0, 0), + GATE(FSYS_CLK_SROMC, "clk_sromc", "dout_aclk_fsys_200", + EN_IP_FSYS, 13, 0, 0), + GATE(FSYS_CLK_USBDRD30, "clk_usbdrd30", "dout_aclk_fsys_200", + EN_IP_FSYS, 14, 0, 0), + GATE(FSYS_CLK_USBHOST20, "clk_usbhost20", "dout_aclk_fsys_200", + EN_IP_FSYS, 15, 0, 0), + GATE(FSYS_CLK_USBLINK, "clk_usblink", "dout_aclk_fsys_200", + EN_IP_FSYS, 18, 0, 0), + GATE(FSYS_CLK_TSI, "clk_tsi", "dout_aclk_fsys_200", + EN_IP_FSYS, 20, 0, 0), + + GATE(FSYS_CLK_RTIC, "clk_rtic", "dout_aclk_fsys_200", + EN_IP_FSYS_SECURE_RTIC, 11, 0, 0), + GATE(FSYS_CLK_SMMU_RTIC, "clk_smmu_rtic", "dout_aclk_fsys_200", + EN_IP_FSYS_SECURE_SMMU_RTIC, 12, 0, 0), +}; + +static void __init exynos5260_clk_fsys_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.mux_clks = fsys_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks); + cmu.gate_clks = fsys_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(fsys_gate_clks); + cmu.nr_clk_ids = FSYS_NR_CLK; + cmu.clk_regs = fsys_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys", + exynos5260_clk_fsys_init); + + +/* CMU_G2D */ + +static unsigned long g2d_clk_regs[] __initdata = { + MUX_SEL_G2D, + MUX_STAT_G2D, + DIV_G2D, + EN_ACLK_G2D, + EN_ACLK_G2D_SECURE_SSS, + EN_ACLK_G2D_SECURE_SLIM_SSS, + EN_ACLK_G2D_SECURE_SMMU_SLIM_SSS, + EN_ACLK_G2D_SECURE_SMMU_SSS, + EN_ACLK_G2D_SECURE_SMMU_MDMA, + EN_ACLK_G2D_SECURE_SMMU_G2D, + EN_PCLK_G2D, + EN_PCLK_G2D_SECURE_SMMU_SLIM_SSS, + EN_PCLK_G2D_SECURE_SMMU_SSS, + EN_PCLK_G2D_SECURE_SMMU_MDMA, + EN_PCLK_G2D_SECURE_SMMU_G2D, + EN_IP_G2D, + EN_IP_G2D_SECURE_SSS, + EN_IP_G2D_SECURE_SLIM_SSS, + EN_IP_G2D_SECURE_SMMU_SLIM_SSS, + EN_IP_G2D_SECURE_SMMU_SSS, + EN_IP_G2D_SECURE_SMMU_MDMA, + EN_IP_G2D_SECURE_SMMU_G2D, +}; + +PNAME(mout_aclk_g2d_333_user_p) = {"fin_pll", "dout_aclk_g2d_333"}; + +struct samsung_mux_clock g2d_mux_clks[] __initdata = { + MUX(G2D_MOUT_ACLK_G2D_333_USER, "mout_aclk_g2d_333_user", + mout_aclk_g2d_333_user_p, + MUX_SEL_G2D, 0, 1), +}; + +struct samsung_div_clock g2d_div_clks[] __initdata = { + DIV(G2D_DOUT_PCLK_G2D_83, "dout_pclk_g2d_83", "mout_aclk_g2d_333_user", + DIV_G2D, 0, 3), +}; + +struct samsung_gate_clock g2d_gate_clks[] __initdata = { + GATE(G2D_CLK_G2D, "clk_g2d", "mout_aclk_g2d_333_user", + EN_IP_G2D, 4, 0, 0), + GATE(G2D_CLK_JPEG, "clk_jpeg", "mout_aclk_g2d_333_user", + EN_IP_G2D, 5, 0, 0), + GATE(G2D_CLK_MDMA, "clk_mdma", "mout_aclk_g2d_333_user", + EN_IP_G2D, 6, 0, 0), + GATE(G2D_CLK_SMMU3_JPEG, "clk_smmu3_jpeg", "mout_aclk_g2d_333_user", + EN_IP_G2D, 16, 0, 0), + + GATE(G2D_CLK_SSS, "clk_sss", "mout_aclk_g2d_333_user", + EN_IP_G2D_SECURE_SSS, 17, 0, 0), + + GATE(G2D_CLK_SLIM_SSS, "clk_slim_sss", "mout_aclk_g2d_333_user", + EN_IP_G2D_SECURE_SLIM_SSS, 11, 0, 0), + + GATE(G2D_CLK_SMMU_SLIM_SSS, "clk_smmu_slim_sss", + "mout_aclk_g2d_333_user", + EN_IP_G2D_SECURE_SMMU_SLIM_SSS, 13, 0, 0), + + GATE(G2D_CLK_SMMU_SSS, "clk_smmu_sss", 
"mout_aclk_g2d_333_user", + EN_IP_G2D_SECURE_SMMU_SSS, 14, 0, 0), + + GATE(G2D_CLK_SMMU_MDMA, "clk_smmu_mdma", "mout_aclk_g2d_333_user", + EN_IP_G2D_SECURE_SMMU_MDMA, 12, 0, 0), + + GATE(G2D_CLK_SMMU3_G2D, "clk_smmu3_g2d", "mout_aclk_g2d_333_user", + EN_IP_G2D_SECURE_SMMU_G2D, 15, 0, 0), +}; + +static void __init exynos5260_clk_g2d_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.mux_clks = g2d_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks); + cmu.div_clks = g2d_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(g2d_div_clks); + cmu.gate_clks = g2d_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(g2d_gate_clks); + cmu.nr_clk_ids = G2D_NR_CLK; + cmu.clk_regs = g2d_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d", + exynos5260_clk_g2d_init); + + +/* CMU_G3D */ + +static unsigned long g3d_clk_regs[] __initdata = { + G3D_PLL_LOCK, + G3D_PLL_CON0, + G3D_PLL_CON1, + G3D_PLL_FDET, + MUX_SEL_G3D, + DIV_G3D, + DIV_G3D_PLL_FDET, + EN_ACLK_G3D, + EN_PCLK_G3D, + EN_SCLK_G3D, + EN_IP_G3D, +}; + +PNAME(mout_g3d_pll_p) = {"fin_pll", "fout_g3d_pll"}; + +struct samsung_mux_clock g3d_mux_clks[] __initdata = { + MUX(G3D_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p, + MUX_SEL_G3D, 0, 1), +}; + +struct samsung_div_clock g3d_div_clks[] __initdata = { + DIV(G3D_DOUT_PCLK_G3D, "dout_pclk_g3d", "dout_aclk_g3d", DIV_G3D, 0, 3), + DIV(G3D_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_g3d_pll", DIV_G3D, 4, 3), +}; + +struct samsung_gate_clock g3d_gate_clks[] __initdata = { + GATE(G3D_CLK_G3D, "clk_g3d", "dout_aclk_g3d", EN_IP_G3D, 2, 0, 0), + GATE(G3D_CLK_G3D_HPM, "clk_g3d_hpm", "dout_aclk_g3d", + EN_IP_G3D, 3, 0, 0), +}; + +static struct samsung_pll_clock g3d_pll_clks[] __initdata = { + PLL(pll_2550, G3D_FOUT_G3D_PLL, "fout_g3d_pll", "fin_pll", + G3D_PLL_LOCK, G3D_PLL_CON0, + pll2550_24mhz_tbl), +}; + +static void __init exynos5260_clk_g3d_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.pll_clks = g3d_pll_clks; + cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks); + cmu.mux_clks = g3d_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(g3d_mux_clks); + cmu.div_clks = g3d_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(g3d_div_clks); + cmu.gate_clks = g3d_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(g3d_gate_clks); + cmu.nr_clk_ids = G3D_NR_CLK; + cmu.clk_regs = g3d_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d", + exynos5260_clk_g3d_init); + + +/* CMU_GSCL */ + +static unsigned long gscl_clk_regs[] __initdata = { + MUX_SEL_GSCL, + DIV_GSCL, + EN_ACLK_GSCL, + EN_ACLK_GSCL_FIMC, + EN_ACLK_GSCL_SECURE_SMMU_GSCL0, + EN_ACLK_GSCL_SECURE_SMMU_GSCL1, + EN_ACLK_GSCL_SECURE_SMMU_MSCL0, + EN_ACLK_GSCL_SECURE_SMMU_MSCL1, + EN_PCLK_GSCL, + EN_PCLK_GSCL_FIMC, + EN_PCLK_GSCL_SECURE_SMMU_GSCL0, + EN_PCLK_GSCL_SECURE_SMMU_GSCL1, + EN_PCLK_GSCL_SECURE_SMMU_MSCL0, + EN_PCLK_GSCL_SECURE_SMMU_MSCL1, + EN_SCLK_GSCL, + EN_SCLK_GSCL_FIMC, + EN_IP_GSCL, + EN_IP_GSCL_FIMC, + EN_IP_GSCL_SECURE_SMMU_GSCL0, + EN_IP_GSCL_SECURE_SMMU_GSCL1, + EN_IP_GSCL_SECURE_SMMU_MSCL0, + EN_IP_GSCL_SECURE_SMMU_MSCL1, +}; + +PNAME(mout_aclk_gscl_333_user_p) = {"fin_pll", "dout_aclk_gscl_333"}; +PNAME(mout_aclk_m2m_400_user_p) = {"fin_pll", "dout_aclk_gscl_400"}; +PNAME(mout_aclk_gscl_fimc_user_p) = {"fin_pll", "dout_aclk_gscl_400"}; +PNAME(mout_aclk_csis_p) = {"dout_aclk_csis_200", 
"mout_aclk_gscl_fimc_user"}; + +struct samsung_mux_clock gscl_mux_clks[] __initdata = { + MUX(GSCL_MOUT_ACLK_GSCL_333_USER, "mout_aclk_gscl_333_user", + mout_aclk_gscl_333_user_p, + MUX_SEL_GSCL, 0, 1), + MUX(GSCL_MOUT_ACLK_M2M_400_USER, "mout_aclk_m2m_400_user", + mout_aclk_m2m_400_user_p, + MUX_SEL_GSCL, 4, 1), + MUX(GSCL_MOUT_ACLK_GSCL_FIMC_USER, "mout_aclk_gscl_fimc_user", + mout_aclk_gscl_fimc_user_p, + MUX_SEL_GSCL, 8, 1), + MUX(GSCL_MOUT_ACLK_CSIS, "mout_aclk_csis", mout_aclk_csis_p, + MUX_SEL_GSCL, 24, 1), +}; + +struct samsung_div_clock gscl_div_clks[] __initdata = { + DIV(GSCL_DOUT_PCLK_M2M_100, "dout_pclk_m2m_100", + "mout_aclk_m2m_400_user", + DIV_GSCL, 0, 3), + DIV(GSCL_DOUT_ACLK_CSIS_200, "dout_aclk_csis_200", + "mout_aclk_m2m_400_user", + DIV_GSCL, 4, 3), +}; + +struct samsung_gate_clock gscl_gate_clks[] __initdata = { + GATE(GSCL_SCLK_CSIS0_WRAP, "sclk_csis0_wrap", "dout_aclk_csis_200", + EN_SCLK_GSCL_FIMC, 0, CLK_SET_RATE_PARENT, 0), + GATE(GSCL_SCLK_CSIS1_WRAP, "sclk_csis1_wrap", "dout_aclk_csis_200", + EN_SCLK_GSCL_FIMC, 1, CLK_SET_RATE_PARENT, 0), + + GATE(GSCL_CLK_GSCL0, "clk_gscl0", "mout_aclk_gscl_333_user", + EN_IP_GSCL, 2, 0, 0), + GATE(GSCL_CLK_GSCL1, "clk_gscl1", "mout_aclk_gscl_333_user", + EN_IP_GSCL, 3, 0, 0), + GATE(GSCL_CLK_MSCL0, "clk_mscl0", "mout_aclk_gscl_333_user", + EN_IP_GSCL, 4, 0, 0), + GATE(GSCL_CLK_MSCL1, "clk_mscl1", "mout_aclk_gscl_333_user", + EN_IP_GSCL, 5, 0, 0), + GATE(GSCL_CLK_PIXEL_GSCL0, "clk_pixel_gscl0", + "mout_aclk_gscl_333_user", + EN_IP_GSCL, 8, 0, 0), + GATE(GSCL_CLK_PIXEL_GSCL1, "clk_pixel_gscl1", + "mout_aclk_gscl_333_user", + EN_IP_GSCL, 9, 0, 0), + + GATE(GSCL_CLK_SMMU3_LITE_A, "clk_smmu3_lite_a", + "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 5, 0, 0), + GATE(GSCL_CLK_SMMU3_LITE_B, "clk_smmu3_lite_b", + "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 6, 0, 0), + GATE(GSCL_CLK_SMMU3_LITE_D, "clk_smmu3_lite_d", + "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 7, 0, 0), + GATE(GSCL_CLK_CSIS0, "clk_csis0", "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 8, 0, 0), + GATE(GSCL_CLK_CSIS1, "clk_csis1", "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 9, 0, 0), + GATE(GSCL_CLK_FIMC_LITE_A, "clk_fimc_lite_a", + "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 10, 0, 0), + GATE(GSCL_CLK_FIMC_LITE_B, "clk_fimc_lite_b", + "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 11, 0, 0), + GATE(GSCL_CLK_FIMC_LITE_D, "clk_fimc_lite_d", + "mout_aclk_gscl_fimc_user", + EN_IP_GSCL_FIMC, 12, 0, 0), + + GATE(GSCL_CLK_SMMU3_GSCL0, "clk_smmu3_gscl0", + "mout_aclk_gscl_333_user", + EN_IP_GSCL_SECURE_SMMU_GSCL0, 17, 0, 0), + GATE(GSCL_CLK_SMMU3_GSCL1, "clk_smmu3_gscl1", "mout_aclk_gscl_333_user", + EN_IP_GSCL_SECURE_SMMU_GSCL1, 18, 0, 0), + GATE(GSCL_CLK_SMMU3_MSCL0, "clk_smmu3_mscl0", + "mout_aclk_m2m_400_user", + EN_IP_GSCL_SECURE_SMMU_MSCL0, 19, 0, 0), + GATE(GSCL_CLK_SMMU3_MSCL1, "clk_smmu3_mscl1", + "mout_aclk_m2m_400_user", + EN_IP_GSCL_SECURE_SMMU_MSCL1, 20, 0, 0), +}; + +static void __init exynos5260_clk_gscl_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.mux_clks = gscl_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks); + cmu.div_clks = gscl_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(gscl_div_clks); + cmu.gate_clks = gscl_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(gscl_gate_clks); + cmu.nr_clk_ids = GSCL_NR_CLK; + cmu.clk_regs = gscl_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl", + 
exynos5260_clk_gscl_init); + + +/* CMU_ISP */ + +static unsigned long isp_clk_regs[] __initdata = { + MUX_SEL_ISP0, + MUX_SEL_ISP1, + DIV_ISP, + EN_ACLK_ISP0, + EN_ACLK_ISP1, + EN_PCLK_ISP0, + EN_PCLK_ISP1, + EN_SCLK_ISP, + EN_IP_ISP0, + EN_IP_ISP1, +}; + +PNAME(mout_isp_400_user_p) = {"fin_pll", "dout_aclk_isp1_400"}; +PNAME(mout_isp_266_user_p) = {"fin_pll", "dout_aclk_isp1_266"}; + +struct samsung_mux_clock isp_mux_clks[] __initdata = { + MUX(ISP_MOUT_ISP_266_USER, "mout_isp_266_user", mout_isp_266_user_p, + MUX_SEL_ISP0, 0, 1), + MUX(ISP_MOUT_ISP_400_USER, "mout_isp_400_user", mout_isp_400_user_p, + MUX_SEL_ISP0, 4, 1), +}; + +struct samsung_div_clock isp_div_clks[] __initdata = { + DIV(ISP_DOUT_PCLK_ISP_66, "dout_pclk_isp_66", "mout_kfc", + DIV_ISP, 0, 3), + DIV(ISP_DOUT_PCLK_ISP_133, "dout_pclk_isp_133", "mout_kfc", + DIV_ISP, 4, 4), + DIV(ISP_DOUT_CA5_ATCLKIN, "dout_ca5_atclkin", "mout_kfc", + DIV_ISP, 12, 3), + DIV(ISP_DOUT_CA5_PCLKDBG, "dout_ca5_pclkdbg", "mout_kfc", + DIV_ISP, 16, 4), + DIV(ISP_DOUT_SCLK_MPWM, "dout_sclk_mpwm", "mout_kfc", DIV_ISP, 20, 2), +}; + +struct samsung_gate_clock isp_gate_clks[] __initdata = { + GATE(ISP_CLK_GIC, "clk_isp_gic", "mout_aclk_isp1_266", + EN_IP_ISP0, 15, 0, 0), + + GATE(ISP_CLK_CA5, "clk_isp_ca5", "mout_aclk_isp1_266", + EN_IP_ISP1, 1, 0, 0), + GATE(ISP_CLK_FIMC_DRC, "clk_isp_fimc_drc", "mout_aclk_isp1_266", + EN_IP_ISP1, 2, 0, 0), + GATE(ISP_CLK_FIMC_FD, "clk_isp_fimc_fd", "mout_aclk_isp1_266", + EN_IP_ISP1, 3, 0, 0), + GATE(ISP_CLK_FIMC, "clk_isp_fimc", "mout_aclk_isp1_266", + EN_IP_ISP1, 4, 0, 0), + GATE(ISP_CLK_FIMC_SCALERC, "clk_isp_fimc_scalerc", + "mout_aclk_isp1_266", + EN_IP_ISP1, 5, 0, 0), + GATE(ISP_CLK_FIMC_SCALERP, "clk_isp_fimc_scalerp", + "mout_aclk_isp1_266", + EN_IP_ISP1, 6, 0, 0), + GATE(ISP_CLK_I2C0, "clk_isp_i2c0", "mout_aclk_isp1_266", + EN_IP_ISP1, 7, 0, 0), + GATE(ISP_CLK_I2C1, "clk_isp_i2c1", "mout_aclk_isp1_266", + EN_IP_ISP1, 8, 0, 0), + GATE(ISP_CLK_MCUCTL, "clk_isp_mcuctl", "mout_aclk_isp1_266", + EN_IP_ISP1, 9, 0, 0), + GATE(ISP_CLK_MPWM, "clk_isp_mpwm", "mout_aclk_isp1_266", + EN_IP_ISP1, 10, 0, 0), + GATE(ISP_CLK_MTCADC, "clk_isp_mtcadc", "mout_aclk_isp1_266", + EN_IP_ISP1, 11, 0, 0), + GATE(ISP_CLK_PWM, "clk_isp_pwm", "mout_aclk_isp1_266", + EN_IP_ISP1, 14, 0, 0), + GATE(ISP_CLK_SMMU_DRC, "clk_smmu_drc", "mout_aclk_isp1_266", + EN_IP_ISP1, 21, 0, 0), + GATE(ISP_CLK_SMMU_FD, "clk_smmu_fd", "mout_aclk_isp1_266", + EN_IP_ISP1, 22, 0, 0), + GATE(ISP_CLK_SMMU_ISP, "clk_smmu_isp", "mout_aclk_isp1_266", + EN_IP_ISP1, 23, 0, 0), + GATE(ISP_CLK_SMMU_ISPCX, "clk_smmu_ispcx", "mout_aclk_isp1_266", + EN_IP_ISP1, 24, 0, 0), + GATE(ISP_CLK_SMMU_SCALERC, "clk_isp_smmu_scalerc", + "mout_aclk_isp1_266", + EN_IP_ISP1, 25, 0, 0), + GATE(ISP_CLK_SMMU_SCALERP, "clk_isp_smmu_scalerp", + "mout_aclk_isp1_266", + EN_IP_ISP1, 26, 0, 0), + GATE(ISP_CLK_SPI0, "clk_isp_spi0", "mout_aclk_isp1_266", + EN_IP_ISP1, 27, 0, 0), + GATE(ISP_CLK_SPI1, "clk_isp_spi1", "mout_aclk_isp1_266", + EN_IP_ISP1, 28, 0, 0), + GATE(ISP_CLK_WDT, "clk_isp_wdt", "mout_aclk_isp1_266", + EN_IP_ISP1, 31, 0, 0), + GATE(ISP_CLK_UART, "clk_isp_uart", "mout_aclk_isp1_266", + EN_IP_ISP1, 30, 0, 0), + + GATE(ISP_SCLK_UART_EXT, "sclk_isp_uart_ext", "fin_pll", + EN_SCLK_ISP, 7, CLK_SET_RATE_PARENT, 0), + GATE(ISP_SCLK_SPI1_EXT, "sclk_isp_spi1_ext", "fin_pll", + EN_SCLK_ISP, 8, CLK_SET_RATE_PARENT, 0), + GATE(ISP_SCLK_SPI0_EXT, "sclk_isp_spi0_ext", "fin_pll", + EN_SCLK_ISP, 9, CLK_SET_RATE_PARENT, 0), +}; + +static void __init exynos5260_clk_isp_init(struct device_node *np) 
+{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.mux_clks = isp_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks); + cmu.div_clks = isp_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(isp_div_clks); + cmu.gate_clks = isp_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(isp_gate_clks); + cmu.nr_clk_ids = ISP_NR_CLK; + cmu.clk_regs = isp_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp", + exynos5260_clk_isp_init); + + +/* CMU_KFC */ + +static unsigned long kfc_clk_regs[] __initdata = { + KFC_PLL_LOCK, + KFC_PLL_CON0, + KFC_PLL_CON1, + KFC_PLL_FDET, + MUX_SEL_KFC0, + MUX_SEL_KFC2, + DIV_KFC, + DIV_KFC_PLL_FDET, + EN_ACLK_KFC, + EN_PCLK_KFC, + EN_SCLK_KFC, + EN_IP_KFC, +}; + +PNAME(mout_kfc_pll_p) = {"fin_pll", "fout_kfc_pll"}; +PNAME(mout_kfc_p) = {"mout_kfc_pll", "dout_media_pll"}; + +struct samsung_mux_clock kfc_mux_clks[] __initdata = { + MUX(KFC_MOUT_KFC_PLL, "mout_kfc_pll", mout_kfc_pll_p, + MUX_SEL_KFC0, 0, 1), + MUX(KFC_MOUT_KFC, "mout_kfc", mout_kfc_p, MUX_SEL_KFC2, 0, 1), +}; + +struct samsung_div_clock kfc_div_clks[] __initdata = { + DIV(KFC_DOUT_KFC1, "dout_kfc1", "mout_kfc", DIV_KFC, 0, 3), + DIV(KFC_DOUT_KFC2, "dout_kfc2", "dout_kfc1", DIV_KFC, 4, 3), + DIV(KFC_DOUT_KFC_ATCLK, "dout_kfc_atclk", "dout_kfc2", DIV_KFC, 8, 3), + DIV(KFC_DOUT_KFC_PCLK_DBG, "dout_kfc_pclk_dbg", "dout_kfc2", + DIV_KFC, 12, 3), + DIV(KFC_DOUT_ACLK_KFC, "dout_aclk_kfc", "dout_kfc2", DIV_KFC, 16, 3), + DIV(KFC_DOUT_PCLK_KFC, "dout_pclk_kfc", "dout_kfc2", DIV_KFC, 20, 3), + DIV(KFC_DOUT_KFC_PLL, "dout_kfc_pll", "mout_kfc", DIV_KFC, 24, 3), +}; + +static struct samsung_pll_clock kfc_pll_clks[] __initdata = { + PLL(pll_2550xx, KFC_FOUT_KFC_PLL, "fout_kfc_pll", "fin_pll", + KFC_PLL_LOCK, KFC_PLL_CON0, + pll2550_24mhz_tbl), +}; + +static void __init exynos5260_clk_kfc_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.pll_clks = kfc_pll_clks; + cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks); + cmu.mux_clks = kfc_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(kfc_mux_clks); + cmu.div_clks = kfc_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(kfc_div_clks); + cmu.nr_clk_ids = KFC_NR_CLK; + cmu.clk_regs = kfc_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc", + exynos5260_clk_kfc_init); + + +/* CMU_MFC */ + +static unsigned long mfc_clk_regs[] __initdata = { + MUX_SEL_MFC, + DIV_MFC, + EN_ACLK_MFC, + EN_ACLK_SECURE_SMMU2_MFC, + EN_PCLK_MFC, + EN_PCLK_SECURE_SMMU2_MFC, + EN_IP_MFC, + EN_IP_MFC_SECURE_SMMU2_MFC, +}; + +PNAME(mout_aclk_mfc_333_user_p) = {"fin_pll", "dout_aclk_mfc_333"}; + +struct samsung_mux_clock mfc_mux_clks[] __initdata = { + MUX(MFC_MOUT_ACLK_MFC_333_USER, "mout_aclk_mfc_333_user", + mout_aclk_mfc_333_user_p, + MUX_SEL_MFC, 0, 1), +}; + +struct samsung_div_clock mfc_div_clks[] __initdata = { + DIV(MFC_DOUT_PCLK_MFC_83, "dout_pclk_mfc_83", "mout_aclk_mfc_333_user", + DIV_MFC, 0, 3), +}; + +struct samsung_gate_clock mfc_gate_clks[] __initdata = { + GATE(MFC_CLK_MFC, "clk_mfc", "mout_aclk_mfc_333_user", + EN_IP_MFC, 1, 0, 0), + GATE(MFC_CLK_SMMU2_MFCM0, "clk_smmu2_mfcm0", "mout_aclk_mfc_333_user", + EN_IP_MFC_SECURE_SMMU2_MFC, 6, 0, 0), + GATE(MFC_CLK_SMMU2_MFCM1, "clk_smmu2_mfcm1", "mout_aclk_mfc_333_user", + EN_IP_MFC_SECURE_SMMU2_MFC, 7, 0, 0), +}; + +static void __init exynos5260_clk_mfc_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu 
= {0}; + + cmu.mux_clks = mfc_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks); + cmu.div_clks = mfc_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(mfc_div_clks); + cmu.gate_clks = mfc_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(mfc_gate_clks); + cmu.nr_clk_ids = MFC_NR_CLK; + cmu.clk_regs = mfc_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc", + exynos5260_clk_mfc_init); + + +/* CMU_MIF */ + +static unsigned long mif_clk_regs[] __initdata = { + MEM_PLL_LOCK, + BUS_PLL_LOCK, + MEDIA_PLL_LOCK, + MEM_PLL_CON0, + MEM_PLL_CON1, + MEM_PLL_FDET, + BUS_PLL_CON0, + BUS_PLL_CON1, + BUS_PLL_FDET, + MEDIA_PLL_CON0, + MEDIA_PLL_CON1, + MEDIA_PLL_FDET, + MUX_SEL_MIF, + DIV_MIF, + DIV_MIF_PLL_FDET, + EN_ACLK_MIF, + EN_ACLK_MIF_SECURE_DREX1_TZ, + EN_ACLK_MIF_SECURE_DREX0_TZ, + EN_ACLK_MIF_SECURE_INTMEM, + EN_PCLK_MIF, + EN_PCLK_MIF_SECURE_MONOCNT, + EN_PCLK_MIF_SECURE_RTC_APBIF, + EN_PCLK_MIF_SECURE_DREX1_TZ, + EN_PCLK_MIF_SECURE_DREX0_TZ, + EN_SCLK_MIF, + EN_IP_MIF, + EN_IP_MIF_SECURE_MONOCNT, + EN_IP_MIF_SECURE_RTC_APBIF, + EN_IP_MIF_SECURE_DREX1_TZ, + EN_IP_MIF_SECURE_DREX0_TZ, + EN_IP_MIF_SECURE_INTEMEM, +}; + +PNAME(mout_mem_pll_p) = {"fin_pll", "fout_mem_pll"}; +PNAME(mout_bus_pll_p) = {"fin_pll", "fout_bus_pll"}; +PNAME(mout_media_pll_p) = {"fin_pll", "fout_media_pll"}; +PNAME(mout_mif_drex_p) = {"dout_mem_pll", "dout_bus_pll"}; +PNAME(mout_mif_drex2x_p) = {"dout_mem_pll", "dout_bus_pll"}; +PNAME(mout_clkm_phy_p) = {"mout_mif_drex", "dout_media_pll"}; +PNAME(mout_clk2x_phy_p) = {"mout_mif_drex2x", "dout_media_pll"}; + +struct samsung_mux_clock mif_mux_clks[] __initdata = { + MUX(MIF_MOUT_MEM_PLL, "mout_mem_pll", mout_mem_pll_p, + MUX_SEL_MIF, 0, 1), + MUX(MIF_MOUT_BUS_PLL, "mout_bus_pll", mout_bus_pll_p, + MUX_SEL_MIF, 4, 1), + MUX(MIF_MOUT_MEDIA_PLL, "mout_media_pll", mout_media_pll_p, + MUX_SEL_MIF, 8, 1), + MUX(MIF_MOUT_MIF_DREX, "mout_mif_drex", mout_mif_drex_p, + MUX_SEL_MIF, 12, 1), + MUX(MIF_MOUT_CLKM_PHY, "mout_clkm_phy", mout_clkm_phy_p, + MUX_SEL_MIF, 16, 1), + MUX(MIF_MOUT_MIF_DREX2X, "mout_mif_drex2x", mout_mif_drex2x_p, + MUX_SEL_MIF, 20, 1), + MUX(MIF_MOUT_CLK2X_PHY, "mout_clk2x_phy", mout_clk2x_phy_p, + MUX_SEL_MIF, 24, 1), +}; + +struct samsung_div_clock mif_div_clks[] __initdata = { + DIV(MIF_DOUT_MEDIA_PLL, "dout_media_pll", "mout_media_pll", + DIV_MIF, 0, 3), + DIV(MIF_DOUT_MEM_PLL, "dout_mem_pll", "mout_mem_pll", + DIV_MIF, 4, 3), + DIV(MIF_DOUT_BUS_PLL, "dout_bus_pll", "mout_bus_pll", + DIV_MIF, 8, 3), + DIV(MIF_DOUT_CLKM_PHY, "dout_clkm_phy", "mout_clkm_phy", + DIV_MIF, 12, 3), + DIV(MIF_DOUT_CLK2X_PHY, "dout_clk2x_phy", "mout_clk2x_phy", + DIV_MIF, 16, 4), + DIV(MIF_DOUT_ACLK_MIF_466, "dout_aclk_mif_466", "dout_clk2x_phy", + DIV_MIF, 20, 3), + DIV(MIF_DOUT_ACLK_BUS_200, "dout_aclk_bus_200", "dout_bus_pll", + DIV_MIF, 24, 3), + DIV(MIF_DOUT_ACLK_BUS_100, "dout_aclk_bus_100", "dout_bus_pll", + DIV_MIF, 28, 4), +}; + +struct samsung_gate_clock mif_gate_clks[] __initdata = { + GATE(MIF_CLK_LPDDR3PHY_WRAP0, "clk_lpddr3phy_wrap0", "dout_clk2x_phy", + EN_IP_MIF, 12, CLK_IGNORE_UNUSED, 0), + GATE(MIF_CLK_LPDDR3PHY_WRAP1, "clk_lpddr3phy_wrap1", "dout_clk2x_phy", + EN_IP_MIF, 13, CLK_IGNORE_UNUSED, 0), + + GATE(MIF_CLK_MONOCNT, "clk_monocnt", "dout_aclk_bus_100", + EN_IP_MIF_SECURE_MONOCNT, 22, + CLK_IGNORE_UNUSED, 0), + + GATE(MIF_CLK_MIF_RTC, "clk_mif_rtc", "dout_aclk_bus_100", + EN_IP_MIF_SECURE_RTC_APBIF, 23, + CLK_IGNORE_UNUSED, 0), + + GATE(MIF_CLK_DREX1, 
"clk_drex1", "dout_aclk_mif_466", + EN_IP_MIF_SECURE_DREX1_TZ, 9, + CLK_IGNORE_UNUSED, 0), + + GATE(MIF_CLK_DREX0, "clk_drex0", "dout_aclk_mif_466", + EN_IP_MIF_SECURE_DREX0_TZ, 9, + CLK_IGNORE_UNUSED, 0), + + GATE(MIF_CLK_INTMEM, "clk_intmem", "dout_aclk_bus_200", + EN_IP_MIF_SECURE_INTEMEM, 11, + CLK_IGNORE_UNUSED, 0), + + GATE(MIF_SCLK_LPDDR3PHY_WRAP_U0, "sclk_lpddr3phy_wrap_u0", + "dout_clkm_phy", EN_SCLK_MIF, 0, + CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), + GATE(MIF_SCLK_LPDDR3PHY_WRAP_U1, "sclk_lpddr3phy_wrap_u1", + "dout_clkm_phy", EN_SCLK_MIF, 1, + CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), +}; + +static struct samsung_pll_clock mif_pll_clks[] __initdata = { + PLL(pll_2550xx, MIF_FOUT_MEM_PLL, "fout_mem_pll", "fin_pll", + MEM_PLL_LOCK, MEM_PLL_CON0, + pll2550_24mhz_tbl), + PLL(pll_2550xx, MIF_FOUT_BUS_PLL, "fout_bus_pll", "fin_pll", + BUS_PLL_LOCK, BUS_PLL_CON0, + pll2550_24mhz_tbl), + PLL(pll_2550xx, MIF_FOUT_MEDIA_PLL, "fout_media_pll", "fin_pll", + MEDIA_PLL_LOCK, MEDIA_PLL_CON0, + pll2550_24mhz_tbl), +}; + +static void __init exynos5260_clk_mif_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.pll_clks = mif_pll_clks; + cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks); + cmu.mux_clks = mif_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(mif_mux_clks); + cmu.div_clks = mif_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(mif_div_clks); + cmu.gate_clks = mif_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(mif_gate_clks); + cmu.nr_clk_ids = MIF_NR_CLK; + cmu.clk_regs = mif_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif", + exynos5260_clk_mif_init); + + +/* CMU_PERI */ + +static unsigned long peri_clk_regs[] __initdata = { + MUX_SEL_PERI, + MUX_SEL_PERI1, + DIV_PERI, + EN_PCLK_PERI0, + EN_PCLK_PERI1, + EN_PCLK_PERI2, + EN_PCLK_PERI3, + EN_PCLK_PERI_SECURE_CHIPID, + EN_PCLK_PERI_SECURE_PROVKEY0, + EN_PCLK_PERI_SECURE_PROVKEY1, + EN_PCLK_PERI_SECURE_SECKEY, + EN_PCLK_PERI_SECURE_ANTIRBKCNT, + EN_PCLK_PERI_SECURE_TOP_RTC, + EN_PCLK_PERI_SECURE_TZPC, + EN_SCLK_PERI, + EN_SCLK_PERI_SECURE_TOP_RTC, + EN_IP_PERI0, + EN_IP_PERI1, + EN_IP_PERI2, + EN_IP_PERI_SECURE_CHIPID, + EN_IP_PERI_SECURE_PROVKEY0, + EN_IP_PERI_SECURE_PROVKEY1, + EN_IP_PERI_SECURE_SECKEY, + EN_IP_PERI_SECURE_ANTIRBKCNT, + EN_IP_PERI_SECURE_TOP_RTC, + EN_IP_PERI_SECURE_TZPC, +}; + +PNAME(mout_sclk_pcm_p) = {"ioclk_pcm_extclk", "fin_pll", "dout_aclk_peri_aud", + "phyclk_hdmi_phy_ref_cko"}; +PNAME(mout_sclk_i2scod_p) = {"ioclk_i2s_cdclk", "fin_pll", "dout_aclk_peri_aud", + "phyclk_hdmi_phy_ref_cko"}; +PNAME(mout_sclk_spdif_p) = {"ioclk_spdif_extclk", "fin_pll", + "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"}; + +struct samsung_mux_clock peri_mux_clks[] __initdata = { + MUX(PERI_MOUT_SCLK_PCM, "mout_sclk_pcm", mout_sclk_pcm_p, + MUX_SEL_PERI1, 4, 2), + MUX(PERI_MOUT_SCLK_I2SCOD, "mout_sclk_i2scod", mout_sclk_i2scod_p, + MUX_SEL_PERI1, 12, 2), + MUX(PERI_MOUT_SCLK_SPDIF, "mout_sclk_spdif", mout_sclk_spdif_p, + MUX_SEL_PERI1, 20, 2), +}; + +struct samsung_div_clock peri_div_clks[] __initdata = { + DIV(PERI_DOUT_PCM, "dout_pcm", "mout_sclk_pcm", DIV_PERI, 0, 8), + DIV(PERI_DOUT_I2S, "dout_i2s", "mout_sclk_i2scod", DIV_PERI, 8, 6), +}; + +struct samsung_gate_clock peri_gate_clks[] __initdata = { + GATE(PERI_SCLK_PCM1, "sclk_pcm1", "dout_pcm", EN_SCLK_PERI, 0, + CLK_SET_RATE_PARENT, 0), + GATE(PERI_SCLK_I2S, "sclk_i2s", "dout_i2s", EN_SCLK_PERI, 1, + CLK_SET_RATE_PARENT, 0), + 
GATE(PERI_SCLK_SPDIF, "sclk_spdif", "dout_sclk_peri_spi0_b", + EN_SCLK_PERI, 2, CLK_SET_RATE_PARENT, 0), + GATE(PERI_SCLK_SPI0, "sclk_spi0", "dout_sclk_peri_spi0_b", + EN_SCLK_PERI, 7, CLK_SET_RATE_PARENT, 0), + GATE(PERI_SCLK_SPI1, "sclk_spi1", "dout_sclk_peri_spi1_b", + EN_SCLK_PERI, 8, CLK_SET_RATE_PARENT, 0), + GATE(PERI_SCLK_SPI2, "sclk_spi2", "dout_sclk_peri_spi2_b", + EN_SCLK_PERI, 9, CLK_SET_RATE_PARENT, 0), + GATE(PERI_SCLK_UART0, "sclk_uart0", "dout_sclk_peri_uart0", + EN_SCLK_PERI, 10, CLK_SET_RATE_PARENT, 0), + GATE(PERI_SCLK_UART1, "sclk_uart1", "dout_sclk_peri_uart1", + EN_SCLK_PERI, 11, CLK_SET_RATE_PARENT, 0), + GATE(PERI_SCLK_UART2, "sclk_uart2", "dout_sclk_peri_uart2", + EN_SCLK_PERI, 12, CLK_SET_RATE_PARENT, 0), + + GATE(PERI_CLK_ABB, "clk_abb", "dout_aclk_peri_66", + EN_IP_PERI0, 1, 0, 0), + GATE(PERI_CLK_EFUSE_WRITER, "clk_efuse_writer", "dout_aclk_peri_66", + EN_IP_PERI0, 5, 0, 0), + GATE(PERI_CLK_HDMICEC, "clk_hdmicec", "dout_aclk_peri_66", + EN_IP_PERI0, 6, 0, 0), + GATE(PERI_CLK_I2C10, "clk_i2c10", "dout_aclk_peri_66", + EN_IP_PERI0, 7, 0, 0), + GATE(PERI_CLK_I2C11, "clk_i2c11", "dout_aclk_peri_66", + EN_IP_PERI0, 8, 0, 0), + GATE(PERI_CLK_I2C8, "clk_i2c8", "dout_aclk_peri_66", + EN_IP_PERI0, 9, 0, 0), + GATE(PERI_CLK_I2C9, "clk_i2c9", "dout_aclk_peri_66", + EN_IP_PERI0, 10, 0, 0), + GATE(PERI_CLK_I2C4, "clk_i2c4", "dout_aclk_peri_66", + EN_IP_PERI0, 11, 0, 0), + GATE(PERI_CLK_I2C5, "clk_i2c5", "dout_aclk_peri_66", + EN_IP_PERI0, 12, 0, 0), + GATE(PERI_CLK_I2C6, "clk_i2c6", "dout_aclk_peri_66", + EN_IP_PERI0, 13, 0, 0), + GATE(PERI_CLK_I2C7, "clk_i2c7", "dout_aclk_peri_66", + EN_IP_PERI0, 14, 0, 0), + GATE(PERI_CLK_I2CHDMI, "clk_i2chdmi", "dout_aclk_peri_66", + EN_IP_PERI0, 15, 0, 0), + GATE(PERI_CLK_I2S, "clk_peri_i2s", "dout_aclk_peri_66", + EN_IP_PERI0, 16, 0, 0), + GATE(PERI_CLK_MCT, "clk_mct", "dout_aclk_peri_66", + EN_IP_PERI0, 17, 0, 0), + GATE(PERI_CLK_PCM, "clk_peri_pcm", "dout_aclk_peri_66", + EN_IP_PERI0, 18, 0, 0), + GATE(PERI_CLK_HSIC0, "clk_hsic0", "dout_aclk_peri_66", + EN_IP_PERI0, 20, 0, 0), + GATE(PERI_CLK_HSIC1, "clk_hsic1", "dout_aclk_peri_66", + EN_IP_PERI0, 21, 0, 0), + GATE(PERI_CLK_HSIC2, "clk_hsic2", "dout_aclk_peri_66", + EN_IP_PERI0, 22, 0, 0), + GATE(PERI_CLK_HSIC3, "clk_hsic3", "dout_aclk_peri_66", + EN_IP_PERI0, 23, 0, 0), + GATE(PERI_CLK_WDT_EGL, "clk_wdt_egl", "dout_aclk_peri_66", + EN_IP_PERI0, 24, 0, 0), + GATE(PERI_CLK_WDT_KFC, "clk_wdt_kfc", "dout_aclk_peri_66", + EN_IP_PERI0, 25, 0, 0), + + GATE(PERI_CLK_UART4, "clk_uart4", "dout_aclk_peri_66", + EN_IP_PERI2, 0, 0, 0), + GATE(PERI_CLK_PWM, "clk_pwm", "dout_aclk_peri_66", + EN_IP_PERI2, 3, 0, 0), + GATE(PERI_CLK_SPDIF, "clk_spdif", "dout_aclk_peri_66", + EN_IP_PERI2, 6, 0, 0), + GATE(PERI_CLK_SPI0, "clk_spi0", "dout_aclk_peri_66", + EN_IP_PERI2, 7, 0, 0), + GATE(PERI_CLK_SPI1, "clk_spi1", "dout_aclk_peri_66", + EN_IP_PERI2, 8, 0, 0), + GATE(PERI_CLK_SPI2, "clk_spi2", "dout_aclk_peri_66", + EN_IP_PERI2, 9, 0, 0), + GATE(PERI_CLK_TMU0, "clk_tmu0", "dout_aclk_peri_66", + EN_IP_PERI2, 10, 0, 0), + GATE(PERI_CLK_TMU1, "clk_tmu1", "dout_aclk_peri_66", + EN_IP_PERI2, 11, 0, 0), + GATE(PERI_CLK_TMU2, "clk_tmu2", "dout_aclk_peri_66", + EN_IP_PERI2, 12, 0, 0), + GATE(PERI_CLK_TMU3, "clk_tmu3", "dout_aclk_peri_66", + EN_IP_PERI2, 13, 0, 0), + GATE(PERI_CLK_TMU4, "clk_tmu4", "dout_aclk_peri_66", + EN_IP_PERI2, 14, 0, 0), + GATE(PERI_CLK_ADC, "clk_adc", "dout_aclk_peri_66", + EN_IP_PERI2, 18, 0, 0), + GATE(PERI_CLK_UART0, "clk_uart0", "dout_aclk_peri_66", + EN_IP_PERI2, 19, 0, 0), + 
GATE(PERI_CLK_UART1, "clk_uart1", "dout_aclk_peri_66", + EN_IP_PERI2, 20, 0, 0), + GATE(PERI_CLK_UART2, "clk_uart2", "dout_aclk_peri_66", + EN_IP_PERI2, 21, 0, 0), + + GATE(PERI_CLK_CHIPID, "clk_chipid", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_CHIPID, 2, 0, 0), + + GATE(PERI_CLK_PROVKEY0, "clk_provkey0", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_PROVKEY0, 1, 0, 0), + + GATE(PERI_CLK_PROVKEY1, "clk_provkey1", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_PROVKEY1, 2, 0, 0), + + GATE(PERI_CLK_SECKEY, "clk_seckey", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_SECKEY, 5, 0, 0), + + GATE(PERI_CLK_TOP_RTC, "clk_top_rtc", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TOP_RTC, 5, 0, 0), + + GATE(PERI_CLK_TZPC0, "clk_tzpc0", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 10, 0, 0), + GATE(PERI_CLK_TZPC1, "clk_tzpc1", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 11, 0, 0), + GATE(PERI_CLK_TZPC2, "clk_tzpc2", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 12, 0, 0), + GATE(PERI_CLK_TZPC3, "clk_tzpc3", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 13, 0, 0), + GATE(PERI_CLK_TZPC4, "clk_tzpc4", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 14, 0, 0), + GATE(PERI_CLK_TZPC5, "clk_tzpc5", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 15, 0, 0), + GATE(PERI_CLK_TZPC6, "clk_tzpc6", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 16, 0, 0), + GATE(PERI_CLK_TZPC7, "clk_tzpc7", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 17, 0, 0), + GATE(PERI_CLK_TZPC8, "clk_tzpc8", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 18, 0, 0), + GATE(PERI_CLK_TZPC9, "clk_tzpc9", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 19, 0, 0), + GATE(PERI_CLK_TZPC10, "clk_tzpc10", "dout_aclk_peri_66", + EN_IP_PERI_SECURE_TZPC, 20, 0, 0), +}; + +static void __init exynos5260_clk_peri_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.mux_clks = peri_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks); + cmu.div_clks = peri_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(peri_div_clks); + cmu.gate_clks = peri_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(peri_gate_clks); + cmu.nr_clk_ids = PERI_NR_CLK; + cmu.clk_regs = peri_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri", + exynos5260_clk_peri_init); + + +/* CMU_TOP */ + +static unsigned long top_clk_regs[] __initdata = { + DISP_PLL_LOCK, + AUD_PLL_LOCK, + DISP_PLL_CON0, + DISP_PLL_CON1, + DISP_PLL_FDET, + AUD_PLL_CON0, + AUD_PLL_CON1, + AUD_PLL_CON2, + AUD_PLL_FDET, + MUX_SEL_TOP_PLL0, + MUX_SEL_TOP_MFC, + MUX_SEL_TOP_G2D, + MUX_SEL_TOP_GSCL, + MUX_SEL_TOP_ISP10, + MUX_SEL_TOP_ISP11, + MUX_SEL_TOP_DISP0, + MUX_SEL_TOP_DISP1, + MUX_SEL_TOP_BUS, + MUX_SEL_TOP_PERI0, + MUX_SEL_TOP_PERI1, + MUX_SEL_TOP_FSYS, + DIV_TOP_G2D_MFC, + DIV_TOP_GSCL_ISP0, + DIV_TOP_ISP10, + DIV_TOP_ISP11, + DIV_TOP_DISP, + DIV_TOP_BUS, + DIV_TOP_PERI0, + DIV_TOP_PERI1, + DIV_TOP_PERI2, + DIV_TOP_FSYS0, + DIV_TOP_FSYS1, + DIV_TOP_HPM, + DIV_TOP_PLL_FDET, + EN_ACLK_TOP, + EN_SCLK_TOP, + EN_IP_TOP, +}; + +/* fixed rate clocks generated inside the soc */ +struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = { + FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL, + CLK_IS_ROOT, 270000000), + FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL, + CLK_IS_ROOT, 270000000), + FRATE(PHYCLK_DPTX_PHY_CH1_TXD_CLK, "phyclk_dptx_phy_ch1_txd_clk", NULL, + CLK_IS_ROOT, 270000000), + FRATE(PHYCLK_DPTX_PHY_CH0_TXD_CLK, 
"phyclk_dptx_phy_ch0_txd_clk", NULL, + CLK_IS_ROOT, 270000000), + FRATE(phyclk_hdmi_phy_tmds_clko, "phyclk_hdmi_phy_tmds_clko", NULL, + CLK_IS_ROOT, 250000000), + FRATE(PHYCLK_HDMI_PHY_PIXEL_CLKO, "phyclk_hdmi_phy_pixel_clko", NULL, + CLK_IS_ROOT, 1660000000), + FRATE(PHYCLK_HDMI_LINK_O_TMDS_CLKHI, "phyclk_hdmi_link_o_tmds_clkhi", + NULL, CLK_IS_ROOT, 125000000), + FRATE(PHYCLK_MIPI_DPHY_4L_M_TXBYTECLKHS, + "phyclk_mipi_dphy_4l_m_txbyteclkhs" , NULL, + CLK_IS_ROOT, 187500000), + FRATE(PHYCLK_DPTX_PHY_O_REF_CLK_24M, "phyclk_dptx_phy_o_ref_clk_24m", + NULL, CLK_IS_ROOT, 24000000), + FRATE(PHYCLK_DPTX_PHY_CLK_DIV2, "phyclk_dptx_phy_clk_div2", NULL, + CLK_IS_ROOT, 135000000), + FRATE(PHYCLK_MIPI_DPHY_4L_M_RXCLKESC0, + "phyclk_mipi_dphy_4l_m_rxclkesc0", NULL, + CLK_IS_ROOT, 20000000), + FRATE(PHYCLK_USBHOST20_PHY_PHYCLOCK, "phyclk_usbhost20_phy_phyclock", + NULL, CLK_IS_ROOT, 60000000), + FRATE(PHYCLK_USBHOST20_PHY_FREECLK, "phyclk_usbhost20_phy_freeclk", + NULL, CLK_IS_ROOT, 60000000), + FRATE(PHYCLK_USBHOST20_PHY_CLK48MOHCI, + "phyclk_usbhost20_phy_clk48mohci", + NULL, CLK_IS_ROOT, 48000000), + FRATE(PHYCLK_USBDRD30_UDRD30_PIPE_PCLK, + "phyclk_usbdrd30_udrd30_pipe_pclk", NULL, + CLK_IS_ROOT, 125000000), + FRATE(PHYCLK_USBDRD30_UDRD30_PHYCLOCK, + "phyclk_usbdrd30_udrd30_phyclock", NULL, + CLK_IS_ROOT, 60000000), +}; + +PNAME(mout_memtop_pll_user_p) = {"fin_pll", "dout_mem_pll"}; +PNAME(mout_bustop_pll_user_p) = {"fin_pll", "dout_bus_pll"}; +PNAME(mout_mediatop_pll_user_p) = {"fin_pll", "dout_media_pll"}; +PNAME(mout_audtop_pll_user_p) = {"fin_pll", "mout_aud_pll"}; +PNAME(mout_aud_pll_p) = {"fin_pll", "fout_aud_pll"}; +PNAME(mout_disp_pll_p) = {"fin_pll", "fout_disp_pll"}; +PNAME(mout_mfc_bustop_333_p) = {"mout_bustop_pll_user", "mout_disp_pll"}; +PNAME(mout_aclk_mfc_333_p) = {"mout_mediatop_pll_user", "mout_mfc_bustop_333"}; +PNAME(mout_g2d_bustop_333_p) = {"mout_bustop_pll_user", "mout_disp_pll"}; +PNAME(mout_aclk_g2d_333_p) = {"mout_mediatop_pll_user", "mout_g2d_bustop_333"}; +PNAME(mout_gscl_bustop_333_p) = {"mout_bustop_pll_user", "mout_disp_pll"}; +PNAME(mout_aclk_gscl_333_p) = {"mout_mediatop_pll_user", + "mout_gscl_bustop_333"}; +PNAME(mout_m2m_mediatop_400_p) = {"mout_mediatop_pll_user", "mout_disp_pll"}; +PNAME(mout_aclk_gscl_400_p) = {"mout_bustop_pll_user", + "mout_m2m_mediatop_400"}; +PNAME(mout_gscl_bustop_fimc_p) = {"mout_bustop_pll_user", "mout_disp_pll"}; +PNAME(mout_aclk_gscl_fimc_p) = {"mout_mediatop_pll_user", + "mout_gscl_bustop_fimc"}; +PNAME(mout_isp1_media_266_p) = {"mout_mediatop_pll_user", + "mout_memtop_pll_user"}; +PNAME(mout_aclk_isp1_266_p) = {"mout_bustop_pll_user", "mout_isp1_media_266"}; +PNAME(mout_isp1_media_400_p) = {"mout_mediatop_pll_user", "mout_disp_pll"}; +PNAME(mout_aclk_isp1_400_p) = {"mout_bustop_pll_user", "mout_isp1_media_400"}; +PNAME(mout_sclk_isp_spi_p) = {"fin_pll", "mout_bustop_pll_user"}; +PNAME(mout_sclk_isp_uart_p) = {"fin_pll", "mout_bustop_pll_user"}; +PNAME(mout_sclk_isp_sensor_p) = {"fin_pll", "mout_bustop_pll_user"}; +PNAME(mout_disp_disp_333_p) = {"mout_disp_pll", "mout_bustop_pll_user"}; +PNAME(mout_aclk_disp_333_p) = {"mout_mediatop_pll_user", "mout_disp_disp_333"}; +PNAME(mout_disp_disp_222_p) = {"mout_disp_pll", "mout_bustop_pll_user"}; +PNAME(mout_aclk_disp_222_p) = {"mout_mediatop_pll_user", "mout_disp_disp_222"}; +PNAME(mout_disp_media_pixel_p) = {"mout_mediatop_pll_user", + "mout_bustop_pll_user"}; +PNAME(mout_sclk_disp_pixel_p) = {"mout_disp_pll", "mout_disp_media_pixel"}; +PNAME(mout_bus_bustop_400_p) = 
{"mout_bustop_pll_user", "mout_memtop_pll_user"}; +PNAME(mout_bus_bustop_100_p) = {"mout_bustop_pll_user", "mout_memtop_pll_user"}; +PNAME(mout_sclk_peri_spi_clk_p) = {"fin_pll", "mout_bustop_pll_user"}; +PNAME(mout_sclk_peri_uart_uclk_p) = {"fin_pll", "mout_bustop_pll_user"}; +PNAME(mout_sclk_fsys_usb_p) = {"fin_pll", "mout_bustop_pll_user"}; +PNAME(mout_sclk_fsys_mmc_sdclkin_a_p) = {"fin_pll", "mout_bustop_pll_user"}; +PNAME(mout_sclk_fsys_mmc0_sdclkin_b_p) = {"mout_sclk_fsys_mmc0_sdclkin_a", + "mout_mediatop_pll_user"}; +PNAME(mout_sclk_fsys_mmc1_sdclkin_b_p) = {"mout_sclk_fsys_mmc1_sdclkin_a", + "mout_mediatop_pll_user"}; +PNAME(mout_sclk_fsys_mmc2_sdclkin_b_p) = {"mout_sclk_fsys_mmc2_sdclkin_a", + "mout_mediatop_pll_user"}; + +struct samsung_mux_clock top_mux_clks[] __initdata = { + MUX(TOP_MOUT_MEDIATOP_PLL_USER, "mout_mediatop_pll_user", + mout_mediatop_pll_user_p, + MUX_SEL_TOP_PLL0, 0, 1), + MUX(TOP_MOUT_MEMTOP_PLL_USER, "mout_memtop_pll_user", + mout_memtop_pll_user_p, + MUX_SEL_TOP_PLL0, 4, 1), + MUX(TOP_MOUT_BUSTOP_PLL_USER, "mout_bustop_pll_user", + mout_bustop_pll_user_p, + MUX_SEL_TOP_PLL0, 8, 1), + MUX(TOP_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p, + MUX_SEL_TOP_PLL0, 12, 1), + MUX(TOP_MOUT_AUD_PLL, "mout_aud_pll", mout_aud_pll_p, + MUX_SEL_TOP_PLL0, 16, 1), + MUX(TOP_MOUT_AUDTOP_PLL_USER, "mout_audtop_pll_user", + mout_audtop_pll_user_p, + MUX_SEL_TOP_PLL0, 24, 1), + + MUX(TOP_MOUT_DISP_DISP_333, "mout_disp_disp_333", mout_disp_disp_333_p, + MUX_SEL_TOP_DISP0, 0, 1), + MUX(TOP_MOUT_ACLK_DISP_333, "mout_aclk_disp_333", mout_aclk_disp_333_p, + MUX_SEL_TOP_DISP0, 8, 1), + MUX(TOP_MOUT_DISP_DISP_222, "mout_disp_disp_222", mout_disp_disp_222_p, + MUX_SEL_TOP_DISP0, 12, 1), + MUX(TOP_MOUT_ACLK_DISP_222, "mout_aclk_disp_222", mout_aclk_disp_222_p, + MUX_SEL_TOP_DISP0, 20, 1), + + MUX(TOP_MOUT_FIMD1, "mout_sclk_disp_pixel", mout_sclk_disp_pixel_p, + MUX_SEL_TOP_DISP1, 0, 1), + MUX(TOP_MOUT_DISP_MEDIA_PIXEL, "mout_disp_media_pixel", + mout_disp_media_pixel_p, + MUX_SEL_TOP_DISP1, 8, 1), + + MUX(TOP_MOUT_SCLK_PERI_SPI2_CLK, "mout_sclk_peri_spi2_clk", + mout_sclk_peri_spi_clk_p, + MUX_SEL_TOP_PERI1, 0, 1), + MUX(TOP_MOUT_SCLK_PERI_SPI1_CLK, "mout_sclk_peri_spi1_clk", + mout_sclk_peri_spi_clk_p, + MUX_SEL_TOP_PERI1, 4, 1), + MUX(TOP_MOUT_SCLK_PERI_SPI0_CLK, "mout_sclk_peri_spi0_clk", + mout_sclk_peri_spi_clk_p, + MUX_SEL_TOP_PERI1, 8, 1), + MUX(TOP_MOUT_SCLK_PERI_UART1_UCLK, "mout_sclk_peri_uart1_uclk", + mout_sclk_peri_uart_uclk_p, + MUX_SEL_TOP_PERI1, 12, 1), + MUX(TOP_MOUT_SCLK_PERI_UART2_UCLK, "mout_sclk_peri_uart2_uclk", + mout_sclk_peri_uart_uclk_p, + MUX_SEL_TOP_PERI1, 16, 1), + MUX(TOP_MOUT_SCLK_PERI_UART0_UCLK, "mout_sclk_peri_uart0_uclk", + mout_sclk_peri_uart_uclk_p, + MUX_SEL_TOP_PERI1, 20, 1), + + + MUX(TOP_MOUT_BUS1_BUSTOP_400, "mout_bus1_bustop_400", + mout_bus_bustop_400_p, + MUX_SEL_TOP_BUS, 0, 1), + MUX(TOP_MOUT_BUS1_BUSTOP_100, "mout_bus1_bustop_100", + mout_bus_bustop_100_p, + MUX_SEL_TOP_BUS, 4, 1), + MUX(TOP_MOUT_BUS2_BUSTOP_100, "mout_bus2_bustop_100", + mout_bus_bustop_100_p, + MUX_SEL_TOP_BUS, 8, 1), + MUX(TOP_MOUT_BUS2_BUSTOP_400, "mout_bus2_bustop_400", + mout_bus_bustop_400_p, + MUX_SEL_TOP_BUS, 12, 1), + MUX(TOP_MOUT_BUS3_BUSTOP_400, "mout_bus3_bustop_400", + mout_bus_bustop_400_p, + MUX_SEL_TOP_BUS, 16, 1), + MUX(TOP_MOUT_BUS3_BUSTOP_100, "mout_bus3_bustop_100", + mout_bus_bustop_100_p, + MUX_SEL_TOP_BUS, 20, 1), + MUX(TOP_MOUT_BUS4_BUSTOP_400, "mout_bus4_bustop_400", + mout_bus_bustop_400_p, + MUX_SEL_TOP_BUS, 24, 1), + 
MUX(TOP_MOUT_BUS4_BUSTOP_100, "mout_bus4_bustop_100", + mout_bus_bustop_100_p, + MUX_SEL_TOP_BUS, 28, 1), + + MUX(TOP_MOUT_SCLK_FSYS_USB, "mout_sclk_fsys_usb", + mout_sclk_fsys_usb_p, + MUX_SEL_TOP_FSYS, 0, 1), + MUX(TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_A, "mout_sclk_fsys_mmc2_sdclkin_a", + mout_sclk_fsys_mmc_sdclkin_a_p, + MUX_SEL_TOP_FSYS, 4, 1), + MUX(TOP_MOUT_SCLK_FSYS_MMC2_SDCLKIN_B, "mout_sclk_fsys_mmc2_sdclkin_b", + mout_sclk_fsys_mmc2_sdclkin_b_p, + MUX_SEL_TOP_FSYS, 8, 1), + MUX(TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_A, "mout_sclk_fsys_mmc1_sdclkin_a", + mout_sclk_fsys_mmc_sdclkin_a_p, + MUX_SEL_TOP_FSYS, 12, 1), + MUX(TOP_MOUT_SCLK_FSYS_MMC1_SDCLKIN_B, "mout_sclk_fsys_mmc1_sdclkin_b", + mout_sclk_fsys_mmc1_sdclkin_b_p, + MUX_SEL_TOP_FSYS, 16, 1), + MUX(TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_A, "mout_sclk_fsys_mmc0_sdclkin_a", + mout_sclk_fsys_mmc_sdclkin_a_p, + MUX_SEL_TOP_FSYS, 20, 1), + MUX(TOP_MOUT_SCLK_FSYS_MMC0_SDCLKIN_B, "mout_sclk_fsys_mmc0_sdclkin_b", + mout_sclk_fsys_mmc0_sdclkin_b_p, + MUX_SEL_TOP_FSYS, 24, 1), + + MUX(TOP_MOUT_ISP1_MEDIA_400, "mout_isp1_media_400", + mout_isp1_media_400_p, + MUX_SEL_TOP_ISP10, 4, 1), + MUX(TOP_MOUT_ACLK_ISP1_400, "mout_aclk_isp1_400", mout_aclk_isp1_400_p, + MUX_SEL_TOP_ISP10, 8, 1), + MUX(TOP_MOUT_ISP1_MEDIA_266, "mout_isp1_media_266", + mout_isp1_media_266_p, + MUX_SEL_TOP_ISP10, 16, 1), + MUX(TOP_MOUT_ACLK_ISP1_266, "mout_aclk_isp1_266", mout_aclk_isp1_266_p, + MUX_SEL_TOP_ISP10, 20, 1), + + MUX(TOP_MOUT_SCLK_ISP1_SPI0, "mout_sclk_isp1_spi0", mout_sclk_isp_spi_p, + MUX_SEL_TOP_ISP11, 4, 1), + MUX(TOP_MOUT_SCLK_ISP1_SPI1, "mout_sclk_isp1_spi1", mout_sclk_isp_spi_p, + MUX_SEL_TOP_ISP11, 8, 1), + MUX(TOP_MOUT_SCLK_ISP1_UART, "mout_sclk_isp1_uart", + mout_sclk_isp_uart_p, + MUX_SEL_TOP_ISP11, 12, 1), + MUX(TOP_MOUT_SCLK_ISP1_SENSOR0, "mout_sclk_isp1_sensor0", + mout_sclk_isp_sensor_p, + MUX_SEL_TOP_ISP11, 16, 1), + MUX(TOP_MOUT_SCLK_ISP1_SENSOR1, "mout_sclk_isp1_sensor1", + mout_sclk_isp_sensor_p, + MUX_SEL_TOP_ISP11, 20, 1), + MUX(TOP_MOUT_SCLK_ISP1_SENSOR2, "mout_sclk_isp1_sensor2", + mout_sclk_isp_sensor_p, + MUX_SEL_TOP_ISP11, 24, 1), + + MUX(TOP_MOUT_MFC_BUSTOP_333, "mout_mfc_bustop_333", + mout_mfc_bustop_333_p, + MUX_SEL_TOP_MFC, 4, 1), + MUX(TOP_MOUT_ACLK_MFC_333, "mout_aclk_mfc_333", mout_aclk_mfc_333_p, + MUX_SEL_TOP_MFC, 8, 1), + + MUX(TOP_MOUT_G2D_BUSTOP_333, "mout_g2d_bustop_333", + mout_g2d_bustop_333_p, + MUX_SEL_TOP_G2D, 4, 1), + MUX(TOP_MOUT_ACLK_G2D_333, "mout_aclk_g2d_333", mout_aclk_g2d_333_p, + MUX_SEL_TOP_G2D, 8, 1), + + MUX(TOP_MOUT_M2M_MEDIATOP_400, "mout_m2m_mediatop_400", + mout_m2m_mediatop_400_p, + MUX_SEL_TOP_GSCL, 0, 1), + MUX(TOP_MOUT_ACLK_GSCL_400, "mout_aclk_gscl_400", + mout_aclk_gscl_400_p, + MUX_SEL_TOP_GSCL, 4, 1), + MUX(TOP_MOUT_GSCL_BUSTOP_333, "mout_gscl_bustop_333", + mout_gscl_bustop_333_p, + MUX_SEL_TOP_GSCL, 8, 1), + MUX(TOP_MOUT_ACLK_GSCL_333, "mout_aclk_gscl_333", + mout_aclk_gscl_333_p, + MUX_SEL_TOP_GSCL, 12, 1), + MUX(TOP_MOUT_GSCL_BUSTOP_FIMC, "mout_gscl_bustop_fimc", + mout_gscl_bustop_fimc_p, + MUX_SEL_TOP_GSCL, 16, 1), + MUX(TOP_MOUT_ACLK_GSCL_FIMC, "mout_aclk_gscl_fimc", + mout_aclk_gscl_fimc_p, + MUX_SEL_TOP_GSCL, 20, 1), +}; + +struct samsung_div_clock top_div_clks[] __initdata = { + DIV(TOP_DOUT_ACLK_G2D_333, "dout_aclk_g2d_333", "mout_aclk_g2d_333", + DIV_TOP_G2D_MFC, 0, 3), + DIV(TOP_DOUT_ACLK_MFC_333, "dout_aclk_mfc_333", "mout_aclk_mfc_333", + DIV_TOP_G2D_MFC, 4, 3), + + DIV(TOP_DOUT_ACLK_GSCL_333, "dout_aclk_gscl_333", "mout_aclk_gscl_333", + DIV_TOP_GSCL_ISP0, 0, 3), +
DIV(TOP_DOUT_ACLK_GSCL_400, "dout_aclk_gscl_400", "mout_aclk_gscl_400", + DIV_TOP_GSCL_ISP0, 4, 3), + DIV(TOP_DOUT_ACLK_GSCL_FIMC, "dout_aclk_gscl_fimc", + "mout_aclk_gscl_fimc", DIV_TOP_GSCL_ISP0, 8, 3), + DIV(TOP_DOUT_SCLK_ISP1_SENSOR0_A, "dout_sclk_isp1_sensor0_a", + "mout_aclk_gscl_fimc", DIV_TOP_GSCL_ISP0, 16, 4), + DIV(TOP_DOUT_SCLK_ISP1_SENSOR1_A, "dout_sclk_isp1_sensor1_a", + "mout_aclk_gscl_400", DIV_TOP_GSCL_ISP0, 20, 4), + DIV(TOP_DOUT_SCLK_ISP1_SENSOR2_A, "dout_sclk_isp1_sensor2_a", + "mout_aclk_gscl_fimc", DIV_TOP_GSCL_ISP0, 24, 4), + + DIV(TOP_DOUT_ACLK_ISP1_266, "dout_aclk_isp1_266", "mout_aclk_isp1_266", + DIV_TOP_ISP10, 0, 3), + DIV(TOP_DOUT_ACLK_ISP1_400, "dout_aclk_isp1_400", "mout_aclk_isp1_400", + DIV_TOP_ISP10, 4, 3), + DIV(TOP_DOUT_SCLK_ISP1_SPI0_A, "dout_sclk_isp1_spi0_a", + "mout_sclk_isp1_spi0", DIV_TOP_ISP10, 12, 4), + DIV(TOP_DOUT_SCLK_ISP1_SPI0_B, "dout_sclk_isp1_spi0_b", + "dout_sclk_isp1_spi0_a", DIV_TOP_ISP10, 16, 8), + + DIV(TOP_DOUT_SCLK_ISP1_SPI1_A, "dout_sclk_isp1_spi1_a", + "mout_sclk_isp1_spi1", DIV_TOP_ISP11, 0, 4), + DIV(TOP_DOUT_SCLK_ISP1_SPI1_B, "dout_sclk_isp1_spi1_b", + "dout_sclk_isp1_spi1_a", DIV_TOP_ISP11, 4, 8), + DIV(TOP_DOUT_SCLK_ISP1_UART, "dout_sclk_isp1_uart", + "mout_sclk_isp1_uart", DIV_TOP_ISP11, 12, 4), + DIV(TOP_DOUT_SCLK_ISP1_SENSOR0_B, "dout_sclk_isp1_sensor0_b", + "dout_sclk_isp1_sensor0_a", DIV_TOP_ISP11, 16, 4), + DIV(TOP_DOUT_SCLK_ISP1_SENSOR1_B, "dout_sclk_isp1_sensor1_b", + "dout_sclk_isp1_sensor1_a", DIV_TOP_ISP11, 20, 4), + DIV(TOP_DOUT_SCLK_ISP1_SENSOR2_B, "dout_sclk_isp1_sensor2_b", + "dout_sclk_isp1_sensor2_a", DIV_TOP_ISP11, 24, 4), + + DIV(TOP_DOUTTOP__SCLK_HPM_TARGETCLK, "dout_sclk_hpm_targetclk", + "mout_bustop_pll_user", DIV_TOP_HPM, 0, 3), + + DIV(TOP_DOUT_ACLK_DISP_333, "dout_aclk_disp_333", "mout_aclk_disp_333", + DIV_TOP_DISP, 0, 3), + DIV(TOP_DOUT_ACLK_DISP_222, "dout_aclk_disp_222", "mout_aclk_disp_222", + DIV_TOP_DISP, 4, 3), + DIV(TOP_DOUT_SCLK_DISP_PIXEL, "dout_sclk_disp_pixel", + "mout_sclk_disp_pixel", DIV_TOP_DISP, 8, 3), + + DIV(TOP_DOUT_ACLK_BUS1_400, "dout_aclk_bus1_400", + "mout_bus1_bustop_400", DIV_TOP_BUS, 0, 3), + DIV(TOP_DOUT_ACLK_BUS1_100, "dout_aclk_bus1_100", + "mout_bus1_bustop_100", DIV_TOP_BUS, 4, 4), + DIV(TOP_DOUT_ACLK_BUS2_400, "dout_aclk_bus2_400", + "mout_bus2_bustop_400", DIV_TOP_BUS, 8, 3), + DIV(TOP_DOUT_ACLK_BUS2_100, "dout_aclk_bus2_100", + "mout_bus2_bustop_100", DIV_TOP_BUS, 12, 4), + DIV(TOP_DOUT_ACLK_BUS3_400, "dout_aclk_bus3_400", + "mout_bus3_bustop_400", DIV_TOP_BUS, 16, 3), + DIV(TOP_DOUT_ACLK_BUS3_100, "dout_aclk_bus3_100", + "mout_bus3_bustop_100", DIV_TOP_BUS, 20, 4), + DIV(TOP_DOUT_ACLK_BUS4_400, "dout_aclk_bus4_400", + "mout_bus4_bustop_400", DIV_TOP_BUS, 24, 3), + DIV(TOP_DOUT_ACLK_BUS4_100, "dout_aclk_bus4_100", + "mout_bus4_bustop_100", DIV_TOP_BUS, 28, 4), + + DIV(TOP_DOUT_SCLK_PERI_SPI0_A, "dout_sclk_peri_spi0_a", + "mout_sclk_peri_spi0_clk", DIV_TOP_PERI0, 4, 4), + DIV(TOP_DOUT_SCLK_PERI_SPI0_B, "dout_sclk_peri_spi0_b", + "dout_sclk_peri_spi0_a", DIV_TOP_PERI0, 8, 8), + DIV(TOP_DOUT_SCLK_PERI_SPI1_A, "dout_sclk_peri_spi1_a", + "mout_sclk_peri_spi1_clk", DIV_TOP_PERI0, 16, 4), + DIV(TOP_DOUT_SCLK_PERI_SPI1_B, "dout_sclk_peri_spi1_b", + "dout_sclk_peri_spi1_a", DIV_TOP_PERI0, 20, 8), + + DIV(TOP_DOUT_SCLK_PERI_SPI2_A, "dout_sclk_peri_spi2_a", + "mout_sclk_peri_spi2_clk", DIV_TOP_PERI1, 0, 4), + DIV(TOP_DOUT_SCLK_PERI_SPI2_B, "dout_sclk_peri_spi2_b", + "dout_sclk_peri_spi2_a", DIV_TOP_PERI1, 4, 8), + DIV(TOP_DOUT_SCLK_PERI_UART1, "dout_sclk_peri_uart1", + 
"mout_sclk_peri_uart1_uclk", DIV_TOP_PERI1, 16, 4), + DIV(TOP_DOUT_SCLK_PERI_UART2, "dout_sclk_peri_uart2", + "mout_sclk_peri_uart2_uclk", DIV_TOP_PERI1, 20, 4), + DIV(TOP_DOUT_SCLK_PERI_UART0, "dout_sclk_peri_uart0", + "mout_sclk_peri_uart0_uclk", DIV_TOP_PERI1, 24, 4), + + DIV(TOP_DOUT_ACLK_PERI_66, "dout_aclk_peri_66", "mout_bustop_pll_user", + DIV_TOP_PERI2, 20, 4), + DIV(TOP_DOUT_ACLK_PERI_AUD, "dout_aclk_peri_aud", + "mout_audtop_pll_user", DIV_TOP_PERI2, 24, 3), + + DIV(TOP_DOUT_ACLK_FSYS_200, "dout_aclk_fsys_200", + "mout_bustop_pll_user", DIV_TOP_FSYS0, 0, 3), + DIV(TOP_DOUT_SCLK_FSYS_USBDRD30_SUSPEND_CLK, + "dout_sclk_fsys_usbdrd30_suspend_clk", + "mout_sclk_fsys_usb", DIV_TOP_FSYS0, 4, 4), + DIV(TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_A, "dout_sclk_fsys_mmc0_sdclkin_a", + "mout_sclk_fsys_mmc0_sdclkin_b", + DIV_TOP_FSYS0, 12, 4), + DIV(TOP_DOUT_SCLK_FSYS_MMC0_SDCLKIN_B, "dout_sclk_fsys_mmc0_sdclkin_b", + "dout_sclk_fsys_mmc0_sdclkin_a", + DIV_TOP_FSYS0, 16, 8), + + + DIV(TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_A, "dout_sclk_fsys_mmc1_sdclkin_a", + "mout_sclk_fsys_mmc1_sdclkin_b", + DIV_TOP_FSYS1, 0, 4), + DIV(TOP_DOUT_SCLK_FSYS_MMC1_SDCLKIN_B, "dout_sclk_fsys_mmc1_sdclkin_b", + "dout_sclk_fsys_mmc1_sdclkin_a", + DIV_TOP_FSYS1, 4, 8), + DIV(TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_A, "dout_sclk_fsys_mmc2_sdclkin_a", + "mout_sclk_fsys_mmc2_sdclkin_b", + DIV_TOP_FSYS1, 12, 4), + DIV(TOP_DOUT_SCLK_FSYS_MMC2_SDCLKIN_B, "dout_sclk_fsys_mmc2_sdclkin_b", + "dout_sclk_fsys_mmc2_sdclkin_a", + DIV_TOP_FSYS1, 16, 8), + +}; + +struct samsung_gate_clock top_gate_clks[] __initdata = { + GATE(TOP_SCLK_MMC0, "sclk_fsys_mmc0_sdclkin", + "dout_sclk_fsys_mmc0_sdclkin_b", + EN_SCLK_TOP, 7, CLK_SET_RATE_PARENT, 0), + GATE(TOP_SCLK_MMC1, "sclk_fsys_mmc1_sdclkin", + "dout_sclk_fsys_mmc1_sdclkin_b", + EN_SCLK_TOP, 8, CLK_SET_RATE_PARENT, 0), + GATE(TOP_SCLK_MMC2, "sclk_fsys_mmc2_sdclkin", + "dout_sclk_fsys_mmc2_sdclkin_b", + EN_SCLK_TOP, 9, CLK_SET_RATE_PARENT, 0), + GATE(TOP_SCLK_FIMD1, "sclk_disp_pixel", "dout_sclk_disp_pixel", + EN_ACLK_TOP, 10, CLK_IGNORE_UNUSED | + CLK_SET_RATE_PARENT, 0), +}; + +static struct samsung_pll_clock top_pll_clks[] __initdata = { + PLL(pll_2550xx, TOP_FOUT_DISP_PLL, "fout_disp_pll", "fin_pll", + DISP_PLL_LOCK, DISP_PLL_CON0, + pll2550_24mhz_tbl), + PLL(pll_2650xx, TOP_FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", + AUD_PLL_LOCK, AUD_PLL_CON0, + pll2650_24mhz_tbl), +}; + +static void __init exynos5260_clk_top_init(struct device_node *np) +{ + struct exynos5260_cmu_info cmu = {0}; + + cmu.pll_clks = top_pll_clks; + cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks); + cmu.mux_clks = top_mux_clks; + cmu.nr_mux_clks = ARRAY_SIZE(top_mux_clks); + cmu.div_clks = top_div_clks; + cmu.nr_div_clks = ARRAY_SIZE(top_div_clks); + cmu.gate_clks = top_gate_clks; + cmu.nr_gate_clks = ARRAY_SIZE(top_gate_clks); + cmu.fixed_clks = fixed_rate_clks; + cmu.nr_fixed_clks = ARRAY_SIZE(fixed_rate_clks); + cmu.nr_clk_ids = TOP_NR_CLK; + cmu.clk_regs = top_clk_regs; + cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs); + + exynos5260_cmu_register_one(np, &cmu); +} + +CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top", + exynos5260_clk_top_init); diff --git a/drivers/clk/samsung/clk-exynos5260.h b/drivers/clk/samsung/clk-exynos5260.h new file mode 100644 index 00000000000..d739716d6ea --- /dev/null +++ b/drivers/clk/samsung/clk-exynos5260.h @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. 
+ * Author: Rahul Sharma <rahul.sharma@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common Clock Framework support for Exynos5260 SoC. + */ + +#ifndef __CLK_EXYNOS5260_H +#define __CLK_EXYNOS5260_H + +/* +*Registers for CMU_AUD +*/ +#define MUX_SEL_AUD 0x0200 +#define MUX_ENABLE_AUD 0x0300 +#define MUX_STAT_AUD 0x0400 +#define MUX_IGNORE_AUD 0x0500 +#define DIV_AUD0 0x0600 +#define DIV_AUD1 0x0604 +#define DIV_STAT_AUD0 0x0700 +#define DIV_STAT_AUD1 0x0704 +#define EN_ACLK_AUD 0x0800 +#define EN_PCLK_AUD 0x0900 +#define EN_SCLK_AUD 0x0a00 +#define EN_IP_AUD 0x0b00 + +/* +*Registers for CMU_DISP +*/ +#define MUX_SEL_DISP0 0x0200 +#define MUX_SEL_DISP1 0x0204 +#define MUX_SEL_DISP2 0x0208 +#define MUX_SEL_DISP3 0x020C +#define MUX_SEL_DISP4 0x0210 +#define MUX_ENABLE_DISP0 0x0300 +#define MUX_ENABLE_DISP1 0x0304 +#define MUX_ENABLE_DISP2 0x0308 +#define MUX_ENABLE_DISP3 0x030c +#define MUX_ENABLE_DISP4 0x0310 +#define MUX_STAT_DISP0 0x0400 +#define MUX_STAT_DISP1 0x0404 +#define MUX_STAT_DISP2 0x0408 +#define MUX_STAT_DISP3 0x040c +#define MUX_STAT_DISP4 0x0410 +#define MUX_IGNORE_DISP0 0x0500 +#define MUX_IGNORE_DISP1 0x0504 +#define MUX_IGNORE_DISP2 0x0508 +#define MUX_IGNORE_DISP3 0x050c +#define MUX_IGNORE_DISP4 0x0510 +#define DIV_DISP 0x0600 +#define DIV_STAT_DISP 0x0700 +#define EN_ACLK_DISP 0x0800 +#define EN_PCLK_DISP 0x0900 +#define EN_SCLK_DISP0 0x0a00 +#define EN_SCLK_DISP1 0x0a04 +#define EN_IP_DISP 0x0b00 +#define EN_IP_DISP_BUS 0x0b04 + + +/* +*Registers for CMU_EGL +*/ +#define EGL_PLL_LOCK 0x0000 +#define EGL_DPLL_LOCK 0x0004 +#define EGL_PLL_CON0 0x0100 +#define EGL_PLL_CON1 0x0104 +#define EGL_PLL_FREQ_DET 0x010c +#define EGL_DPLL_CON0 0x0110 +#define EGL_DPLL_CON1 0x0114 +#define EGL_DPLL_FREQ_DET 0x011c +#define MUX_SEL_EGL 0x0200 +#define MUX_ENABLE_EGL 0x0300 +#define MUX_STAT_EGL 0x0400 +#define DIV_EGL 0x0600 +#define DIV_EGL_PLL_FDET 0x0604 +#define DIV_STAT_EGL 0x0700 +#define DIV_STAT_EGL_PLL_FDET 0x0704 +#define EN_ACLK_EGL 0x0800 +#define EN_PCLK_EGL 0x0900 +#define EN_SCLK_EGL 0x0a00 +#define EN_IP_EGL 0x0b00 +#define CLKOUT_CMU_EGL 0x0c00 +#define CLKOUT_CMU_EGL_DIV_STAT 0x0c04 +#define ARMCLK_STOPCTRL 0x1000 +#define EAGLE_EMA_CTRL 0x1008 +#define EAGLE_EMA_STATUS 0x100c +#define PWR_CTRL 0x1020 +#define PWR_CTRL2 0x1024 +#define CLKSTOP_CTRL 0x1028 +#define INTR_SPREAD_EN 0x1080 +#define INTR_SPREAD_USE_STANDBYWFI 0x1084 +#define INTR_SPREAD_BLOCKING_DURATION 0x1088 +#define CMU_EGL_SPARE0 0x2000 +#define CMU_EGL_SPARE1 0x2004 +#define CMU_EGL_SPARE2 0x2008 +#define CMU_EGL_SPARE3 0x200c +#define CMU_EGL_SPARE4 0x2010 + +/* +*Registers for CMU_FSYS +*/ + +#define MUX_SEL_FSYS0 0x0200 +#define MUX_SEL_FSYS1 0x0204 +#define MUX_ENABLE_FSYS0 0x0300 +#define MUX_ENABLE_FSYS1 0x0304 +#define MUX_STAT_FSYS0 0x0400 +#define MUX_STAT_FSYS1 0x0404 +#define MUX_IGNORE_FSYS0 0x0500 +#define MUX_IGNORE_FSYS1 0x0504 +#define EN_ACLK_FSYS 0x0800 +#define EN_ACLK_FSYS_SECURE_RTIC 0x0804 +#define EN_ACLK_FSYS_SECURE_SMMU_RTIC 0x0808 +#define EN_PCLK_FSYS 0x0900 +#define EN_SCLK_FSYS 0x0a00 +#define EN_IP_FSYS 0x0b00 +#define EN_IP_FSYS_SECURE_RTIC 0x0b04 +#define EN_IP_FSYS_SECURE_SMMU_RTIC 0x0b08 + +/* +*Registers for CMU_G2D +*/ + +#define MUX_SEL_G2D 0x0200 +#define MUX_ENABLE_G2D 0x0300 +#define MUX_STAT_G2D 0x0400 +#define DIV_G2D 0x0600 +#define DIV_STAT_G2D 0x0700 +#define EN_ACLK_G2D 0x0800 +#define 
EN_ACLK_G2D_SECURE_SSS 0x0804 +#define EN_ACLK_G2D_SECURE_SLIM_SSS 0x0808 +#define EN_ACLK_G2D_SECURE_SMMU_SLIM_SSS 0x080c +#define EN_ACLK_G2D_SECURE_SMMU_SSS 0x0810 +#define EN_ACLK_G2D_SECURE_SMMU_MDMA 0x0814 +#define EN_ACLK_G2D_SECURE_SMMU_G2D 0x0818 +#define EN_PCLK_G2D 0x0900 +#define EN_PCLK_G2D_SECURE_SMMU_SLIM_SSS 0x0904 +#define EN_PCLK_G2D_SECURE_SMMU_SSS 0x0908 +#define EN_PCLK_G2D_SECURE_SMMU_MDMA 0x090c +#define EN_PCLK_G2D_SECURE_SMMU_G2D 0x0910 +#define EN_IP_G2D 0x0b00 +#define EN_IP_G2D_SECURE_SSS 0x0b04 +#define EN_IP_G2D_SECURE_SLIM_SSS 0x0b08 +#define EN_IP_G2D_SECURE_SMMU_SLIM_SSS 0x0b0c +#define EN_IP_G2D_SECURE_SMMU_SSS 0x0b10 +#define EN_IP_G2D_SECURE_SMMU_MDMA 0x0b14 +#define EN_IP_G2D_SECURE_SMMU_G2D 0x0b18 + +/* +*Registers for CMU_G3D +*/ + +#define G3D_PLL_LOCK 0x0000 +#define G3D_PLL_CON0 0x0100 +#define G3D_PLL_CON1 0x0104 +#define G3D_PLL_FDET 0x010c +#define MUX_SEL_G3D 0x0200 +#define MUX_EN_G3D 0x0300 +#define MUX_STAT_G3D 0x0400 +#define MUX_IGNORE_G3D 0x0500 +#define DIV_G3D 0x0600 +#define DIV_G3D_PLL_FDET 0x0604 +#define DIV_STAT_G3D 0x0700 +#define DIV_STAT_G3D_PLL_FDET 0x0704 +#define EN_ACLK_G3D 0x0800 +#define EN_PCLK_G3D 0x0900 +#define EN_SCLK_G3D 0x0a00 +#define EN_IP_G3D 0x0b00 +#define CLKOUT_CMU_G3D 0x0c00 +#define CLKOUT_CMU_G3D_DIV_STAT 0x0c04 +#define G3DCLK_STOPCTRL 0x1000 +#define G3D_EMA_CTRL 0x1008 +#define G3D_EMA_STATUS 0x100c + +/* +*Registers for CMU_GSCL +*/ + +#define MUX_SEL_GSCL 0x0200 +#define MUX_EN_GSCL 0x0300 +#define MUX_STAT_GSCL 0x0400 +#define MUX_IGNORE_GSCL 0x0500 +#define DIV_GSCL 0x0600 +#define DIV_STAT_GSCL 0x0700 +#define EN_ACLK_GSCL 0x0800 +#define EN_ACLK_GSCL_FIMC 0x0804 +#define EN_ACLK_GSCL_SECURE_SMMU_GSCL0 0x0808 +#define EN_ACLK_GSCL_SECURE_SMMU_GSCL1 0x080c +#define EN_ACLK_GSCL_SECURE_SMMU_MSCL0 0x0810 +#define EN_ACLK_GSCL_SECURE_SMMU_MSCL1 0x0814 +#define EN_PCLK_GSCL 0x0900 +#define EN_PCLK_GSCL_FIMC 0x0904 +#define EN_PCLK_GSCL_SECURE_SMMU_GSCL0 0x0908 +#define EN_PCLK_GSCL_SECURE_SMMU_GSCL1 0x090c +#define EN_PCLK_GSCL_SECURE_SMMU_MSCL0 0x0910 +#define EN_PCLK_GSCL_SECURE_SMMU_MSCL1 0x0914 +#define EN_SCLK_GSCL 0x0a00 +#define EN_SCLK_GSCL_FIMC 0x0a04 +#define EN_IP_GSCL 0x0b00 +#define EN_IP_GSCL_FIMC 0x0b04 +#define EN_IP_GSCL_SECURE_SMMU_GSCL0 0x0b08 +#define EN_IP_GSCL_SECURE_SMMU_GSCL1 0x0b0c +#define EN_IP_GSCL_SECURE_SMMU_MSCL0 0x0b10 +#define EN_IP_GSCL_SECURE_SMMU_MSCL1 0x0b14 + +/* +*Registers for CMU_ISP +*/ +#define MUX_SEL_ISP0 0x0200 +#define MUX_SEL_ISP1 0x0204 +#define MUX_ENABLE_ISP0 0x0300 +#define MUX_ENABLE_ISP1 0x0304 +#define MUX_STAT_ISP0 0x0400 +#define MUX_STAT_ISP1 0x0404 +#define MUX_IGNORE_ISP0 0x0500 +#define MUX_IGNORE_ISP1 0x0504 +#define DIV_ISP 0x0600 +#define DIV_STAT_ISP 0x0700 +#define EN_ACLK_ISP0 0x0800 +#define EN_ACLK_ISP1 0x0804 +#define EN_PCLK_ISP0 0x0900 +#define EN_PCLK_ISP1 0x0904 +#define EN_SCLK_ISP 0x0a00 +#define EN_IP_ISP0 0x0b00 +#define EN_IP_ISP1 0x0b04 + +/* +*Registers for CMU_KFC +*/ +#define KFC_PLL_LOCK 0x0000 +#define KFC_PLL_CON0 0x0100 +#define KFC_PLL_CON1 0x0104 +#define KFC_PLL_FDET 0x010c +#define MUX_SEL_KFC0 0x0200 +#define MUX_SEL_KFC2 0x0208 +#define MUX_ENABLE_KFC0 0x0300 +#define MUX_ENABLE_KFC2 0x0308 +#define MUX_STAT_KFC0 0x0400 +#define MUX_STAT_KFC2 0x0408 +#define DIV_KFC 0x0600 +#define DIV_KFC_PLL_FDET 0x0604 +#define DIV_STAT_KFC 0x0700 +#define DIV_STAT_KFC_PLL_FDET 0x0704 +#define EN_ACLK_KFC 0x0800 +#define EN_PCLK_KFC 0x0900 +#define EN_SCLK_KFC 0x0a00 +#define EN_IP_KFC 0x0b00 +#define CLKOUT_CMU_KFC 0x0c00 
+#define CLKOUT_CMU_KFC_DIV_STAT 0x0c04 +#define ARMCLK_STOPCTRL_KFC 0x1000 +#define ARM_EMA_CTRL 0x1008 +#define ARM_EMA_STATUS 0x100c +#define PWR_CTRL_KFC 0x1020 +#define PWR_CTRL2_KFC 0x1024 +#define CLKSTOP_CTRL_KFC 0x1028 +#define INTR_SPREAD_ENABLE_KFC 0x1080 +#define INTR_SPREAD_USE_STANDBYWFI_KFC 0x1084 +#define INTR_SPREAD_BLOCKING_DURATION_KFC 0x1088 +#define CMU_KFC_SPARE0 0x2000 +#define CMU_KFC_SPARE1 0x2004 +#define CMU_KFC_SPARE2 0x2008 +#define CMU_KFC_SPARE3 0x200c +#define CMU_KFC_SPARE4 0x2010 + +/* +*Registers for CMU_MFC +*/ +#define MUX_SEL_MFC 0x0200 +#define MUX_ENABLE_MFC 0x0300 +#define MUX_STAT_MFC 0x0400 +#define DIV_MFC 0x0600 +#define DIV_STAT_MFC 0x0700 +#define EN_ACLK_MFC 0x0800 +#define EN_ACLK_SECURE_SMMU2_MFC 0x0804 +#define EN_PCLK_MFC 0x0900 +#define EN_PCLK_SECURE_SMMU2_MFC 0x0904 +#define EN_IP_MFC 0x0b00 +#define EN_IP_MFC_SECURE_SMMU2_MFC 0x0b04 + +/* +*Registers for CMU_MIF +*/ +#define MEM_PLL_LOCK 0x0000 +#define BUS_PLL_LOCK 0x0004 +#define MEDIA_PLL_LOCK 0x0008 +#define MEM_PLL_CON0 0x0100 +#define MEM_PLL_CON1 0x0104 +#define MEM_PLL_FDET 0x010c +#define BUS_PLL_CON0 0x0110 +#define BUS_PLL_CON1 0x0114 +#define BUS_PLL_FDET 0x011c +#define MEDIA_PLL_CON0 0x0120 +#define MEDIA_PLL_CON1 0x0124 +#define MEDIA_PLL_FDET 0x012c +#define MUX_SEL_MIF 0x0200 +#define MUX_ENABLE_MIF 0x0300 +#define MUX_STAT_MIF 0x0400 +#define MUX_IGNORE_MIF 0x0500 +#define DIV_MIF 0x0600 +#define DIV_MIF_PLL_FDET 0x0604 +#define DIV_STAT_MIF 0x0700 +#define DIV_STAT_MIF_PLL_FDET 0x0704 +#define EN_ACLK_MIF 0x0800 +#define EN_ACLK_MIF_SECURE_DREX1_TZ 0x0804 +#define EN_ACLK_MIF_SECURE_DREX0_TZ 0x0808 +#define EN_ACLK_MIF_SECURE_INTMEM 0x080c +#define EN_PCLK_MIF 0x0900 +#define EN_PCLK_MIF_SECURE_MONOCNT 0x0904 +#define EN_PCLK_MIF_SECURE_RTC_APBIF 0x0908 +#define EN_PCLK_MIF_SECURE_DREX1_TZ 0x090c +#define EN_PCLK_MIF_SECURE_DREX0_TZ 0x0910 +#define EN_SCLK_MIF 0x0a00 +#define EN_IP_MIF 0x0b00 +#define EN_IP_MIF_SECURE_MONOCNT 0x0b04 +#define EN_IP_MIF_SECURE_RTC_APBIF 0x0b08 +#define EN_IP_MIF_SECURE_DREX1_TZ 0x0b0c +#define EN_IP_MIF_SECURE_DREX0_TZ 0x0b10 +#define EN_IP_MIF_SECURE_INTEMEM 0x0b14 +#define CLKOUT_CMU_MIF_DIV_STAT 0x0c04 +#define DREX_FREQ_CTRL 0x1000 +#define PAUSE 0x1004 +#define DDRPHY_LOCK_CTRL 0x1008 +#define CLKOUT_CMU_MIF 0xcb00 + +/* +*Registers for CMU_PERI +*/ +#define MUX_SEL_PERI 0x0200 +#define MUX_SEL_PERI1 0x0204 +#define MUX_ENABLE_PERI 0x0300 +#define MUX_ENABLE_PERI1 0x0304 +#define MUX_STAT_PERI 0x0400 +#define MUX_STAT_PERI1 0x0404 +#define MUX_IGNORE_PERI 0x0500 +#define MUX_IGNORE_PERI1 0x0504 +#define DIV_PERI 0x0600 +#define DIV_STAT_PERI 0x0700 +#define EN_PCLK_PERI0 0x0800 +#define EN_PCLK_PERI1 0x0804 +#define EN_PCLK_PERI2 0x0808 +#define EN_PCLK_PERI3 0x080c +#define EN_PCLK_PERI_SECURE_CHIPID 0x0810 +#define EN_PCLK_PERI_SECURE_PROVKEY0 0x0814 +#define EN_PCLK_PERI_SECURE_PROVKEY1 0x0818 +#define EN_PCLK_PERI_SECURE_SECKEY 0x081c +#define EN_PCLK_PERI_SECURE_ANTIRBKCNT 0x0820 +#define EN_PCLK_PERI_SECURE_TOP_RTC 0x0824 +#define EN_PCLK_PERI_SECURE_TZPC 0x0828 +#define EN_SCLK_PERI 0x0a00 +#define EN_SCLK_PERI_SECURE_TOP_RTC 0x0a04 +#define EN_IP_PERI0 0x0b00 +#define EN_IP_PERI1 0x0b04 +#define EN_IP_PERI2 0x0b08 +#define EN_IP_PERI_SECURE_CHIPID 0x0b0c +#define EN_IP_PERI_SECURE_PROVKEY0 0x0b10 +#define EN_IP_PERI_SECURE_PROVKEY1 0x0b14 +#define EN_IP_PERI_SECURE_SECKEY 0x0b18 +#define EN_IP_PERI_SECURE_ANTIRBKCNT 0x0b1c +#define EN_IP_PERI_SECURE_TOP_RTC 0x0b20 +#define EN_IP_PERI_SECURE_TZPC 0x0b24 + +/* +*Registers 
for CMU_TOP +*/ +#define DISP_PLL_LOCK 0x0000 +#define AUD_PLL_LOCK 0x0004 +#define DISP_PLL_CON0 0x0100 +#define DISP_PLL_CON1 0x0104 +#define DISP_PLL_FDET 0x0108 +#define AUD_PLL_CON0 0x0110 +#define AUD_PLL_CON1 0x0114 +#define AUD_PLL_CON2 0x0118 +#define AUD_PLL_FDET 0x011c +#define MUX_SEL_TOP_PLL0 0x0200 +#define MUX_SEL_TOP_MFC 0x0204 +#define MUX_SEL_TOP_G2D 0x0208 +#define MUX_SEL_TOP_GSCL 0x020c +#define MUX_SEL_TOP_ISP10 0x0214 +#define MUX_SEL_TOP_ISP11 0x0218 +#define MUX_SEL_TOP_DISP0 0x021c +#define MUX_SEL_TOP_DISP1 0x0220 +#define MUX_SEL_TOP_BUS 0x0224 +#define MUX_SEL_TOP_PERI0 0x0228 +#define MUX_SEL_TOP_PERI1 0x022c +#define MUX_SEL_TOP_FSYS 0x0230 +#define MUX_ENABLE_TOP_PLL0 0x0300 +#define MUX_ENABLE_TOP_MFC 0x0304 +#define MUX_ENABLE_TOP_G2D 0x0308 +#define MUX_ENABLE_TOP_GSCL 0x030c +#define MUX_ENABLE_TOP_ISP10 0x0314 +#define MUX_ENABLE_TOP_ISP11 0x0318 +#define MUX_ENABLE_TOP_DISP0 0x031c +#define MUX_ENABLE_TOP_DISP1 0x0320 +#define MUX_ENABLE_TOP_BUS 0x0324 +#define MUX_ENABLE_TOP_PERI0 0x0328 +#define MUX_ENABLE_TOP_PERI1 0x032c +#define MUX_ENABLE_TOP_FSYS 0x0330 +#define MUX_STAT_TOP_PLL0 0x0400 +#define MUX_STAT_TOP_MFC 0x0404 +#define MUX_STAT_TOP_G2D 0x0408 +#define MUX_STAT_TOP_GSCL 0x040c +#define MUX_STAT_TOP_ISP10 0x0414 +#define MUX_STAT_TOP_ISP11 0x0418 +#define MUX_STAT_TOP_DISP0 0x041c +#define MUX_STAT_TOP_DISP1 0x0420 +#define MUX_STAT_TOP_BUS 0x0424 +#define MUX_STAT_TOP_PERI0 0x0428 +#define MUX_STAT_TOP_PERI1 0x042c +#define MUX_STAT_TOP_FSYS 0x0430 +#define MUX_IGNORE_TOP_PLL0 0x0500 +#define MUX_IGNORE_TOP_MFC 0x0504 +#define MUX_IGNORE_TOP_G2D 0x0508 +#define MUX_IGNORE_TOP_GSCL 0x050c +#define MUX_IGNORE_TOP_ISP10 0x0514 +#define MUX_IGNORE_TOP_ISP11 0x0518 +#define MUX_IGNORE_TOP_DISP0 0x051c +#define MUX_IGNORE_TOP_DISP1 0x0520 +#define MUX_IGNORE_TOP_BUS 0x0524 +#define MUX_IGNORE_TOP_PERI0 0x0528 +#define MUX_IGNORE_TOP_PERI1 0x052c +#define MUX_IGNORE_TOP_FSYS 0x0530 +#define DIV_TOP_G2D_MFC 0x0600 +#define DIV_TOP_GSCL_ISP0 0x0604 +#define DIV_TOP_ISP10 0x0608 +#define DIV_TOP_ISP11 0x060c +#define DIV_TOP_DISP 0x0610 +#define DIV_TOP_BUS 0x0614 +#define DIV_TOP_PERI0 0x0618 +#define DIV_TOP_PERI1 0x061c +#define DIV_TOP_PERI2 0x0620 +#define DIV_TOP_FSYS0 0x0624 +#define DIV_TOP_FSYS1 0x0628 +#define DIV_TOP_HPM 0x062c +#define DIV_TOP_PLL_FDET 0x0630 +#define DIV_STAT_TOP_G2D_MFC 0x0700 +#define DIV_STAT_TOP_GSCL_ISP0 0x0704 +#define DIV_STAT_TOP_ISP10 0x0708 +#define DIV_STAT_TOP_ISP11 0x070c +#define DIV_STAT_TOP_DISP 0x0710 +#define DIV_STAT_TOP_BUS 0x0714 +#define DIV_STAT_TOP_PERI0 0x0718 +#define DIV_STAT_TOP_PERI1 0x071c +#define DIV_STAT_TOP_PERI2 0x0720 +#define DIV_STAT_TOP_FSYS0 0x0724 +#define DIV_STAT_TOP_FSYS1 0x0728 +#define DIV_STAT_TOP_HPM 0x072c +#define DIV_STAT_TOP_PLL_FDET 0x0730 +#define EN_ACLK_TOP 0x0800 +#define EN_SCLK_TOP 0x0a00 +#define EN_IP_TOP 0x0b00 +#define CLKOUT_CMU_TOP 0x0c00 +#define CLKOUT_CMU_TOP_DIV_STAT 0x0c04 + +#endif /*__CLK_EXYNOS5260_H */ + diff --git a/drivers/clk/samsung/clk-exynos5410.c b/drivers/clk/samsung/clk-exynos5410.c new file mode 100644 index 00000000000..c9505ab9ee7 --- /dev/null +++ b/drivers/clk/samsung/clk-exynos5410.c @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2013 Samsung Electronics Co., Ltd. + * Author: Tarek Dakhran <t.dakhran@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Common Clock Framework support for Exynos5410 SoC. + */ + +#include <dt-bindings/clock/exynos5410.h> + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#include "clk.h" + +#define APLL_LOCK 0x0 +#define APLL_CON0 0x100 +#define CPLL_LOCK 0x10020 +#define CPLL_CON0 0x10120 +#define MPLL_LOCK 0x4000 +#define MPLL_CON0 0x4100 +#define BPLL_LOCK 0x20010 +#define BPLL_CON0 0x20110 +#define KPLL_LOCK 0x28000 +#define KPLL_CON0 0x28100 + +#define SRC_CPU 0x200 +#define DIV_CPU0 0x500 +#define SRC_CPERI1 0x4204 +#define DIV_TOP0 0x10510 +#define DIV_TOP1 0x10514 +#define DIV_FSYS1 0x1054c +#define DIV_FSYS2 0x10550 +#define DIV_PERIC0 0x10558 +#define SRC_TOP0 0x10210 +#define SRC_TOP1 0x10214 +#define SRC_TOP2 0x10218 +#define SRC_FSYS 0x10244 +#define SRC_PERIC0 0x10250 +#define SRC_MASK_FSYS 0x10340 +#define SRC_MASK_PERIC0 0x10350 +#define GATE_BUS_FSYS0 0x10740 +#define GATE_IP_FSYS 0x10944 +#define GATE_IP_PERIC 0x10950 +#define GATE_IP_PERIS 0x10960 +#define SRC_CDREX 0x20200 +#define SRC_KFC 0x28200 +#define DIV_KFC0 0x28500 + +/* list of PLLs */ +enum exynos5410_plls { + apll, cpll, mpll, + bpll, kpll, + nr_plls /* number of PLLs */ +}; + +/* list of all parent clocks */ +PNAME(apll_p) = { "fin_pll", "fout_apll", }; +PNAME(bpll_p) = { "fin_pll", "fout_bpll", }; +PNAME(cpll_p) = { "fin_pll", "fout_cpll" }; +PNAME(mpll_p) = { "fin_pll", "fout_mpll", }; +PNAME(kpll_p) = { "fin_pll", "fout_kpll", }; + +PNAME(mout_cpu_p) = { "mout_apll", "sclk_mpll", }; +PNAME(mout_kfc_p) = { "mout_kpll", "sclk_mpll", }; + +PNAME(mpll_user_p) = { "fin_pll", "sclk_mpll", }; +PNAME(bpll_user_p) = { "fin_pll", "sclk_bpll", }; +PNAME(mpll_bpll_p) = { "sclk_mpll_muxed", "sclk_bpll_muxed", }; + +PNAME(group2_p) = { "fin_pll", "fin_pll", "none", "none", + "none", "none", "sclk_mpll_bpll", + "none", "none", "sclk_cpll" }; + +static struct samsung_mux_clock exynos5410_mux_clks[] __initdata = { + MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1), + MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1), + + MUX(0, "mout_kpll", kpll_p, SRC_KFC, 0, 1), + MUX(0, "mout_kfc", mout_kfc_p, SRC_KFC, 16, 1), + + MUX(0, "sclk_mpll", mpll_p, SRC_CPERI1, 8, 1), + MUX(0, "sclk_mpll_muxed", mpll_user_p, SRC_TOP2, 20, 1), + + MUX(0, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1), + MUX(0, "sclk_bpll_muxed", bpll_user_p, SRC_TOP2, 24, 1), + + MUX(0, "sclk_cpll", cpll_p, SRC_TOP2, 8, 1), + + MUX(0, "sclk_mpll_bpll", mpll_bpll_p, SRC_TOP1, 20, 1), + + MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 0, 4), + MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 4, 4), + MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 8, 4), + + MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 0, 4), + MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 4, 4), + MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 8, 4), + MUX(0, "mout_uart3", group2_p, SRC_PERIC0, 12, 4), + + MUX(0, "mout_aclk200", mpll_bpll_p, SRC_TOP0, 12, 1), + MUX(0, "mout_aclk400", mpll_bpll_p, SRC_TOP0, 20, 1), +}; + +static struct samsung_div_clock exynos5410_div_clks[] __initdata = { + DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3), + DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3), + + DIV(0, "div_acp", "div_arm2", DIV_CPU0, 8, 3), + DIV(0, "div_cpud", "div_arm2", DIV_CPU0, 4, 3), + DIV(0, "div_atb", "div_arm2", DIV_CPU0, 16, 3), + DIV(0, "pclk_dbg", "div_arm2", DIV_CPU0, 20, 3), + + DIV(0, "div_kfc", "mout_kfc", DIV_KFC0, 0, 3), + DIV(0, "div_aclk", "div_kfc", DIV_KFC0, 4, 3), + DIV(0, "div_pclk", "div_kfc", DIV_KFC0, 20, 3), + + DIV(0, "aclk66_pre", "sclk_mpll_muxed", DIV_TOP1, 24, 3), + DIV(0, "aclk66",
"aclk66_pre", DIV_TOP0, 0, 3), + + DIV(0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4), + DIV(0, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4), + DIV(0, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4), + + DIV_F(0, "div_mmc_pre0", "div_mmc0", + DIV_FSYS1, 8, 8, CLK_SET_RATE_PARENT, 0), + DIV_F(0, "div_mmc_pre1", "div_mmc1", + DIV_FSYS1, 24, 8, CLK_SET_RATE_PARENT, 0), + DIV_F(0, "div_mmc_pre2", "div_mmc2", + DIV_FSYS2, 8, 8, CLK_SET_RATE_PARENT, 0), + + DIV(0, "div_uart0", "mout_uart0", DIV_PERIC0, 0, 4), + DIV(0, "div_uart1", "mout_uart1", DIV_PERIC0, 4, 4), + DIV(0, "div_uart2", "mout_uart2", DIV_PERIC0, 8, 4), + DIV(0, "div_uart3", "mout_uart3", DIV_PERIC0, 12, 4), + + DIV(0, "aclk200", "mout_aclk200", DIV_TOP0, 12, 3), + DIV(0, "aclk400", "mout_aclk400", DIV_TOP0, 24, 3), +}; + +static struct samsung_gate_clock exynos5410_gate_clks[] __initdata = { + GATE(CLK_MCT, "mct", "aclk66", GATE_IP_PERIS, 18, 0, 0), + + GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc_pre0", + SRC_MASK_FSYS, 0, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc_pre1", + SRC_MASK_FSYS, 4, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc_pre2", + SRC_MASK_FSYS, 8, CLK_SET_RATE_PARENT, 0), + + GATE(CLK_MMC0, "sdmmc0", "aclk200", GATE_BUS_FSYS0, 12, 0, 0), + GATE(CLK_MMC1, "sdmmc1", "aclk200", GATE_BUS_FSYS0, 13, 0, 0), + GATE(CLK_MMC2, "sdmmc2", "aclk200", GATE_BUS_FSYS0, 14, 0, 0), + + GATE(CLK_UART0, "uart0", "aclk66", GATE_IP_PERIC, 0, 0, 0), + GATE(CLK_UART1, "uart1", "aclk66", GATE_IP_PERIC, 1, 0, 0), + GATE(CLK_UART2, "uart2", "aclk66", GATE_IP_PERIC, 2, 0, 0), + + GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0", + SRC_MASK_PERIC0, 0, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1", + SRC_MASK_PERIC0, 4, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2", + SRC_MASK_PERIC0, 8, CLK_SET_RATE_PARENT, 0), +}; + +static struct samsung_pll_clock exynos5410_plls[nr_plls] __initdata = { + [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK, + APLL_CON0, NULL), + [cpll] = PLL(pll_35xx, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK, + CPLL_CON0, NULL), + [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", MPLL_LOCK, + MPLL_CON0, NULL), + [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", BPLL_LOCK, + BPLL_CON0, NULL), + [kpll] = PLL(pll_35xx, CLK_FOUT_KPLL, "fout_kpll", "fin_pll", KPLL_LOCK, + KPLL_CON0, NULL), +}; + +/* register exynos5410 clocks */ +static void __init exynos5410_clk_init(struct device_node *np) +{ + struct samsung_clk_provider *ctx; + void __iomem *reg_base; + + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + + ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); + + samsung_clk_register_pll(ctx, exynos5410_plls, + ARRAY_SIZE(exynos5410_plls), reg_base); + + samsung_clk_register_mux(ctx, exynos5410_mux_clks, + ARRAY_SIZE(exynos5410_mux_clks)); + samsung_clk_register_div(ctx, exynos5410_div_clks, + ARRAY_SIZE(exynos5410_div_clks)); + samsung_clk_register_gate(ctx, exynos5410_gate_clks, + ARRAY_SIZE(exynos5410_gate_clks)); + + pr_debug("Exynos5410: clock setup completed.\n"); +} +CLK_OF_DECLARE(exynos5410_clk, "samsung,exynos5410-clock", exynos5410_clk_init); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index 60b26819bed..9d7d7eed03f 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -27,18 +27,24 @@ #define DIV_CPU1 0x504 #define GATE_BUS_CPU 0x700 #define 
GATE_SCLK_CPU 0x800 +#define CLKOUT_CMU_CPU 0xa00 +#define GATE_IP_G2D 0x8800 #define CPLL_LOCK 0x10020 #define DPLL_LOCK 0x10030 #define EPLL_LOCK 0x10040 #define RPLL_LOCK 0x10050 #define IPLL_LOCK 0x10060 #define SPLL_LOCK 0x10070 -#define VPLL_LOCK 0x10070 +#define VPLL_LOCK 0x10080 #define MPLL_LOCK 0x10090 #define CPLL_CON0 0x10120 #define DPLL_CON0 0x10128 #define EPLL_CON0 0x10130 +#define EPLL_CON1 0x10134 +#define EPLL_CON2 0x10138 #define RPLL_CON0 0x10140 +#define RPLL_CON1 0x10144 +#define RPLL_CON2 0x10148 #define IPLL_CON0 0x10150 #define SPLL_CON0 0x10160 #define VPLL_CON0 0x10170 @@ -51,21 +57,31 @@ #define SRC_TOP5 0x10214 #define SRC_TOP6 0x10218 #define SRC_TOP7 0x1021c +#define SRC_TOP8 0x10220 /* 5800 specific */ +#define SRC_TOP9 0x10224 /* 5800 specific */ #define SRC_DISP10 0x1022c #define SRC_MAU 0x10240 #define SRC_FSYS 0x10244 #define SRC_PERIC0 0x10250 #define SRC_PERIC1 0x10254 +#define SRC_ISP 0x10270 +#define SRC_CAM 0x10274 /* 5800 specific */ #define SRC_TOP10 0x10280 #define SRC_TOP11 0x10284 #define SRC_TOP12 0x10288 -#define SRC_MASK_DISP10 0x1032c +#define SRC_TOP13 0x1028c /* 5800 specific */ +#define SRC_MASK_TOP2 0x10308 +#define SRC_MASK_TOP7 0x1031c +#define SRC_MASK_DISP10 0x1032c +#define SRC_MASK_MAU 0x10334 #define SRC_MASK_FSYS 0x10340 #define SRC_MASK_PERIC0 0x10350 #define SRC_MASK_PERIC1 0x10354 #define DIV_TOP0 0x10500 #define DIV_TOP1 0x10504 #define DIV_TOP2 0x10508 +#define DIV_TOP8 0x10520 /* 5800 specific */ +#define DIV_TOP9 0x10524 /* 5800 specific */ #define DIV_DISP10 0x1052c #define DIV_MAU 0x10544 #define DIV_FSYS0 0x10548 @@ -76,54 +92,82 @@ #define DIV_PERIC2 0x10560 #define DIV_PERIC3 0x10564 #define DIV_PERIC4 0x10568 +#define DIV_CAM 0x10574 /* 5800 specific */ +#define SCLK_DIV_ISP0 0x10580 +#define SCLK_DIV_ISP1 0x10584 +#define DIV2_RATIO0 0x10590 +#define DIV4_RATIO 0x105a0 #define GATE_BUS_TOP 0x10700 +#define GATE_BUS_GEN 0x1073c #define GATE_BUS_FSYS0 0x10740 +#define GATE_BUS_FSYS2 0x10748 #define GATE_BUS_PERIC 0x10750 #define GATE_BUS_PERIC1 0x10754 #define GATE_BUS_PERIS0 0x10760 #define GATE_BUS_PERIS1 0x10764 +#define GATE_BUS_NOC 0x10770 +#define GATE_TOP_SCLK_ISP 0x10870 #define GATE_IP_GSCL0 0x10910 #define GATE_IP_GSCL1 0x10920 +#define GATE_IP_CAM 0x10924 /* 5800 specific */ #define GATE_IP_MFC 0x1092c #define GATE_IP_DISP1 0x10928 #define GATE_IP_G3D 0x10930 #define GATE_IP_GEN 0x10934 +#define GATE_IP_FSYS 0x10944 +#define GATE_IP_PERIC 0x10950 +#define GATE_IP_PERIS 0x10960 #define GATE_IP_MSCL 0x10970 #define GATE_TOP_SCLK_GSCL 0x10820 #define GATE_TOP_SCLK_DISP1 0x10828 #define GATE_TOP_SCLK_MAU 0x1083c #define GATE_TOP_SCLK_FSYS 0x10840 #define GATE_TOP_SCLK_PERIC 0x10850 +#define TOP_SPARE2 0x10b08 #define BPLL_LOCK 0x20010 #define BPLL_CON0 0x20110 -#define SRC_CDREX 0x20200 #define KPLL_LOCK 0x28000 #define KPLL_CON0 0x28100 #define SRC_KFC 0x28200 #define DIV_KFC0 0x28500 +/* Exynos5x SoC type */ +enum exynos5x_soc { + EXYNOS5420, + EXYNOS5800, +}; + /* list of PLLs */ -enum exynos5420_plls { +enum exynos5x_plls { apll, cpll, dpll, epll, rpll, ipll, spll, vpll, mpll, bpll, kpll, nr_plls /* number of PLLs */ }; static void __iomem *reg_base; +static enum exynos5x_soc exynos5x_soc; #ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos5420_save; +static struct samsung_clk_reg_dump *exynos5x_save; +static struct samsung_clk_reg_dump *exynos5800_save; /* * list of controller registers to be saved and restored during a * suspend/resume cycle. 
*/ -static unsigned long exynos5420_clk_regs[] __initdata = { +static unsigned long exynos5x_clk_regs[] __initdata = { SRC_CPU, DIV_CPU0, DIV_CPU1, GATE_BUS_CPU, GATE_SCLK_CPU, + CLKOUT_CMU_CPU, + EPLL_CON0, + EPLL_CON1, + EPLL_CON2, + RPLL_CON0, + RPLL_CON1, + RPLL_CON2, SRC_TOP0, SRC_TOP1, SRC_TOP2, @@ -140,10 +184,13 @@ static unsigned long exynos5420_clk_regs[] __initdata = { SRC_TOP10, SRC_TOP11, SRC_TOP12, + SRC_MASK_TOP2, + SRC_MASK_TOP7, SRC_MASK_DISP10, SRC_MASK_FSYS, SRC_MASK_PERIC0, SRC_MASK_PERIC1, + SRC_ISP, DIV_TOP0, DIV_TOP1, DIV_TOP2, @@ -157,41 +204,71 @@ static unsigned long exynos5420_clk_regs[] __initdata = { DIV_PERIC2, DIV_PERIC3, DIV_PERIC4, + SCLK_DIV_ISP0, + SCLK_DIV_ISP1, + DIV2_RATIO0, + DIV4_RATIO, GATE_BUS_TOP, + GATE_BUS_GEN, GATE_BUS_FSYS0, + GATE_BUS_FSYS2, GATE_BUS_PERIC, GATE_BUS_PERIC1, GATE_BUS_PERIS0, GATE_BUS_PERIS1, + GATE_BUS_NOC, + GATE_TOP_SCLK_ISP, GATE_IP_GSCL0, GATE_IP_GSCL1, GATE_IP_MFC, GATE_IP_DISP1, GATE_IP_G3D, GATE_IP_GEN, + GATE_IP_FSYS, + GATE_IP_PERIC, + GATE_IP_PERIS, GATE_IP_MSCL, GATE_TOP_SCLK_GSCL, GATE_TOP_SCLK_DISP1, GATE_TOP_SCLK_MAU, GATE_TOP_SCLK_FSYS, GATE_TOP_SCLK_PERIC, - SRC_CDREX, + TOP_SPARE2, SRC_KFC, DIV_KFC0, }; +static unsigned long exynos5800_clk_regs[] __initdata = { + SRC_TOP8, + SRC_TOP9, + SRC_CAM, + SRC_TOP1, + DIV_TOP8, + DIV_TOP9, + DIV_CAM, + GATE_IP_CAM, +}; + static int exynos5420_clk_suspend(void) { - samsung_clk_save(reg_base, exynos5420_save, - ARRAY_SIZE(exynos5420_clk_regs)); + samsung_clk_save(reg_base, exynos5x_save, + ARRAY_SIZE(exynos5x_clk_regs)); + + if (exynos5x_soc == EXYNOS5800) + samsung_clk_save(reg_base, exynos5800_save, + ARRAY_SIZE(exynos5800_clk_regs)); return 0; } static void exynos5420_clk_resume(void) { - samsung_clk_restore(reg_base, exynos5420_save, - ARRAY_SIZE(exynos5420_clk_regs)); + samsung_clk_restore(reg_base, exynos5x_save, + ARRAY_SIZE(exynos5x_clk_regs)); + + if (exynos5x_soc == EXYNOS5800) + samsung_clk_restore(reg_base, exynos5800_save, + ARRAY_SIZE(exynos5800_clk_regs)); } static struct syscore_ops exynos5420_clk_syscore_ops = { @@ -201,108 +278,183 @@ static struct syscore_ops exynos5420_clk_syscore_ops = { static void exynos5420_clk_sleep_init(void) { - exynos5420_save = samsung_clk_alloc_reg_dump(exynos5420_clk_regs, - ARRAY_SIZE(exynos5420_clk_regs)); - if (!exynos5420_save) { + exynos5x_save = samsung_clk_alloc_reg_dump(exynos5x_clk_regs, + ARRAY_SIZE(exynos5x_clk_regs)); + if (!exynos5x_save) { pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", __func__); return; } + if (exynos5x_soc == EXYNOS5800) { + exynos5800_save = + samsung_clk_alloc_reg_dump(exynos5800_clk_regs, + ARRAY_SIZE(exynos5800_clk_regs)); + if (!exynos5800_save) + goto err_soc; + } + register_syscore_ops(&exynos5420_clk_syscore_ops); + return; +err_soc: + kfree(exynos5x_save); + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); + return; } #else static void exynos5420_clk_sleep_init(void) {} #endif /* list of all parent clocks */ -PNAME(mspll_cpu_p) = { "sclk_cpll", "sclk_dpll", - "sclk_mpll", "sclk_spll" }; -PNAME(cpu_p) = { "mout_apll" , "mout_mspll_cpu" }; -PNAME(kfc_p) = { "mout_kpll" , "mout_mspll_kfc" }; -PNAME(apll_p) = { "fin_pll", "fout_apll", }; -PNAME(bpll_p) = { "fin_pll", "fout_bpll", }; -PNAME(cpll_p) = { "fin_pll", "fout_cpll", }; -PNAME(dpll_p) = { "fin_pll", "fout_dpll", }; -PNAME(epll_p) = { "fin_pll", "fout_epll", }; -PNAME(ipll_p) = { "fin_pll", "fout_ipll", }; -PNAME(kpll_p) = { "fin_pll", "fout_kpll", }; -PNAME(mpll_p) = { 
"fin_pll", "fout_mpll", }; -PNAME(rpll_p) = { "fin_pll", "fout_rpll", }; -PNAME(spll_p) = { "fin_pll", "fout_spll", }; -PNAME(vpll_p) = { "fin_pll", "fout_vpll", }; - -PNAME(group1_p) = { "sclk_cpll", "sclk_dpll", "sclk_mpll" }; -PNAME(group2_p) = { "fin_pll", "sclk_cpll", "sclk_dpll", "sclk_mpll", - "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" }; -PNAME(group3_p) = { "sclk_rpll", "sclk_spll" }; -PNAME(group4_p) = { "sclk_ipll", "sclk_dpll", "sclk_mpll" }; -PNAME(group5_p) = { "sclk_vpll", "sclk_dpll" }; - -PNAME(sw_aclk66_p) = { "dout_aclk66", "sclk_spll" }; -PNAME(aclk66_peric_p) = { "fin_pll", "mout_sw_aclk66" }; - -PNAME(sw_aclk200_fsys_p) = { "dout_aclk200_fsys", "sclk_spll"}; -PNAME(user_aclk200_fsys_p) = { "fin_pll", "mout_sw_aclk200_fsys" }; - -PNAME(sw_aclk200_fsys2_p) = { "dout_aclk200_fsys2", "sclk_spll"}; -PNAME(user_aclk200_fsys2_p) = { "fin_pll", "mout_sw_aclk200_fsys2" }; - -PNAME(sw_aclk200_p) = { "dout_aclk200", "sclk_spll"}; -PNAME(aclk200_disp1_p) = { "fin_pll", "mout_sw_aclk200" }; - -PNAME(sw_aclk400_mscl_p) = { "dout_aclk400_mscl", "sclk_spll"}; -PNAME(user_aclk400_mscl_p) = { "fin_pll", "mout_sw_aclk400_mscl" }; - -PNAME(sw_aclk333_p) = { "dout_aclk333", "sclk_spll"}; -PNAME(user_aclk333_p) = { "fin_pll", "mout_sw_aclk333" }; - -PNAME(sw_aclk166_p) = { "dout_aclk166", "sclk_spll"}; -PNAME(user_aclk166_p) = { "fin_pll", "mout_sw_aclk166" }; - -PNAME(sw_aclk266_p) = { "dout_aclk266", "sclk_spll"}; -PNAME(user_aclk266_p) = { "fin_pll", "mout_sw_aclk266" }; - -PNAME(sw_aclk333_432_gscl_p) = { "dout_aclk333_432_gscl", "sclk_spll"}; -PNAME(user_aclk333_432_gscl_p) = { "fin_pll", "mout_sw_aclk333_432_gscl" }; - -PNAME(sw_aclk300_gscl_p) = { "dout_aclk300_gscl", "sclk_spll"}; -PNAME(user_aclk300_gscl_p) = { "fin_pll", "mout_sw_aclk300_gscl" }; - -PNAME(sw_aclk300_disp1_p) = { "dout_aclk300_disp1", "sclk_spll"}; -PNAME(user_aclk300_disp1_p) = { "fin_pll", "mout_sw_aclk300_disp1" }; - -PNAME(sw_aclk300_jpeg_p) = { "dout_aclk300_jpeg", "sclk_spll"}; -PNAME(user_aclk300_jpeg_p) = { "fin_pll", "mout_sw_aclk300_jpeg" }; - -PNAME(sw_aclk_g3d_p) = { "dout_aclk_g3d", "sclk_spll"}; -PNAME(user_aclk_g3d_p) = { "fin_pll", "mout_sw_aclk_g3d" }; - -PNAME(sw_aclk266_g2d_p) = { "dout_aclk266_g2d", "sclk_spll"}; -PNAME(user_aclk266_g2d_p) = { "fin_pll", "mout_sw_aclk266_g2d" }; - -PNAME(sw_aclk333_g2d_p) = { "dout_aclk333_g2d", "sclk_spll"}; -PNAME(user_aclk333_g2d_p) = { "fin_pll", "mout_sw_aclk333_g2d" }; - -PNAME(audio0_p) = { "fin_pll", "cdclk0", "sclk_dpll", "sclk_mpll", - "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" }; -PNAME(audio1_p) = { "fin_pll", "cdclk1", "sclk_dpll", "sclk_mpll", - "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" }; -PNAME(audio2_p) = { "fin_pll", "cdclk2", "sclk_dpll", "sclk_mpll", - "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" }; -PNAME(spdif_p) = { "fin_pll", "dout_audio0", "dout_audio1", "dout_audio2", - "spdif_extclk", "sclk_ipll", "sclk_epll", "sclk_rpll" }; -PNAME(hdmi_p) = { "dout_hdmi_pixel", "sclk_hdmiphy" }; -PNAME(maudio0_p) = { "fin_pll", "maudio_clk", "sclk_dpll", "sclk_mpll", - "sclk_spll", "sclk_ipll", "sclk_epll", "sclk_rpll" }; +PNAME(mout_mspll_cpu_p) = {"mout_sclk_cpll", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll"}; +PNAME(mout_cpu_p) = {"mout_apll" , "mout_mspll_cpu"}; +PNAME(mout_kfc_p) = {"mout_kpll" , "mout_mspll_kfc"}; +PNAME(mout_apll_p) = {"fin_pll", "fout_apll"}; +PNAME(mout_bpll_p) = {"fin_pll", "fout_bpll"}; +PNAME(mout_cpll_p) = {"fin_pll", "fout_cpll"}; +PNAME(mout_dpll_p) = {"fin_pll", "fout_dpll"}; 
+PNAME(mout_epll_p) = {"fin_pll", "fout_epll"}; +PNAME(mout_ipll_p) = {"fin_pll", "fout_ipll"}; +PNAME(mout_kpll_p) = {"fin_pll", "fout_kpll"}; +PNAME(mout_mpll_p) = {"fin_pll", "fout_mpll"}; +PNAME(mout_rpll_p) = {"fin_pll", "fout_rpll"}; +PNAME(mout_spll_p) = {"fin_pll", "fout_spll"}; +PNAME(mout_vpll_p) = {"fin_pll", "fout_vpll"}; + +PNAME(mout_group1_p) = {"mout_sclk_cpll", "mout_sclk_dpll", + "mout_sclk_mpll"}; +PNAME(mout_group2_p) = {"fin_pll", "mout_sclk_cpll", + "mout_sclk_dpll", "mout_sclk_mpll", "mout_sclk_spll", + "mout_sclk_ipll", "mout_sclk_epll", "mout_sclk_rpll"}; +PNAME(mout_group3_p) = {"mout_sclk_rpll", "mout_sclk_spll"}; +PNAME(mout_group4_p) = {"mout_sclk_ipll", "mout_sclk_dpll", "mout_sclk_mpll"}; +PNAME(mout_group5_p) = {"mout_sclk_vpll", "mout_sclk_dpll"}; + +PNAME(mout_fimd1_final_p) = {"mout_fimd1", "mout_fimd1_opt"}; +PNAME(mout_sw_aclk66_p) = {"dout_aclk66", "mout_sclk_spll"}; +PNAME(mout_user_aclk66_peric_p) = { "fin_pll", "mout_sw_aclk66"}; +PNAME(mout_user_pclk66_gpio_p) = {"mout_sw_aclk66", "ff_sw_aclk66"}; + +PNAME(mout_sw_aclk200_fsys_p) = {"dout_aclk200_fsys", "mout_sclk_spll"}; +PNAME(mout_sw_pclk200_fsys_p) = {"dout_pclk200_fsys", "mout_sclk_spll"}; +PNAME(mout_user_pclk200_fsys_p) = {"fin_pll", "mout_sw_pclk200_fsys"}; +PNAME(mout_user_aclk200_fsys_p) = {"fin_pll", "mout_sw_aclk200_fsys"}; + +PNAME(mout_sw_aclk200_fsys2_p) = {"dout_aclk200_fsys2", "mout_sclk_spll"}; +PNAME(mout_user_aclk200_fsys2_p) = {"fin_pll", "mout_sw_aclk200_fsys2"}; +PNAME(mout_sw_aclk100_noc_p) = {"dout_aclk100_noc", "mout_sclk_spll"}; +PNAME(mout_user_aclk100_noc_p) = {"fin_pll", "mout_sw_aclk100_noc"}; + +PNAME(mout_sw_aclk400_wcore_p) = {"dout_aclk400_wcore", "mout_sclk_spll"}; +PNAME(mout_aclk400_wcore_bpll_p) = {"mout_aclk400_wcore", "sclk_bpll"}; +PNAME(mout_user_aclk400_wcore_p) = {"fin_pll", "mout_sw_aclk400_wcore"}; + +PNAME(mout_sw_aclk400_isp_p) = {"dout_aclk400_isp", "mout_sclk_spll"}; +PNAME(mout_user_aclk400_isp_p) = {"fin_pll", "mout_sw_aclk400_isp"}; + +PNAME(mout_sw_aclk333_432_isp0_p) = {"dout_aclk333_432_isp0", + "mout_sclk_spll"}; +PNAME(mout_user_aclk333_432_isp0_p) = {"fin_pll", "mout_sw_aclk333_432_isp0"}; + +PNAME(mout_sw_aclk333_432_isp_p) = {"dout_aclk333_432_isp", "mout_sclk_spll"}; +PNAME(mout_user_aclk333_432_isp_p) = {"fin_pll", "mout_sw_aclk333_432_isp"}; + +PNAME(mout_sw_aclk200_p) = {"dout_aclk200", "mout_sclk_spll"}; +PNAME(mout_user_aclk200_disp1_p) = {"fin_pll", "mout_sw_aclk200"}; + +PNAME(mout_sw_aclk400_mscl_p) = {"dout_aclk400_mscl", "mout_sclk_spll"}; +PNAME(mout_user_aclk400_mscl_p) = {"fin_pll", "mout_sw_aclk400_mscl"}; + +PNAME(mout_sw_aclk333_p) = {"dout_aclk333", "mout_sclk_spll"}; +PNAME(mout_user_aclk333_p) = {"fin_pll", "mout_sw_aclk333"}; + +PNAME(mout_sw_aclk166_p) = {"dout_aclk166", "mout_sclk_spll"}; +PNAME(mout_user_aclk166_p) = {"fin_pll", "mout_sw_aclk166"}; + +PNAME(mout_sw_aclk266_p) = {"dout_aclk266", "mout_sclk_spll"}; +PNAME(mout_user_aclk266_p) = {"fin_pll", "mout_sw_aclk266"}; +PNAME(mout_user_aclk266_isp_p) = {"fin_pll", "mout_sw_aclk266"}; + +PNAME(mout_sw_aclk333_432_gscl_p) = {"dout_aclk333_432_gscl", "mout_sclk_spll"}; +PNAME(mout_user_aclk333_432_gscl_p) = {"fin_pll", "mout_sw_aclk333_432_gscl"}; + +PNAME(mout_sw_aclk300_gscl_p) = {"dout_aclk300_gscl", "mout_sclk_spll"}; +PNAME(mout_user_aclk300_gscl_p) = {"fin_pll", "mout_sw_aclk300_gscl"}; + +PNAME(mout_sw_aclk300_disp1_p) = {"dout_aclk300_disp1", "mout_sclk_spll"}; +PNAME(mout_sw_aclk400_disp1_p) = {"dout_aclk400_disp1", "mout_sclk_spll"}; 
+PNAME(mout_user_aclk300_disp1_p) = {"fin_pll", "mout_sw_aclk300_disp1"}; +PNAME(mout_user_aclk400_disp1_p) = {"fin_pll", "mout_sw_aclk400_disp1"}; + +PNAME(mout_sw_aclk300_jpeg_p) = {"dout_aclk300_jpeg", "mout_sclk_spll"}; +PNAME(mout_user_aclk300_jpeg_p) = {"fin_pll", "mout_sw_aclk300_jpeg"}; + +PNAME(mout_sw_aclk_g3d_p) = {"dout_aclk_g3d", "mout_sclk_spll"}; +PNAME(mout_user_aclk_g3d_p) = {"fin_pll", "mout_sw_aclk_g3d"}; + +PNAME(mout_sw_aclk266_g2d_p) = {"dout_aclk266_g2d", "mout_sclk_spll"}; +PNAME(mout_user_aclk266_g2d_p) = {"fin_pll", "mout_sw_aclk266_g2d"}; + +PNAME(mout_sw_aclk333_g2d_p) = {"dout_aclk333_g2d", "mout_sclk_spll"}; +PNAME(mout_user_aclk333_g2d_p) = {"fin_pll", "mout_sw_aclk333_g2d"}; + +PNAME(mout_audio0_p) = {"fin_pll", "cdclk0", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll", + "mout_sclk_epll", "mout_sclk_rpll"}; +PNAME(mout_audio1_p) = {"fin_pll", "cdclk1", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll", + "mout_sclk_epll", "mout_sclk_rpll"}; +PNAME(mout_audio2_p) = {"fin_pll", "cdclk2", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll", + "mout_sclk_epll", "mout_sclk_rpll"}; +PNAME(mout_spdif_p) = {"fin_pll", "dout_audio0", "dout_audio1", + "dout_audio2", "spdif_extclk", "mout_sclk_ipll", + "mout_sclk_epll", "mout_sclk_rpll"}; +PNAME(mout_hdmi_p) = {"dout_hdmi_pixel", "sclk_hdmiphy"}; +PNAME(mout_maudio0_p) = {"fin_pll", "maudio_clk", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll", "mout_sclk_ipll", + "mout_sclk_epll", "mout_sclk_rpll"}; +PNAME(mout_mau_epll_clk_p) = {"mout_sclk_epll", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll"}; +/* List of parents specific to exynos5800 */ +PNAME(mout_epll2_5800_p) = { "mout_sclk_epll", "ff_dout_epll2" }; +PNAME(mout_group1_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll", + "mout_sclk_mpll", "ff_dout_spll2" }; +PNAME(mout_group2_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll", + "mout_sclk_mpll", "ff_dout_spll2", + "mout_epll2", "mout_sclk_ipll" }; +PNAME(mout_group3_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll", + "mout_sclk_mpll", "ff_dout_spll2", + "mout_epll2" }; +PNAME(mout_group5_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll" }; +PNAME(mout_group6_5800_p) = { "mout_sclk_ipll", "mout_sclk_dpll", + "mout_sclk_mpll", "ff_dout_spll2" }; +PNAME(mout_group7_5800_p) = { "mout_sclk_cpll", "mout_sclk_dpll", + "mout_sclk_mpll", "mout_sclk_spll", + "mout_epll2", "mout_sclk_ipll" }; +PNAME(mout_mau_epll_clk_5800_p) = { "mout_sclk_epll", "mout_sclk_dpll", + "mout_sclk_mpll", + "ff_dout_spll2" }; +PNAME(mout_group8_5800_p) = { "dout_aclk432_scaler", "dout_sclk_sw" }; +PNAME(mout_group9_5800_p) = { "dout_osc_div", "mout_sw_aclk432_scaler" }; +PNAME(mout_group10_5800_p) = { "dout_aclk432_cam", "dout_sclk_sw" }; +PNAME(mout_group11_5800_p) = { "dout_osc_div", "mout_sw_aclk432_cam" }; +PNAME(mout_group12_5800_p) = { "dout_aclkfl1_550_cam", "dout_sclk_sw" }; +PNAME(mout_group13_5800_p) = { "dout_osc_div", "mout_sw_aclkfl1_550_cam" }; +PNAME(mout_group14_5800_p) = { "dout_aclk550_cam", "dout_sclk_sw" }; +PNAME(mout_group15_5800_p) = { "dout_osc_div", "mout_sw_aclk550_cam" }; /* fixed rate clocks generated outside the soc */ -static struct samsung_fixed_rate_clock exynos5420_fixed_rate_ext_clks[] __initdata = { +static struct samsung_fixed_rate_clock + exynos5x_fixed_rate_ext_clks[] __initdata = { FRATE(CLK_FIN_PLL, "fin_pll", NULL, CLK_IS_ROOT, 0), }; /* fixed rate clocks generated inside the soc */ -static 
struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = { +static struct samsung_fixed_rate_clock exynos5x_fixed_rate_clks[] __initdata = { FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 24000000), FRATE(0, "sclk_pwi", NULL, CLK_IS_ROOT, 24000000), FRATE(0, "sclk_usbh20", NULL, CLK_IS_ROOT, 48000000), @@ -310,146 +462,309 @@ static struct samsung_fixed_rate_clock exynos5420_fixed_rate_clks[] __initdata = FRATE(0, "sclk_usbh20_scan_clk", NULL, CLK_IS_ROOT, 480000000), }; -static struct samsung_fixed_factor_clock exynos5420_fixed_factor_clks[] __initdata = { - FFACTOR(0, "sclk_hsic_12m", "fin_pll", 1, 2, 0), +static struct samsung_fixed_factor_clock + exynos5x_fixed_factor_clks[] __initdata = { + FFACTOR(0, "ff_hsic_12m", "fin_pll", 1, 2, 0), + FFACTOR(0, "ff_sw_aclk66", "mout_sw_aclk66", 1, 2, 0), }; -static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = { - MUX(0, "mout_mspll_kfc", mspll_cpu_p, SRC_TOP7, 8, 2), - MUX(0, "mout_mspll_cpu", mspll_cpu_p, SRC_TOP7, 12, 2), - MUX(0, "mout_apll", apll_p, SRC_CPU, 0, 1), - MUX(0, "mout_cpu", cpu_p, SRC_CPU, 16, 1), - MUX(0, "mout_kpll", kpll_p, SRC_KFC, 0, 1), - MUX(0, "mout_cpu_kfc", kfc_p, SRC_KFC, 16, 1), - - MUX(0, "sclk_bpll", bpll_p, SRC_CDREX, 0, 1), - - MUX_A(0, "mout_aclk400_mscl", group1_p, - SRC_TOP0, 4, 2, "aclk400_mscl"), - MUX(0, "mout_aclk200", group1_p, SRC_TOP0, 8, 2), - MUX(0, "mout_aclk200_fsys2", group1_p, SRC_TOP0, 12, 2), - MUX(0, "mout_aclk200_fsys", group1_p, SRC_TOP0, 28, 2), - - MUX(0, "mout_aclk333_432_gscl", group4_p, SRC_TOP1, 0, 2), - MUX(0, "mout_aclk66", group1_p, SRC_TOP1, 8, 2), - MUX(0, "mout_aclk266", group1_p, SRC_TOP1, 20, 2), - MUX(0, "mout_aclk166", group1_p, SRC_TOP1, 24, 2), - MUX(0, "mout_aclk333", group1_p, SRC_TOP1, 28, 2), - - MUX(0, "mout_aclk333_g2d", group1_p, SRC_TOP2, 8, 2), - MUX(0, "mout_aclk266_g2d", group1_p, SRC_TOP2, 12, 2), - MUX(0, "mout_aclk_g3d", group5_p, SRC_TOP2, 16, 1), - MUX(0, "mout_aclk300_jpeg", group1_p, SRC_TOP2, 20, 2), - MUX(0, "mout_aclk300_disp1", group1_p, SRC_TOP2, 24, 2), - MUX(0, "mout_aclk300_gscl", group1_p, SRC_TOP2, 28, 2), - - MUX(0, "mout_user_aclk400_mscl", user_aclk400_mscl_p, +static struct samsung_fixed_factor_clock + exynos5800_fixed_factor_clks[] __initdata = { + FFACTOR(0, "ff_dout_epll2", "mout_sclk_epll", 1, 2, 0), + FFACTOR(0, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0), +}; + +struct samsung_mux_clock exynos5800_mux_clks[] __initdata = { + MUX(0, "mout_aclk400_isp", mout_group3_5800_p, SRC_TOP0, 0, 3), + MUX(0, "mout_aclk400_mscl", mout_group3_5800_p, SRC_TOP0, 4, 3), + MUX(0, "mout_aclk400_wcore", mout_group2_5800_p, SRC_TOP0, 16, 3), + MUX(0, "mout_aclk100_noc", mout_group1_5800_p, SRC_TOP0, 20, 2), + + MUX(0, "mout_aclk333_432_gscl", mout_group6_5800_p, SRC_TOP1, 0, 2), + MUX(0, "mout_aclk333_432_isp", mout_group6_5800_p, SRC_TOP1, 4, 2), + MUX(0, "mout_aclk333_432_isp0", mout_group6_5800_p, SRC_TOP1, 12, 2), + MUX(0, "mout_aclk266", mout_group5_5800_p, SRC_TOP1, 20, 2), + MUX(0, "mout_aclk333", mout_group1_5800_p, SRC_TOP1, 28, 2), + + MUX(0, "mout_aclk400_disp1", mout_group7_5800_p, SRC_TOP2, 4, 3), + MUX(0, "mout_aclk333_g2d", mout_group5_5800_p, SRC_TOP2, 8, 2), + MUX(0, "mout_aclk266_g2d", mout_group5_5800_p, SRC_TOP2, 12, 2), + MUX(0, "mout_aclk300_jpeg", mout_group5_5800_p, SRC_TOP2, 20, 2), + MUX(0, "mout_aclk300_disp1", mout_group5_5800_p, SRC_TOP2, 24, 2), + MUX(0, "mout_aclk300_gscl", mout_group5_5800_p, SRC_TOP2, 28, 2), + + MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_5800_p, SRC_TOP7, + 20, 
2), + MUX(0, "sclk_bpll", mout_bpll_p, SRC_TOP7, 24, 1), + MUX(0, "mout_epll2", mout_epll2_5800_p, SRC_TOP7, 28, 1), + + MUX(0, "mout_aclk550_cam", mout_group3_5800_p, SRC_TOP8, 16, 3), + MUX(0, "mout_aclkfl1_550_cam", mout_group3_5800_p, SRC_TOP8, 20, 3), + MUX(0, "mout_aclk432_cam", mout_group6_5800_p, SRC_TOP8, 24, 2), + MUX(0, "mout_aclk432_scaler", mout_group6_5800_p, SRC_TOP8, 28, 2), + + MUX(0, "mout_user_aclk550_cam", mout_group15_5800_p, + SRC_TOP9, 16, 1), + MUX(0, "mout_user_aclkfl1_550_cam", mout_group13_5800_p, + SRC_TOP9, 20, 1), + MUX(0, "mout_user_aclk432_cam", mout_group11_5800_p, + SRC_TOP9, 24, 1), + MUX(0, "mout_user_aclk432_scaler", mout_group9_5800_p, + SRC_TOP9, 28, 1), + + MUX(0, "mout_sw_aclk550_cam", mout_group14_5800_p, SRC_TOP13, 16, 1), + MUX(0, "mout_sw_aclkfl1_550_cam", mout_group12_5800_p, + SRC_TOP13, 20, 1), + MUX(0, "mout_sw_aclk432_cam", mout_group10_5800_p, + SRC_TOP13, 24, 1), + MUX(0, "mout_sw_aclk432_scaler", mout_group8_5800_p, + SRC_TOP13, 28, 1), + + MUX(0, "mout_fimd1", mout_group2_p, SRC_DISP10, 4, 3), +}; + +struct samsung_div_clock exynos5800_div_clks[] __initdata = { + DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore", DIV_TOP0, 16, 3), + + DIV(0, "dout_aclk550_cam", "mout_aclk550_cam", + DIV_TOP8, 16, 3), + DIV(0, "dout_aclkfl1_550_cam", "mout_aclkfl1_550_cam", + DIV_TOP8, 20, 3), + DIV(0, "dout_aclk432_cam", "mout_aclk432_cam", + DIV_TOP8, 24, 3), + DIV(0, "dout_aclk432_scaler", "mout_aclk432_scaler", + DIV_TOP8, 28, 3), + + DIV(0, "dout_osc_div", "fin_pll", DIV_TOP9, 20, 3), + DIV(0, "dout_sclk_sw", "sclk_spll", DIV_TOP9, 24, 6), +}; + +struct samsung_gate_clock exynos5800_gate_clks[] __initdata = { + GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", + GATE_BUS_TOP, 24, 0, 0), + GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", + GATE_BUS_TOP, 27, 0, 0), +}; + +struct samsung_mux_clock exynos5420_mux_clks[] __initdata = { + MUX(0, "sclk_bpll", mout_bpll_p, TOP_SPARE2, 0, 1), + MUX(0, "mout_aclk400_wcore_bpll", mout_aclk400_wcore_bpll_p, + TOP_SPARE2, 4, 1), + + MUX(0, "mout_aclk400_isp", mout_group1_p, SRC_TOP0, 0, 2), + MUX_A(0, "mout_aclk400_mscl", mout_group1_p, + SRC_TOP0, 4, 2, "aclk400_mscl"), + MUX(0, "mout_aclk400_wcore", mout_group1_p, SRC_TOP0, 16, 2), + MUX(0, "mout_aclk100_noc", mout_group1_p, SRC_TOP0, 20, 2), + + MUX(0, "mout_aclk333_432_gscl", mout_group4_p, SRC_TOP1, 0, 2), + MUX(0, "mout_aclk333_432_isp", mout_group4_p, + SRC_TOP1, 4, 2), + MUX(0, "mout_aclk333_432_isp0", mout_group4_p, SRC_TOP1, 12, 2), + MUX(0, "mout_aclk266", mout_group1_p, SRC_TOP1, 20, 2), + MUX(0, "mout_aclk333", mout_group1_p, SRC_TOP1, 28, 2), + + MUX(0, "mout_aclk400_disp1", mout_group1_p, SRC_TOP2, 4, 2), + MUX(0, "mout_aclk333_g2d", mout_group1_p, SRC_TOP2, 8, 2), + MUX(0, "mout_aclk266_g2d", mout_group1_p, SRC_TOP2, 12, 2), + MUX(0, "mout_aclk300_jpeg", mout_group1_p, SRC_TOP2, 20, 2), + MUX(0, "mout_aclk300_disp1", mout_group1_p, SRC_TOP2, 24, 2), + MUX(0, "mout_aclk300_gscl", mout_group1_p, SRC_TOP2, 28, 2), + + MUX(0, "mout_mau_epll_clk", mout_mau_epll_clk_p, SRC_TOP7, 20, 2), + + MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1), +}; + +struct samsung_div_clock exynos5420_div_clks[] __initdata = { + DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore_bpll", + DIV_TOP0, 16, 3), +}; + +static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = { + MUX(0, "mout_user_pclk66_gpio", mout_user_pclk66_gpio_p, + SRC_TOP7, 4, 1), + MUX(0, "mout_mspll_kfc", mout_mspll_cpu_p, SRC_TOP7, 8, 2), + MUX(0, 
"mout_mspll_cpu", mout_mspll_cpu_p, SRC_TOP7, 12, 2), + + MUX(0, "mout_apll", mout_apll_p, SRC_CPU, 0, 1), + MUX(0, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1), + MUX(0, "mout_kpll", mout_kpll_p, SRC_KFC, 0, 1), + MUX(0, "mout_kfc", mout_kfc_p, SRC_KFC, 16, 1), + + MUX(0, "mout_aclk200", mout_group1_p, SRC_TOP0, 8, 2), + MUX(0, "mout_aclk200_fsys2", mout_group1_p, SRC_TOP0, 12, 2), + MUX(0, "mout_pclk200_fsys", mout_group1_p, SRC_TOP0, 24, 2), + MUX(0, "mout_aclk200_fsys", mout_group1_p, SRC_TOP0, 28, 2), + + MUX(0, "mout_aclk66", mout_group1_p, SRC_TOP1, 8, 2), + MUX(0, "mout_aclk166", mout_group1_p, SRC_TOP1, 24, 2), + + MUX(0, "mout_aclk_g3d", mout_group5_p, SRC_TOP2, 16, 1), + + MUX(0, "mout_user_aclk400_isp", mout_user_aclk400_isp_p, + SRC_TOP3, 0, 1), + MUX(0, "mout_user_aclk400_mscl", mout_user_aclk400_mscl_p, SRC_TOP3, 4, 1), - MUX_A(0, "mout_aclk200_disp1", aclk200_disp1_p, - SRC_TOP3, 8, 1, "aclk200_disp1"), - MUX(0, "mout_user_aclk200_fsys2", user_aclk200_fsys2_p, + MUX(0, "mout_user_aclk200_disp1", mout_user_aclk200_disp1_p, + SRC_TOP3, 8, 1), + MUX(0, "mout_user_aclk200_fsys2", mout_user_aclk200_fsys2_p, SRC_TOP3, 12, 1), - MUX(0, "mout_user_aclk200_fsys", user_aclk200_fsys_p, + MUX(0, "mout_user_aclk400_wcore", mout_user_aclk400_wcore_p, + SRC_TOP3, 16, 1), + MUX(0, "mout_user_aclk100_noc", mout_user_aclk100_noc_p, + SRC_TOP3, 20, 1), + MUX(0, "mout_user_pclk200_fsys", mout_user_pclk200_fsys_p, + SRC_TOP3, 24, 1), + MUX(0, "mout_user_aclk200_fsys", mout_user_aclk200_fsys_p, SRC_TOP3, 28, 1), - MUX(0, "mout_user_aclk333_432_gscl", user_aclk333_432_gscl_p, + MUX(0, "mout_user_aclk333_432_gscl", mout_user_aclk333_432_gscl_p, SRC_TOP4, 0, 1), - MUX(0, "mout_aclk66_peric", aclk66_peric_p, SRC_TOP4, 8, 1), - MUX(0, "mout_user_aclk266", user_aclk266_p, SRC_TOP4, 20, 1), - MUX(0, "mout_user_aclk166", user_aclk166_p, SRC_TOP4, 24, 1), - MUX(0, "mout_user_aclk333", user_aclk333_p, SRC_TOP4, 28, 1), - - MUX(0, "mout_aclk66_psgen", aclk66_peric_p, SRC_TOP5, 4, 1), - MUX(0, "mout_user_aclk333_g2d", user_aclk333_g2d_p, SRC_TOP5, 8, 1), - MUX(0, "mout_user_aclk266_g2d", user_aclk266_g2d_p, SRC_TOP5, 12, 1), - MUX_A(0, "mout_user_aclk_g3d", user_aclk_g3d_p, - SRC_TOP5, 16, 1, "aclkg3d"), - MUX(0, "mout_user_aclk300_jpeg", user_aclk300_jpeg_p, + MUX(0, "mout_user_aclk333_432_isp", mout_user_aclk333_432_isp_p, + SRC_TOP4, 4, 1), + MUX(0, "mout_user_aclk66_peric", mout_user_aclk66_peric_p, + SRC_TOP4, 8, 1), + MUX(0, "mout_user_aclk333_432_isp0", mout_user_aclk333_432_isp0_p, + SRC_TOP4, 12, 1), + MUX(0, "mout_user_aclk266_isp", mout_user_aclk266_isp_p, + SRC_TOP4, 16, 1), + MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1), + MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1), + MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1), + + MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p, + SRC_TOP5, 0, 1), + MUX(0, "mout_user_aclk66_psgen", mout_user_aclk66_peric_p, + SRC_TOP5, 4, 1), + MUX(0, "mout_user_aclk333_g2d", mout_user_aclk333_g2d_p, + SRC_TOP5, 8, 1), + MUX(0, "mout_user_aclk266_g2d", mout_user_aclk266_g2d_p, + SRC_TOP5, 12, 1), + MUX(CLK_MOUT_G3D, "mout_user_aclk_g3d", mout_user_aclk_g3d_p, + SRC_TOP5, 16, 1), + MUX(0, "mout_user_aclk300_jpeg", mout_user_aclk300_jpeg_p, SRC_TOP5, 20, 1), - MUX(0, "mout_user_aclk300_disp1", user_aclk300_disp1_p, + MUX(0, "mout_user_aclk300_disp1", mout_user_aclk300_disp1_p, SRC_TOP5, 24, 1), - MUX(0, "mout_user_aclk300_gscl", user_aclk300_gscl_p, + MUX(0, "mout_user_aclk300_gscl", 
mout_user_aclk300_gscl_p, SRC_TOP5, 28, 1), - MUX(0, "sclk_mpll", mpll_p, SRC_TOP6, 0, 1), - MUX(0, "sclk_vpll", vpll_p, SRC_TOP6, 4, 1), - MUX(0, "sclk_spll", spll_p, SRC_TOP6, 8, 1), - MUX(0, "sclk_ipll", ipll_p, SRC_TOP6, 12, 1), - MUX(0, "sclk_rpll", rpll_p, SRC_TOP6, 16, 1), - MUX(0, "sclk_epll", epll_p, SRC_TOP6, 20, 1), - MUX(0, "sclk_dpll", dpll_p, SRC_TOP6, 24, 1), - MUX(0, "sclk_cpll", cpll_p, SRC_TOP6, 28, 1), - - MUX(0, "mout_sw_aclk400_mscl", sw_aclk400_mscl_p, SRC_TOP10, 4, 1), - MUX(0, "mout_sw_aclk200", sw_aclk200_p, SRC_TOP10, 8, 1), - MUX(0, "mout_sw_aclk200_fsys2", sw_aclk200_fsys2_p, + MUX(0, "mout_sclk_mpll", mout_mpll_p, SRC_TOP6, 0, 1), + MUX(CLK_MOUT_VPLL, "mout_sclk_vpll", mout_vpll_p, SRC_TOP6, 4, 1), + MUX(0, "mout_sclk_spll", mout_spll_p, SRC_TOP6, 8, 1), + MUX(0, "mout_sclk_ipll", mout_ipll_p, SRC_TOP6, 12, 1), + MUX(0, "mout_sclk_rpll", mout_rpll_p, SRC_TOP6, 16, 1), + MUX(0, "mout_sclk_epll", mout_epll_p, SRC_TOP6, 20, 1), + MUX(0, "mout_sclk_dpll", mout_dpll_p, SRC_TOP6, 24, 1), + MUX(0, "mout_sclk_cpll", mout_cpll_p, SRC_TOP6, 28, 1), + + MUX(0, "mout_sw_aclk400_isp", mout_sw_aclk400_isp_p, + SRC_TOP10, 0, 1), + MUX(0, "mout_sw_aclk400_mscl", mout_sw_aclk400_mscl_p, + SRC_TOP10, 4, 1), + MUX(0, "mout_sw_aclk200", mout_sw_aclk200_p, SRC_TOP10, 8, 1), + MUX(0, "mout_sw_aclk200_fsys2", mout_sw_aclk200_fsys2_p, SRC_TOP10, 12, 1), - MUX(0, "mout_sw_aclk200_fsys", sw_aclk200_fsys_p, SRC_TOP10, 28, 1), - - MUX(0, "mout_sw_aclk333_432_gscl", sw_aclk333_432_gscl_p, + MUX(0, "mout_sw_aclk400_wcore", mout_sw_aclk400_wcore_p, + SRC_TOP10, 16, 1), + MUX(0, "mout_sw_aclk100_noc", mout_sw_aclk100_noc_p, + SRC_TOP10, 20, 1), + MUX(0, "mout_sw_pclk200_fsys", mout_sw_pclk200_fsys_p, + SRC_TOP10, 24, 1), + MUX(0, "mout_sw_aclk200_fsys", mout_sw_aclk200_fsys_p, + SRC_TOP10, 28, 1), + + MUX(0, "mout_sw_aclk333_432_gscl", mout_sw_aclk333_432_gscl_p, SRC_TOP11, 0, 1), - MUX(0, "mout_sw_aclk66", sw_aclk66_p, SRC_TOP11, 8, 1), - MUX(0, "mout_sw_aclk266", sw_aclk266_p, SRC_TOP11, 20, 1), - MUX(0, "mout_sw_aclk166", sw_aclk166_p, SRC_TOP11, 24, 1), - MUX(0, "mout_sw_aclk333", sw_aclk333_p, SRC_TOP11, 28, 1), - - MUX(0, "mout_sw_aclk333_g2d", sw_aclk333_g2d_p, SRC_TOP12, 8, 1), - MUX(0, "mout_sw_aclk266_g2d", sw_aclk266_g2d_p, SRC_TOP12, 12, 1), - MUX(0, "mout_sw_aclk_g3d", sw_aclk_g3d_p, SRC_TOP12, 16, 1), - MUX(0, "mout_sw_aclk300_jpeg", sw_aclk300_jpeg_p, SRC_TOP12, 20, 1), - MUX(0, "mout_sw_aclk300_disp1", sw_aclk300_disp1_p, + MUX(0, "mout_sw_aclk333_432_isp", mout_sw_aclk333_432_isp_p, + SRC_TOP11, 4, 1), + MUX(0, "mout_sw_aclk66", mout_sw_aclk66_p, SRC_TOP11, 8, 1), + MUX(0, "mout_sw_aclk333_432_isp0", mout_sw_aclk333_432_isp0_p, + SRC_TOP11, 12, 1), + MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1), + MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1), + MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1), + + MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p, + SRC_TOP12, 4, 1), + MUX(0, "mout_sw_aclk333_g2d", mout_sw_aclk333_g2d_p, + SRC_TOP12, 8, 1), + MUX(0, "mout_sw_aclk266_g2d", mout_sw_aclk266_g2d_p, + SRC_TOP12, 12, 1), + MUX(0, "mout_sw_aclk_g3d", mout_sw_aclk_g3d_p, SRC_TOP12, 16, 1), + MUX(0, "mout_sw_aclk300_jpeg", mout_sw_aclk300_jpeg_p, + SRC_TOP12, 20, 1), + MUX(0, "mout_sw_aclk300_disp1", mout_sw_aclk300_disp1_p, SRC_TOP12, 24, 1), - MUX(0, "mout_sw_aclk300_gscl", sw_aclk300_gscl_p, SRC_TOP12, 28, 1), + MUX(0, "mout_sw_aclk300_gscl", mout_sw_aclk300_gscl_p, + SRC_TOP12, 28, 1), /* DISP1 Block */ - MUX(0, "mout_fimd1", 
group3_p, SRC_DISP10, 4, 1), - MUX(0, "mout_mipi1", group2_p, SRC_DISP10, 16, 3), - MUX(0, "mout_dp1", group2_p, SRC_DISP10, 20, 3), - MUX(0, "mout_pixel", group2_p, SRC_DISP10, 24, 3), - MUX(CLK_MOUT_HDMI, "mout_hdmi", hdmi_p, SRC_DISP10, 28, 1), + MUX(0, "mout_mipi1", mout_group2_p, SRC_DISP10, 16, 3), + MUX(0, "mout_dp1", mout_group2_p, SRC_DISP10, 20, 3), + MUX(0, "mout_pixel", mout_group2_p, SRC_DISP10, 24, 3), + MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_DISP10, 28, 1), + MUX(0, "mout_fimd1_opt", mout_group2_p, SRC_DISP10, 8, 3), + + MUX(0, "mout_fimd1_final", mout_fimd1_final_p, TOP_SPARE2, 8, 1), /* MAU Block */ - MUX(0, "mout_maudio0", maudio0_p, SRC_MAU, 28, 3), + MUX(CLK_MOUT_MAUDIO0, "mout_maudio0", mout_maudio0_p, SRC_MAU, 28, 3), /* FSYS Block */ - MUX(0, "mout_usbd301", group2_p, SRC_FSYS, 4, 3), - MUX(0, "mout_mmc0", group2_p, SRC_FSYS, 8, 3), - MUX(0, "mout_mmc1", group2_p, SRC_FSYS, 12, 3), - MUX(0, "mout_mmc2", group2_p, SRC_FSYS, 16, 3), - MUX(0, "mout_usbd300", group2_p, SRC_FSYS, 20, 3), - MUX(0, "mout_unipro", group2_p, SRC_FSYS, 24, 3), + MUX(0, "mout_usbd301", mout_group2_p, SRC_FSYS, 4, 3), + MUX(0, "mout_mmc0", mout_group2_p, SRC_FSYS, 8, 3), + MUX(0, "mout_mmc1", mout_group2_p, SRC_FSYS, 12, 3), + MUX(0, "mout_mmc2", mout_group2_p, SRC_FSYS, 16, 3), + MUX(0, "mout_usbd300", mout_group2_p, SRC_FSYS, 20, 3), + MUX(0, "mout_unipro", mout_group2_p, SRC_FSYS, 24, 3), + MUX(0, "mout_mphy_refclk", mout_group2_p, SRC_FSYS, 28, 3), /* PERIC Block */ - MUX(0, "mout_uart0", group2_p, SRC_PERIC0, 4, 3), - MUX(0, "mout_uart1", group2_p, SRC_PERIC0, 8, 3), - MUX(0, "mout_uart2", group2_p, SRC_PERIC0, 12, 3), - MUX(0, "mout_uart3", group2_p, SRC_PERIC0, 16, 3), - MUX(0, "mout_pwm", group2_p, SRC_PERIC0, 24, 3), - MUX(0, "mout_spdif", spdif_p, SRC_PERIC0, 28, 3), - MUX(0, "mout_audio0", audio0_p, SRC_PERIC1, 8, 3), - MUX(0, "mout_audio1", audio1_p, SRC_PERIC1, 12, 3), - MUX(0, "mout_audio2", audio2_p, SRC_PERIC1, 16, 3), - MUX(0, "mout_spi0", group2_p, SRC_PERIC1, 20, 3), - MUX(0, "mout_spi1", group2_p, SRC_PERIC1, 24, 3), - MUX(0, "mout_spi2", group2_p, SRC_PERIC1, 28, 3), + MUX(0, "mout_uart0", mout_group2_p, SRC_PERIC0, 4, 3), + MUX(0, "mout_uart1", mout_group2_p, SRC_PERIC0, 8, 3), + MUX(0, "mout_uart2", mout_group2_p, SRC_PERIC0, 12, 3), + MUX(0, "mout_uart3", mout_group2_p, SRC_PERIC0, 16, 3), + MUX(0, "mout_pwm", mout_group2_p, SRC_PERIC0, 24, 3), + MUX(0, "mout_spdif", mout_spdif_p, SRC_PERIC0, 28, 3), + MUX(0, "mout_audio0", mout_audio0_p, SRC_PERIC1, 8, 3), + MUX(0, "mout_audio1", mout_audio1_p, SRC_PERIC1, 12, 3), + MUX(0, "mout_audio2", mout_audio2_p, SRC_PERIC1, 16, 3), + MUX(0, "mout_spi0", mout_group2_p, SRC_PERIC1, 20, 3), + MUX(0, "mout_spi1", mout_group2_p, SRC_PERIC1, 24, 3), + MUX(0, "mout_spi2", mout_group2_p, SRC_PERIC1, 28, 3), + + /* ISP Block */ + MUX(0, "mout_pwm_isp", mout_group2_p, SRC_ISP, 24, 3), + MUX(0, "mout_uart_isp", mout_group2_p, SRC_ISP, 20, 3), + MUX(0, "mout_spi0_isp", mout_group2_p, SRC_ISP, 12, 3), + MUX(0, "mout_spi1_isp", mout_group2_p, SRC_ISP, 16, 3), + MUX(0, "mout_isp_sensor", mout_group2_p, SRC_ISP, 28, 3), }; -static struct samsung_div_clock exynos5420_div_clks[] __initdata = { +static struct samsung_div_clock exynos5x_div_clks[] __initdata = { DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3), DIV(0, "sclk_apll", "mout_apll", DIV_CPU0, 24, 3), DIV(0, "armclk2", "div_arm", DIV_CPU0, 28, 3), - DIV(0, "div_kfc", "mout_cpu_kfc", DIV_KFC0, 0, 3), + DIV(0, "div_kfc", "mout_kfc", DIV_KFC0, 0, 3), DIV(0, "sclk_kpll", "mout_kpll", 
DIV_KFC0, 24, 3), + DIV(0, "dout_aclk400_isp", "mout_aclk400_isp", DIV_TOP0, 0, 3), DIV(0, "dout_aclk400_mscl", "mout_aclk400_mscl", DIV_TOP0, 4, 3), DIV(0, "dout_aclk200", "mout_aclk200", DIV_TOP0, 8, 3), DIV(0, "dout_aclk200_fsys2", "mout_aclk200_fsys2", DIV_TOP0, 12, 3), + DIV(0, "dout_aclk100_noc", "mout_aclk100_noc", DIV_TOP0, 20, 3), DIV(0, "dout_pclk200_fsys", "mout_pclk200_fsys", DIV_TOP0, 24, 3), DIV(0, "dout_aclk200_fsys", "mout_aclk200_fsys", DIV_TOP0, 28, 3), DIV(0, "dout_aclk333_432_gscl", "mout_aclk333_432_gscl", DIV_TOP1, 0, 3), + DIV(0, "dout_aclk333_432_isp", "mout_aclk333_432_isp", + DIV_TOP1, 4, 3), DIV(0, "dout_aclk66", "mout_aclk66", DIV_TOP1, 8, 6), + DIV(0, "dout_aclk333_432_isp0", "mout_aclk333_432_isp0", + DIV_TOP1, 16, 3), DIV(0, "dout_aclk266", "mout_aclk266", DIV_TOP1, 20, 3), DIV(0, "dout_aclk166", "mout_aclk166", DIV_TOP1, 24, 3), DIV(0, "dout_aclk333", "mout_aclk333", DIV_TOP1, 28, 3), @@ -458,15 +773,16 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = { DIV(0, "dout_aclk266_g2d", "mout_aclk266_g2d", DIV_TOP2, 12, 3), DIV(0, "dout_aclk_g3d", "mout_aclk_g3d", DIV_TOP2, 16, 3), DIV(0, "dout_aclk300_jpeg", "mout_aclk300_jpeg", DIV_TOP2, 20, 3), - DIV_A(0, "dout_aclk300_disp1", "mout_aclk300_disp1", - DIV_TOP2, 24, 3, "aclk300_disp1"), + DIV(0, "dout_aclk300_disp1", "mout_aclk300_disp1", DIV_TOP2, 24, 3), DIV(0, "dout_aclk300_gscl", "mout_aclk300_gscl", DIV_TOP2, 28, 3), /* DISP1 Block */ - DIV(0, "dout_fimd1", "mout_fimd1", DIV_DISP10, 0, 4), + DIV(0, "dout_fimd1", "mout_fimd1_final", DIV_DISP10, 0, 4), DIV(0, "dout_mipi1", "mout_mipi1", DIV_DISP10, 16, 8), DIV(0, "dout_dp1", "mout_dp1", DIV_DISP10, 24, 4), DIV(CLK_DOUT_PIXEL, "dout_hdmi_pixel", "mout_pixel", DIV_DISP10, 28, 4), + DIV(0, "dout_disp1_blk", "aclk200_disp1", DIV2_RATIO0, 16, 2), + DIV(0, "dout_aclk400_disp1", "mout_aclk400_disp1", DIV_TOP2, 4, 3), /* Audio Block */ DIV(0, "dout_maudio0", "mout_maudio0", DIV_MAU, 20, 4), @@ -484,6 +800,7 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = { DIV(0, "dout_mmc2", "mout_mmc2", DIV_FSYS1, 20, 10), DIV(0, "dout_unipro", "mout_unipro", DIV_FSYS2, 24, 8), + DIV(0, "dout_mphy_refclk", "mout_mphy_refclk", DIV_FSYS2, 16, 8), /* UART and PWM */ DIV(0, "dout_uart0", "mout_uart0", DIV_PERIC0, 8, 4), @@ -497,6 +814,9 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = { DIV(0, "dout_spi1", "mout_spi1", DIV_PERIC1, 24, 4), DIV(0, "dout_spi2", "mout_spi2", DIV_PERIC1, 28, 4), + /* Mfc Block */ + DIV(0, "dout_mfc_blk", "mout_user_aclk333", DIV4_RATIO, 0, 2), + /* PCM */ DIV(0, "dout_pcm1", "dout_audio1", DIV_PERIC2, 16, 8), DIV(0, "dout_pcm2", "dout_audio2", DIV_PERIC2, 24, 8), @@ -509,15 +829,43 @@ static struct samsung_div_clock exynos5420_div_clks[] __initdata = { DIV(0, "dout_audio2", "mout_audio2", DIV_PERIC3, 28, 4), /* SPI Pre-Ratio */ - DIV(0, "dout_pre_spi0", "dout_spi0", DIV_PERIC4, 8, 8), - DIV(0, "dout_pre_spi1", "dout_spi1", DIV_PERIC4, 16, 8), - DIV(0, "dout_pre_spi2", "dout_spi2", DIV_PERIC4, 24, 8), + DIV(0, "dout_spi0_pre", "dout_spi0", DIV_PERIC4, 8, 8), + DIV(0, "dout_spi1_pre", "dout_spi1", DIV_PERIC4, 16, 8), + DIV(0, "dout_spi2_pre", "dout_spi2", DIV_PERIC4, 24, 8), + + /* GSCL Block */ + DIV(0, "dout_gscl_blk_300", "mout_user_aclk300_gscl", + DIV2_RATIO0, 4, 2), + DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2), + + /* MSCL Block */ + DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2), + + /* PSGEN */ + DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 
8, 1), + DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1), + + /* ISP Block */ + DIV(0, "dout_isp_sensor0", "mout_isp_sensor", SCLK_DIV_ISP0, 8, 8), + DIV(0, "dout_isp_sensor1", "mout_isp_sensor", SCLK_DIV_ISP0, 16, 8), + DIV(0, "dout_isp_sensor2", "mout_isp_sensor", SCLK_DIV_ISP0, 24, 8), + DIV(0, "dout_pwm_isp", "mout_pwm_isp", SCLK_DIV_ISP1, 28, 4), + DIV(0, "dout_uart_isp", "mout_uart_isp", SCLK_DIV_ISP1, 24, 4), + DIV(0, "dout_spi0_isp", "mout_spi0_isp", SCLK_DIV_ISP1, 16, 4), + DIV(0, "dout_spi1_isp", "mout_spi1_isp", SCLK_DIV_ISP1, 20, 4), + DIV_F(0, "dout_spi0_isp_pre", "dout_spi0_isp", SCLK_DIV_ISP1, 0, 8, + CLK_SET_RATE_PARENT, 0), + DIV_F(0, "dout_spi1_isp_pre", "dout_spi1_isp", SCLK_DIV_ISP1, 8, 8, + CLK_SET_RATE_PARENT, 0), }; -static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = { - /* TODO: Re-verify the CG bits for all the gate clocks */ - GATE_A(CLK_MCT, "pclk_st", "aclk66_psgen", GATE_BUS_PERIS1, 2, 0, 0, - "mct"), +static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = { + /* G2D */ + GATE(CLK_MDMA0, "mdma0", "aclk266_g2d", GATE_IP_G2D, 1, 0, 0), + GATE(CLK_SSS, "sss", "aclk266_g2d", GATE_IP_G2D, 2, 0, 0), + GATE(CLK_G2D, "g2d", "aclk333_g2d", GATE_IP_G2D, 3, 0, 0), + GATE(CLK_SMMU_MDMA0, "smmu_mdma0", "aclk266_g2d", GATE_IP_G2D, 5, 0, 0), + GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0), GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys", GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0), @@ -530,20 +878,42 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = { GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg", GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0), + GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0", + GATE_BUS_TOP, 5, 0, 0), GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl", GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl", GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0), - GATE(0, "pclk66_gpio", "mout_sw_aclk66", + GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp", + GATE_BUS_TOP, 8, 0, 0), + GATE(CLK_PCLK66_GPIO, "pclk66_gpio", "mout_user_pclk66_gpio", GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), - GATE(0, "aclk66_psgen", "mout_aclk66_psgen", + GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), - GATE(0, "aclk66_peric", "mout_aclk66_peric", - GATE_BUS_TOP, 11, 0, 0), + GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric", + GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0), + GATE(0, "aclk266_isp", "mout_user_aclk266_isp", + GATE_BUS_TOP, 13, 0, 0), GATE(0, "aclk166", "mout_user_aclk166", GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0), GATE(0, "aclk333", "mout_aclk333", GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0), + GATE(0, "aclk400_isp", "mout_user_aclk400_isp", + GATE_BUS_TOP, 16, 0, 0), + GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl", + GATE_BUS_TOP, 17, 0, 0), + GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1", + GATE_BUS_TOP, 18, 0, 0), + GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24", + GATE_BUS_TOP, 28, 0, 0), + GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m", + GATE_BUS_TOP, 29, 0, 0), + + GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1", + SRC_MASK_TOP2, 24, 0, 0), + + GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk", + SRC_MASK_TOP7, 20, 0, 0), /* sclk */ GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_uart0", @@ -554,11 +924,11 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = { GATE_TOP_SCLK_PERIC, 2, CLK_SET_RATE_PARENT, 0), 
GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_uart3", GATE_TOP_SCLK_PERIC, 3, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_pre_spi0", + GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_spi0_pre", GATE_TOP_SCLK_PERIC, 6, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_pre_spi1", + GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_spi1_pre", GATE_TOP_SCLK_PERIC, 7, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_pre_spi2", + GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_spi2_pre", GATE_TOP_SCLK_PERIC, 8, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif", GATE_TOP_SCLK_PERIC, 9, CLK_SET_RATE_PARENT, 0), @@ -588,164 +958,191 @@ static struct samsung_gate_clock exynos5420_gate_clks[] __initdata = { GATE(CLK_SCLK_USBD301, "sclk_usbd301", "dout_usbd301", GATE_TOP_SCLK_FSYS, 10, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_USBD301, "sclk_unipro", "dout_unipro", - SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0), - - GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "aclK333_432_gscl", - GATE_TOP_SCLK_GSCL, 6, CLK_SET_RATE_PARENT, 0), - GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "aclk333_432_gscl", - GATE_TOP_SCLK_GSCL, 7, CLK_SET_RATE_PARENT, 0), - /* Display */ GATE(CLK_SCLK_FIMD1, "sclk_fimd1", "dout_fimd1", - GATE_TOP_SCLK_DISP1, 0, CLK_SET_RATE_PARENT, 0), + GATE_TOP_SCLK_DISP1, 0, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_MIPI1, "sclk_mipi1", "dout_mipi1", - GATE_TOP_SCLK_DISP1, 3, CLK_SET_RATE_PARENT, 0), + GATE_TOP_SCLK_DISP1, 3, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", - GATE_TOP_SCLK_DISP1, 9, CLK_SET_RATE_PARENT, 0), + GATE_TOP_SCLK_DISP1, 9, 0, 0), GATE(CLK_SCLK_PIXEL, "sclk_pixel", "dout_hdmi_pixel", - GATE_TOP_SCLK_DISP1, 10, CLK_SET_RATE_PARENT, 0), + GATE_TOP_SCLK_DISP1, 10, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1", - GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), + GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0), /* Maudio Block */ GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0", GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0", GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0), - /* FSYS */ + + /* FSYS Block */ GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0), GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0), GATE(CLK_PDMA1, "pdma1", "aclk200_fsys", GATE_BUS_FSYS0, 2, 0, 0), GATE(CLK_UFS, "ufs", "aclk200_fsys2", GATE_BUS_FSYS0, 3, 0, 0), - GATE(CLK_RTIC, "rtic", "aclk200_fsys", GATE_BUS_FSYS0, 5, 0, 0), - GATE(CLK_MMC0, "mmc0", "aclk200_fsys2", GATE_BUS_FSYS0, 12, 0, 0), - GATE(CLK_MMC1, "mmc1", "aclk200_fsys2", GATE_BUS_FSYS0, 13, 0, 0), - GATE(CLK_MMC2, "mmc2", "aclk200_fsys2", GATE_BUS_FSYS0, 14, 0, 0), + GATE(CLK_RTIC, "rtic", "aclk200_fsys", GATE_IP_FSYS, 9, 0, 0), + GATE(CLK_MMC0, "mmc0", "aclk200_fsys2", GATE_IP_FSYS, 12, 0, 0), + GATE(CLK_MMC1, "mmc1", "aclk200_fsys2", GATE_IP_FSYS, 13, 0, 0), + GATE(CLK_MMC2, "mmc2", "aclk200_fsys2", GATE_IP_FSYS, 14, 0, 0), GATE(CLK_SROMC, "sromc", "aclk200_fsys2", - GATE_BUS_FSYS0, 19, CLK_IGNORE_UNUSED, 0), - GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_BUS_FSYS0, 20, 0, 0), - GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_BUS_FSYS0, 21, 0, 0), - GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_BUS_FSYS0, 28, 0, 0), - - /* UART */ - GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_BUS_PERIC, 4, 0, 0), - GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_BUS_PERIC, 5, 0, 0), - GATE_A(CLK_UART2, "uart2", "aclk66_peric", - GATE_BUS_PERIC, 6, CLK_IGNORE_UNUSED, 0, "uart2"), - 
GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_BUS_PERIC, 7, 0, 0), - /* I2C */ - GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_BUS_PERIC, 9, 0, 0), - GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_BUS_PERIC, 10, 0, 0), - GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_BUS_PERIC, 11, 0, 0), - GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_BUS_PERIC, 12, 0, 0), - GATE(CLK_I2C4, "i2c4", "aclk66_peric", GATE_BUS_PERIC, 13, 0, 0), - GATE(CLK_I2C5, "i2c5", "aclk66_peric", GATE_BUS_PERIC, 14, 0, 0), - GATE(CLK_I2C6, "i2c6", "aclk66_peric", GATE_BUS_PERIC, 15, 0, 0), - GATE(CLK_I2C7, "i2c7", "aclk66_peric", GATE_BUS_PERIC, 16, 0, 0), - GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_BUS_PERIC, 17, 0, - 0), - GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_BUS_PERIC, 18, 0, 0), - /* SPI */ - GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_BUS_PERIC, 19, 0, 0), - GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_BUS_PERIC, 20, 0, 0), - GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_BUS_PERIC, 21, 0, 0), - GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0), - /* I2S */ - GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_BUS_PERIC, 23, 0, 0), - GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_BUS_PERIC, 24, 0, 0), - /* PCM */ - GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_BUS_PERIC, 25, 0, 0), - GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_BUS_PERIC, 26, 0, 0), - /* PWM */ - GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_BUS_PERIC, 27, 0, 0), - /* SPDIF */ - GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_BUS_PERIC, 29, 0, 0), + GATE_IP_FSYS, 17, CLK_IGNORE_UNUSED, 0), + GATE(CLK_USBH20, "usbh20", "aclk200_fsys", GATE_IP_FSYS, 18, 0, 0), + GATE(CLK_USBD300, "usbd300", "aclk200_fsys", GATE_IP_FSYS, 19, 0, 0), + GATE(CLK_USBD301, "usbd301", "aclk200_fsys", GATE_IP_FSYS, 20, 0, 0), + GATE(CLK_SCLK_UNIPRO, "sclk_unipro", "dout_unipro", + SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0), - GATE(CLK_I2C8, "i2c8", "aclk66_peric", GATE_BUS_PERIC1, 0, 0, 0), - GATE(CLK_I2C9, "i2c9", "aclk66_peric", GATE_BUS_PERIC1, 1, 0, 0), - GATE(CLK_I2C10, "i2c10", "aclk66_peric", GATE_BUS_PERIC1, 2, 0, 0), + /* PERIC Block */ + GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0), + GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0), + GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0), + GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0), + GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0), + GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0), + GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0), + GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0), + GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0), + GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0), + GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0), + GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0), + GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0), + GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0), + GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0), + GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0), + GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0), + GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0), + GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0), + GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0), + GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0), + GATE(CLK_PWM, "pwm", "aclk66_peric", 
GATE_IP_PERIC, 24, 0, 0), + GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0), + GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0), + GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0), + GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0), + GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0), + + /* PERIS Block */ GATE(CLK_CHIPID, "chipid", "aclk66_psgen", - GATE_BUS_PERIS0, 12, CLK_IGNORE_UNUSED, 0), + GATE_IP_PERIS, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk66_psgen", - GATE_BUS_PERIS0, 13, CLK_IGNORE_UNUSED, 0), - GATE(CLK_TZPC0, "tzpc0", "aclk66_psgen", GATE_BUS_PERIS0, 18, 0, 0), - GATE(CLK_TZPC1, "tzpc1", "aclk66_psgen", GATE_BUS_PERIS0, 19, 0, 0), - GATE(CLK_TZPC2, "tzpc2", "aclk66_psgen", GATE_BUS_PERIS0, 20, 0, 0), - GATE(CLK_TZPC3, "tzpc3", "aclk66_psgen", GATE_BUS_PERIS0, 21, 0, 0), - GATE(CLK_TZPC4, "tzpc4", "aclk66_psgen", GATE_BUS_PERIS0, 22, 0, 0), - GATE(CLK_TZPC5, "tzpc5", "aclk66_psgen", GATE_BUS_PERIS0, 23, 0, 0), - GATE(CLK_TZPC6, "tzpc6", "aclk66_psgen", GATE_BUS_PERIS0, 24, 0, 0), - GATE(CLK_TZPC7, "tzpc7", "aclk66_psgen", GATE_BUS_PERIS0, 25, 0, 0), - GATE(CLK_TZPC8, "tzpc8", "aclk66_psgen", GATE_BUS_PERIS0, 26, 0, 0), - GATE(CLK_TZPC9, "tzpc9", "aclk66_psgen", GATE_BUS_PERIS0, 27, 0, 0), - - GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk66_psgen", GATE_BUS_PERIS1, 0, 0, - 0), + GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0), + GATE(CLK_TZPC0, "tzpc0", "aclk66_psgen", GATE_IP_PERIS, 6, 0, 0), + GATE(CLK_TZPC1, "tzpc1", "aclk66_psgen", GATE_IP_PERIS, 7, 0, 0), + GATE(CLK_TZPC2, "tzpc2", "aclk66_psgen", GATE_IP_PERIS, 8, 0, 0), + GATE(CLK_TZPC3, "tzpc3", "aclk66_psgen", GATE_IP_PERIS, 9, 0, 0), + GATE(CLK_TZPC4, "tzpc4", "aclk66_psgen", GATE_IP_PERIS, 10, 0, 0), + GATE(CLK_TZPC5, "tzpc5", "aclk66_psgen", GATE_IP_PERIS, 11, 0, 0), + GATE(CLK_TZPC6, "tzpc6", "aclk66_psgen", GATE_IP_PERIS, 12, 0, 0), + GATE(CLK_TZPC7, "tzpc7", "aclk66_psgen", GATE_IP_PERIS, 13, 0, 0), + GATE(CLK_TZPC8, "tzpc8", "aclk66_psgen", GATE_IP_PERIS, 14, 0, 0), + GATE(CLK_TZPC9, "tzpc9", "aclk66_psgen", GATE_IP_PERIS, 15, 0, 0), + GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk66_psgen", GATE_IP_PERIS, 16, 0, 0), + GATE(CLK_MCT, "mct", "aclk66_psgen", GATE_IP_PERIS, 18, 0, 0), + GATE(CLK_WDT, "wdt", "aclk66_psgen", GATE_IP_PERIS, 19, 0, 0), + GATE(CLK_RTC, "rtc", "aclk66_psgen", GATE_IP_PERIS, 20, 0, 0), + GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_IP_PERIS, 21, 0, 0), + GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_IP_PERIS, 22, 0, 0), + GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0), - GATE(CLK_WDT, "wdt", "aclk66_psgen", GATE_BUS_PERIS1, 3, 0, 0), - GATE(CLK_RTC, "rtc", "aclk66_psgen", GATE_BUS_PERIS1, 4, 0, 0), - GATE(CLK_TMU, "tmu", "aclk66_psgen", GATE_BUS_PERIS1, 5, 0, 0), - GATE(CLK_TMU_GPU, "tmu_gpu", "aclk66_psgen", GATE_BUS_PERIS1, 6, 0, 0), + + /* GEN Block */ + GATE(CLK_ROTATOR, "rotator", "mout_user_aclk266", GATE_IP_GEN, 1, 0, 0), + GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0), + GATE(CLK_JPEG2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0), + GATE(CLK_MDMA1, "mdma1", "mout_user_aclk266", GATE_IP_GEN, 4, 0, 0), + GATE(CLK_TOP_RTC, "top_rtc", "aclk66_psgen", GATE_IP_GEN, 5, 0, 0), + GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "dout_gen_blk", + GATE_IP_GEN, 6, 0, 0), + GATE(CLK_SMMU_JPEG, "smmu_jpeg", "dout_jpg_blk", GATE_IP_GEN, 7, 0, 0), + GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "dout_gen_blk", + GATE_IP_GEN, 9, 0, 0), + + /* GATE_IP_GEN doesn't list gates for smmu_jpeg2 and mc */ + 
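Every GATE() entry in these tables resolves to the generic gate implementation; stripped of the spinlock and of the CLK_GATE_SET_TO_DISABLE inversion, it is a plain read-modify-write on the named register, roughly:

#include <linux/io.h>
#include <linux/bits.h>

/* Sketch of the generic clk-gate enable path, simplified. */
static void gate_bit_enable(void __iomem *reg, unsigned int bit)
{
	u32 val = readl(reg);

	writel(val | BIT(bit), reg);	/* bit set = clock running */
}

CLK_IGNORE_UNUSED, used for chipid and sysreg above, only exempts a gate from being shut off by the late clk_disable_unused() pass.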
GATE(CLK_SMMU_JPEG2, "smmu_jpeg2", "dout_jpg_blk",
+ GATE_BUS_GEN, 28, 0, 0),
+ GATE(CLK_MC, "mc", "aclk66_psgen", GATE_BUS_GEN, 12, 0, 0),
+
+ /* GSCL Block */
+ GATE(CLK_SCLK_GSCL_WA, "sclk_gscl_wa", "mout_user_aclk333_432_gscl",
+ GATE_TOP_SCLK_GSCL, 6, 0, 0),
+ GATE(CLK_SCLK_GSCL_WB, "sclk_gscl_wb", "mout_user_aclk333_432_gscl",
+ GATE_TOP_SCLK_GSCL, 7, 0, 0),
 GATE(CLK_GSCL0, "gscl0", "aclk300_gscl", GATE_IP_GSCL0, 0, 0, 0),
 GATE(CLK_GSCL1, "gscl1", "aclk300_gscl", GATE_IP_GSCL0, 1, 0, 0),
- GATE(CLK_CLK_3AA, "clk_3aa", "aclk300_gscl", GATE_IP_GSCL0, 4, 0, 0),
-
- GATE(CLK_SMMU_3AA, "smmu_3aa", "aclk333_432_gscl", GATE_IP_GSCL1, 2, 0,
- 0),
- GATE(CLK_SMMU_FIMCL0, "smmu_fimcl0", "aclk333_432_gscl",
+ GATE(CLK_FIMC_3AA, "fimc_3aa", "aclk333_432_gscl",
+ GATE_IP_GSCL0, 4, 0, 0),
+ GATE(CLK_FIMC_LITE0, "fimc_lite0", "aclk333_432_gscl",
+ GATE_IP_GSCL0, 5, 0, 0),
+ GATE(CLK_FIMC_LITE1, "fimc_lite1", "aclk333_432_gscl",
+ GATE_IP_GSCL0, 6, 0, 0),
+
+ GATE(CLK_SMMU_3AA, "smmu_3aa", "dout_gscl_blk_333",
+ GATE_IP_GSCL1, 2, 0, 0),
+ GATE(CLK_SMMU_FIMCL0, "smmu_fimcl0", "dout_gscl_blk_333",
 GATE_IP_GSCL1, 3, 0, 0),
- GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "aclk333_432_gscl",
+ GATE(CLK_SMMU_FIMCL1, "smmu_fimcl1", "dout_gscl_blk_333",
 GATE_IP_GSCL1, 4, 0, 0),
- GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "aclk300_gscl", GATE_IP_GSCL1, 6, 0,
- 0),
- GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "aclk300_gscl", GATE_IP_GSCL1, 7, 0,
- 0),
- GATE(CLK_GSCL_WA, "gscl_wa", "aclk300_gscl", GATE_IP_GSCL1, 12, 0, 0),
- GATE(CLK_GSCL_WB, "gscl_wb", "aclk300_gscl", GATE_IP_GSCL1, 13, 0, 0),
- GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3,", "aclk333_432_gscl",
+ GATE(CLK_SMMU_GSCL0, "smmu_gscl0", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 6, 0, 0),
+ GATE(CLK_SMMU_GSCL1, "smmu_gscl1", "dout_gscl_blk_300",
+ GATE_IP_GSCL1, 7, 0, 0),
+ GATE(CLK_GSCL_WA, "gscl_wa", "sclk_gscl_wa", GATE_IP_GSCL1, 12, 0, 0),
+ GATE(CLK_GSCL_WB, "gscl_wb", "sclk_gscl_wb", GATE_IP_GSCL1, 13, 0, 0),
+ GATE(CLK_SMMU_FIMCL3, "smmu_fimcl3", "dout_gscl_blk_333",
 GATE_IP_GSCL1, 16, 0, 0),
 GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
 GATE_IP_GSCL1, 17, 0, 0),
+ /* MSCL Block */
+ GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+ GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+ GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+ GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+ GATE_IP_MSCL, 8, 0, 0),
+ GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+ GATE_IP_MSCL, 9, 0, 0),
+ GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+ GATE_IP_MSCL, 10, 0, 0),
+
 GATE(CLK_FIMD1, "fimd1", "aclk300_disp1", GATE_IP_DISP1, 0, 0, 0),
 GATE(CLK_DSIM1, "dsim1", "aclk200_disp1", GATE_IP_DISP1, 3, 0, 0),
 GATE(CLK_DP1, "dp1", "aclk200_disp1", GATE_IP_DISP1, 4, 0, 0),
- GATE(CLK_MIXER, "mixer", "aclk166", GATE_IP_DISP1, 5, 0, 0),
+ GATE(CLK_MIXER, "mixer", "aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
 GATE(CLK_HDMI, "hdmi", "aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
- GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "aclk300_disp1", GATE_IP_DISP1, 8, 0,
- 0),
+ GATE(CLK_SMMU_FIMD1M0, "smmu_fimd1m0", "dout_disp1_blk",
+ GATE_IP_DISP1, 7, 0, 0),
+ GATE(CLK_SMMU_FIMD1M1, "smmu_fimd1m1", "dout_disp1_blk",
+ GATE_IP_DISP1, 8, 0, 0),
+ GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1",
+ GATE_IP_DISP1, 9, 0, 0),
+
+ /* ISP */
+ GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
+ GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "dout_spi0_isp_pre",
+ GATE_TOP_SCLK_ISP, 1, CLK_SET_RATE_PARENT, 0),
+
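gscl_wa and gscl_wb are now parented to their own special clocks instead of a bus clock, so a single enable call keeps the whole chain running: clk_prepare_enable() enables all parents on the way up. A sketch in a hypothetical driver context (names as in the table above):

#include <linux/clk.h>
#include <linux/err.h>

static int gscl_wa_enable_example(struct device *dev)
{
	struct clk *ip = devm_clk_get(dev, "gscl_wa");

	if (IS_ERR(ip))
		return PTR_ERR(ip);

	/*
	 * Ungates GATE_IP_GSCL1 bit 12; because the chain is
	 * gscl_wa -> sclk_gscl_wa -> mout_user_aclk333_432_gscl,
	 * the TOP sclk gate (bit 6) is enabled on the way up too.
	 */
	return clk_prepare_enable(ip);
}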
GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "dout_spi1_isp_pre", + GATE_TOP_SCLK_ISP, 2, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "dout_pwm_isp", + GATE_TOP_SCLK_ISP, 3, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_ISP_SENSOR0, "sclk_isp_sensor0", "dout_isp_sensor0", + GATE_TOP_SCLK_ISP, 4, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_ISP_SENSOR1, "sclk_isp_sensor1", "dout_isp_sensor1", + GATE_TOP_SCLK_ISP, 8, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_ISP_SENSOR2, "sclk_isp_sensor2", "dout_isp_sensor2", + GATE_TOP_SCLK_ISP, 12, CLK_SET_RATE_PARENT, 0), GATE(CLK_MFC, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0), - GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0), - GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0), - - GATE(CLK_G3D, "g3d", "aclkg3d", GATE_IP_G3D, 9, 0, 0), + GATE(CLK_SMMU_MFCL, "smmu_mfcl", "dout_mfc_blk", GATE_IP_MFC, 1, 0, 0), + GATE(CLK_SMMU_MFCR, "smmu_mfcr", "dout_mfc_blk", GATE_IP_MFC, 2, 0, 0), - GATE(CLK_ROTATOR, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0), - GATE(CLK_JPEG, "jpeg", "aclk300_jpeg", GATE_IP_GEN, 2, 0, 0), - GATE(CLK_JPEG2, "jpeg2", "aclk300_jpeg", GATE_IP_GEN, 3, 0, 0), - GATE(CLK_MDMA1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0), - GATE(CLK_SMMU_ROTATOR, "smmu_rotator", "aclk266", GATE_IP_GEN, 6, 0, 0), - GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk300_jpeg", GATE_IP_GEN, 7, 0, 0), - GATE(CLK_SMMU_MDMA1, "smmu_mdma1", "aclk266", GATE_IP_GEN, 9, 0, 0), - - GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0), - GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0), - GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0), - GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "aclk400_mscl", GATE_IP_MSCL, 8, 0, - 0), - GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "aclk400_mscl", GATE_IP_MSCL, 9, 0, - 0), - GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "aclk400_mscl", GATE_IP_MSCL, 10, 0, - 0), - GATE(CLK_SMMU_MIXER, "smmu_mixer", "aclk200_disp1", GATE_IP_DISP1, 9, 0, - 0), + GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0), }; -static struct samsung_pll_clock exynos5420_plls[nr_plls] __initdata = { +static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = { [apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK, APLL_CON0, NULL), [cpll] = PLL(pll_2550, CLK_FOUT_CPLL, "fout_cpll", "fin_pll", CPLL_LOCK, @@ -776,8 +1173,11 @@ static struct of_device_id ext_clk_match[] __initdata = { }; /* register exynos5420 clocks */ -static void __init exynos5420_clk_init(struct device_node *np) +static void __init exynos5x_clk_init(struct device_node *np, + enum exynos5x_soc soc) { + struct samsung_clk_provider *ctx; + if (np) { reg_base = of_iomap(np, 0); if (!reg_base) @@ -786,23 +1186,56 @@ static void __init exynos5420_clk_init(struct device_node *np) panic("%s: unable to determine soc\n", __func__); } - samsung_clk_init(np, reg_base, CLK_NR_CLKS); - samsung_clk_of_register_fixed_ext(exynos5420_fixed_rate_ext_clks, - ARRAY_SIZE(exynos5420_fixed_rate_ext_clks), + exynos5x_soc = soc; + + ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + + samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks, + ARRAY_SIZE(exynos5x_fixed_rate_ext_clks), ext_clk_match); - samsung_clk_register_pll(exynos5420_plls, ARRAY_SIZE(exynos5420_plls), + samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls), reg_base); - samsung_clk_register_fixed_rate(exynos5420_fixed_rate_clks, - ARRAY_SIZE(exynos5420_fixed_rate_clks)); - 
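The pll_2550/pll_35xx entries in exynos5x_plls share one recalc rule. Restated as a standalone helper (the sample numbers below are a worked example, not register values from this patch):

#include <linux/types.h>
#include <asm/div64.h>

/* Same arithmetic as samsung_pll35xx_recalc_rate() in clk-pll.c. */
static unsigned long pll35xx_rate(u64 fin, u32 mdiv, u32 pdiv, u32 sdiv)
{
	u64 fvco = fin * mdiv;

	do_div(fvco, pdiv << sdiv);	/* fout = fin * m / (p << s) */
	return (unsigned long)fvco;
}

With fin_pll at 24 MHz, pll35xx_rate(24000000, 100, 3, 0) yields 800 MHz.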
samsung_clk_register_fixed_factor(exynos5420_fixed_factor_clks, - ARRAY_SIZE(exynos5420_fixed_factor_clks)); - samsung_clk_register_mux(exynos5420_mux_clks, - ARRAY_SIZE(exynos5420_mux_clks)); - samsung_clk_register_div(exynos5420_div_clks, - ARRAY_SIZE(exynos5420_div_clks)); - samsung_clk_register_gate(exynos5420_gate_clks, - ARRAY_SIZE(exynos5420_gate_clks)); + samsung_clk_register_fixed_rate(ctx, exynos5x_fixed_rate_clks, + ARRAY_SIZE(exynos5x_fixed_rate_clks)); + samsung_clk_register_fixed_factor(ctx, exynos5x_fixed_factor_clks, + ARRAY_SIZE(exynos5x_fixed_factor_clks)); + samsung_clk_register_mux(ctx, exynos5x_mux_clks, + ARRAY_SIZE(exynos5x_mux_clks)); + samsung_clk_register_div(ctx, exynos5x_div_clks, + ARRAY_SIZE(exynos5x_div_clks)); + samsung_clk_register_gate(ctx, exynos5x_gate_clks, + ARRAY_SIZE(exynos5x_gate_clks)); + + if (soc == EXYNOS5420) { + samsung_clk_register_mux(ctx, exynos5420_mux_clks, + ARRAY_SIZE(exynos5420_mux_clks)); + samsung_clk_register_div(ctx, exynos5420_div_clks, + ARRAY_SIZE(exynos5420_div_clks)); + } else { + samsung_clk_register_fixed_factor( + ctx, exynos5800_fixed_factor_clks, + ARRAY_SIZE(exynos5800_fixed_factor_clks)); + samsung_clk_register_mux(ctx, exynos5800_mux_clks, + ARRAY_SIZE(exynos5800_mux_clks)); + samsung_clk_register_div(ctx, exynos5800_div_clks, + ARRAY_SIZE(exynos5800_div_clks)); + samsung_clk_register_gate(ctx, exynos5800_gate_clks, + ARRAY_SIZE(exynos5800_gate_clks)); + } exynos5420_clk_sleep_init(); } + +static void __init exynos5420_clk_init(struct device_node *np) +{ + exynos5x_clk_init(np, EXYNOS5420); +} CLK_OF_DECLARE(exynos5420_clk, "samsung,exynos5420-clock", exynos5420_clk_init); + +static void __init exynos5800_clk_init(struct device_node *np) +{ + exynos5x_clk_init(np, EXYNOS5800); +} +CLK_OF_DECLARE(exynos5800_clk, "samsung,exynos5800-clock", exynos5800_clk_init); diff --git a/drivers/clk/samsung/clk-exynos5440.c b/drivers/clk/samsung/clk-exynos5440.c index 2bfad5a993d..647f1440aa6 100644 --- a/drivers/clk/samsung/clk-exynos5440.c +++ b/drivers/clk/samsung/clk-exynos5440.c @@ -93,6 +93,7 @@ static struct of_device_id ext_clk_match[] __initdata = { static void __init exynos5440_clk_init(struct device_node *np) { void __iomem *reg_base; + struct samsung_clk_provider *ctx; reg_base = of_iomap(np, 0); if (!reg_base) { @@ -101,22 +102,25 @@ static void __init exynos5440_clk_init(struct device_node *np) return; } - samsung_clk_init(np, reg_base, CLK_NR_CLKS); - samsung_clk_of_register_fixed_ext(exynos5440_fixed_rate_ext_clks, + ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + + samsung_clk_of_register_fixed_ext(ctx, exynos5440_fixed_rate_ext_clks, ARRAY_SIZE(exynos5440_fixed_rate_ext_clks), ext_clk_match); samsung_clk_register_pll2550x("cplla", "xtal", reg_base + 0x1c, 0x10); samsung_clk_register_pll2550x("cpllb", "xtal", reg_base + 0x20, 0x10); - samsung_clk_register_fixed_rate(exynos5440_fixed_rate_clks, + samsung_clk_register_fixed_rate(ctx, exynos5440_fixed_rate_clks, ARRAY_SIZE(exynos5440_fixed_rate_clks)); - samsung_clk_register_fixed_factor(exynos5440_fixed_factor_clks, + samsung_clk_register_fixed_factor(ctx, exynos5440_fixed_factor_clks, ARRAY_SIZE(exynos5440_fixed_factor_clks)); - samsung_clk_register_mux(exynos5440_mux_clks, + samsung_clk_register_mux(ctx, exynos5440_mux_clks, ARRAY_SIZE(exynos5440_mux_clks)); - samsung_clk_register_div(exynos5440_div_clks, + samsung_clk_register_div(ctx, exynos5440_div_clks, ARRAY_SIZE(exynos5440_div_clks)); - 
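All of these registration calls now take a samsung_clk_provider handle instead of touching file-scope state, which is what makes registering several CMUs from one image safe. Roughly what the context carries (paraphrased from clk.h of this series, so treat the field list as a sketch):

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/spinlock.h>

struct samsung_clk_provider {
	void __iomem *reg_base;			/* CMU register window */
	struct clk_onecell_data clk_data;	/* table for the OF provider */
	spinlock_t lock;			/* guards register read-modify-write */
};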
samsung_clk_register_gate(exynos5440_gate_clks, + samsung_clk_register_gate(ctx, exynos5440_gate_clks, ARRAY_SIZE(exynos5440_gate_clks)); pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk")); diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 81e6d2f49aa..b07fad2a916 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -11,6 +11,7 @@ #include <linux/errno.h> #include <linux/hrtimer.h> +#include <linux/delay.h> #include "clk.h" #include "clk-pll.h" @@ -59,6 +60,72 @@ static long samsung_pll_round_rate(struct clk_hw *hw, } /* + * PLL2126 Clock Type + */ + +#define PLL2126_MDIV_MASK (0xff) +#define PLL2126_PDIV_MASK (0x3f) +#define PLL2126_SDIV_MASK (0x3) +#define PLL2126_MDIV_SHIFT (16) +#define PLL2126_PDIV_SHIFT (8) +#define PLL2126_SDIV_SHIFT (0) + +static unsigned long samsung_pll2126_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 pll_con, mdiv, pdiv, sdiv; + u64 fvco = parent_rate; + + pll_con = __raw_readl(pll->con_reg); + mdiv = (pll_con >> PLL2126_MDIV_SHIFT) & PLL2126_MDIV_MASK; + pdiv = (pll_con >> PLL2126_PDIV_SHIFT) & PLL2126_PDIV_MASK; + sdiv = (pll_con >> PLL2126_SDIV_SHIFT) & PLL2126_SDIV_MASK; + + fvco *= (mdiv + 8); + do_div(fvco, (pdiv + 2) << sdiv); + + return (unsigned long)fvco; +} + +static const struct clk_ops samsung_pll2126_clk_ops = { + .recalc_rate = samsung_pll2126_recalc_rate, +}; + +/* + * PLL3000 Clock Type + */ + +#define PLL3000_MDIV_MASK (0xff) +#define PLL3000_PDIV_MASK (0x3) +#define PLL3000_SDIV_MASK (0x3) +#define PLL3000_MDIV_SHIFT (16) +#define PLL3000_PDIV_SHIFT (8) +#define PLL3000_SDIV_SHIFT (0) + +static unsigned long samsung_pll3000_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 pll_con, mdiv, pdiv, sdiv; + u64 fvco = parent_rate; + + pll_con = __raw_readl(pll->con_reg); + mdiv = (pll_con >> PLL3000_MDIV_SHIFT) & PLL3000_MDIV_MASK; + pdiv = (pll_con >> PLL3000_PDIV_SHIFT) & PLL3000_PDIV_MASK; + sdiv = (pll_con >> PLL3000_SDIV_SHIFT) & PLL3000_SDIV_MASK; + + fvco *= (2 * (mdiv + 8)); + do_div(fvco, pdiv << sdiv); + + return (unsigned long)fvco; +} + +static const struct clk_ops samsung_pll3000_clk_ops = { + .recalc_rate = samsung_pll3000_recalc_rate, +}; + +/* * PLL35xx Clock Type */ /* Maximum lock time can be 270 * PDIV cycles */ @@ -564,7 +631,9 @@ static const struct clk_ops samsung_pll46xx_clk_min_ops = { #define PLL6552_PDIV_MASK 0x3f #define PLL6552_SDIV_MASK 0x7 #define PLL6552_MDIV_SHIFT 16 +#define PLL6552_MDIV_SHIFT_2416 14 #define PLL6552_PDIV_SHIFT 8 +#define PLL6552_PDIV_SHIFT_2416 5 #define PLL6552_SDIV_SHIFT 0 static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw, @@ -575,8 +644,13 @@ static unsigned long samsung_pll6552_recalc_rate(struct clk_hw *hw, u64 fvco = parent_rate; pll_con = __raw_readl(pll->con_reg); - mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK; - pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK; + if (pll->type == pll_6552_s3c2416) { + mdiv = (pll_con >> PLL6552_MDIV_SHIFT_2416) & PLL6552_MDIV_MASK; + pdiv = (pll_con >> PLL6552_PDIV_SHIFT_2416) & PLL6552_PDIV_MASK; + } else { + mdiv = (pll_con >> PLL6552_MDIV_SHIFT) & PLL6552_MDIV_MASK; + pdiv = (pll_con >> PLL6552_PDIV_SHIFT) & PLL6552_PDIV_MASK; + } sdiv = (pll_con >> PLL6552_SDIV_SHIFT) & PLL6552_SDIV_MASK; fvco *= mdiv; @@ -628,6 +702,169 @@ static const struct clk_ops samsung_pll6553_clk_ops = { }; /* + * PLL Clock Type of 
S3C24XX before S3C2443 + */ + +#define PLLS3C2410_MDIV_MASK (0xff) +#define PLLS3C2410_PDIV_MASK (0x1f) +#define PLLS3C2410_SDIV_MASK (0x3) +#define PLLS3C2410_MDIV_SHIFT (12) +#define PLLS3C2410_PDIV_SHIFT (4) +#define PLLS3C2410_SDIV_SHIFT (0) + +#define PLLS3C2410_ENABLE_REG_OFFSET 0x10 + +static unsigned long samsung_s3c2410_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 pll_con, mdiv, pdiv, sdiv; + u64 fvco = parent_rate; + + pll_con = __raw_readl(pll->con_reg); + mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK; + pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK; + sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK; + + fvco *= (mdiv + 8); + do_div(fvco, (pdiv + 2) << sdiv); + + return (unsigned int)fvco; +} + +static unsigned long samsung_s3c2440_mpll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 pll_con, mdiv, pdiv, sdiv; + u64 fvco = parent_rate; + + pll_con = __raw_readl(pll->con_reg); + mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK; + pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK; + sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK; + + fvco *= (2 * (mdiv + 8)); + do_div(fvco, (pdiv + 2) << sdiv); + + return (unsigned int)fvco; +} + +static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate, + unsigned long prate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + const struct samsung_pll_rate_table *rate; + u32 tmp; + + /* Get required rate settings from table */ + rate = samsung_get_pll_settings(pll, drate); + if (!rate) { + pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, + drate, __clk_get_name(hw->clk)); + return -EINVAL; + } + + tmp = __raw_readl(pll->con_reg); + + /* Change PLL PMS values */ + tmp &= ~((PLLS3C2410_MDIV_MASK << PLLS3C2410_MDIV_SHIFT) | + (PLLS3C2410_PDIV_MASK << PLLS3C2410_PDIV_SHIFT) | + (PLLS3C2410_SDIV_MASK << PLLS3C2410_SDIV_SHIFT)); + tmp |= (rate->mdiv << PLLS3C2410_MDIV_SHIFT) | + (rate->pdiv << PLLS3C2410_PDIV_SHIFT) | + (rate->sdiv << PLLS3C2410_SDIV_SHIFT); + __raw_writel(tmp, pll->con_reg); + + /* Time to settle according to the manual */ + udelay(300); + + return 0; +} + +static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 pll_en = __raw_readl(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET); + u32 pll_en_orig = pll_en; + + if (enable) + pll_en &= ~BIT(bit); + else + pll_en |= BIT(bit); + + __raw_writel(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET); + + /* if we started the UPLL, then allow to settle */ + if (enable && (pll_en_orig & BIT(bit))) + udelay(300); + + return 0; +} + +static int samsung_s3c2410_mpll_enable(struct clk_hw *hw) +{ + return samsung_s3c2410_pll_enable(hw, 5, true); +} + +static void samsung_s3c2410_mpll_disable(struct clk_hw *hw) +{ + samsung_s3c2410_pll_enable(hw, 5, false); +} + +static int samsung_s3c2410_upll_enable(struct clk_hw *hw) +{ + return samsung_s3c2410_pll_enable(hw, 7, true); +} + +static void samsung_s3c2410_upll_disable(struct clk_hw *hw) +{ + samsung_s3c2410_pll_enable(hw, 7, false); +} + +static const struct clk_ops samsung_s3c2410_mpll_clk_min_ops = { + .recalc_rate = samsung_s3c2410_pll_recalc_rate, + .enable = samsung_s3c2410_mpll_enable, + .disable = samsung_s3c2410_mpll_disable, +}; + +static const struct clk_ops 
samsung_s3c2410_upll_clk_min_ops = { + .recalc_rate = samsung_s3c2410_pll_recalc_rate, + .enable = samsung_s3c2410_upll_enable, + .disable = samsung_s3c2410_upll_disable, +}; + +static const struct clk_ops samsung_s3c2440_mpll_clk_min_ops = { + .recalc_rate = samsung_s3c2440_mpll_recalc_rate, + .enable = samsung_s3c2410_mpll_enable, + .disable = samsung_s3c2410_mpll_disable, +}; + +static const struct clk_ops samsung_s3c2410_mpll_clk_ops = { + .recalc_rate = samsung_s3c2410_pll_recalc_rate, + .enable = samsung_s3c2410_mpll_enable, + .disable = samsung_s3c2410_mpll_disable, + .round_rate = samsung_pll_round_rate, + .set_rate = samsung_s3c2410_pll_set_rate, +}; + +static const struct clk_ops samsung_s3c2410_upll_clk_ops = { + .recalc_rate = samsung_s3c2410_pll_recalc_rate, + .enable = samsung_s3c2410_upll_enable, + .disable = samsung_s3c2410_upll_disable, + .round_rate = samsung_pll_round_rate, + .set_rate = samsung_s3c2410_pll_set_rate, +}; + +static const struct clk_ops samsung_s3c2440_mpll_clk_ops = { + .recalc_rate = samsung_s3c2440_mpll_recalc_rate, + .enable = samsung_s3c2410_mpll_enable, + .disable = samsung_s3c2410_mpll_disable, + .round_rate = samsung_pll_round_rate, + .set_rate = samsung_s3c2410_pll_set_rate, +}; + +/* * PLL2550x Clock Type */ @@ -710,8 +947,206 @@ struct clk * __init samsung_clk_register_pll2550x(const char *name, return clk; } -static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk, - void __iomem *base) +/* + * PLL2550xx Clock Type + */ + +/* Maximum lock time can be 270 * PDIV cycles */ +#define PLL2550XX_LOCK_FACTOR 270 + +#define PLL2550XX_M_MASK 0x3FF +#define PLL2550XX_P_MASK 0x3F +#define PLL2550XX_S_MASK 0x7 +#define PLL2550XX_LOCK_STAT_MASK 0x1 +#define PLL2550XX_M_SHIFT 9 +#define PLL2550XX_P_SHIFT 3 +#define PLL2550XX_S_SHIFT 0 +#define PLL2550XX_LOCK_STAT_SHIFT 21 + +static unsigned long samsung_pll2550xx_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 mdiv, pdiv, sdiv, pll_con; + u64 fvco = parent_rate; + + pll_con = __raw_readl(pll->con_reg); + mdiv = (pll_con >> PLL2550XX_M_SHIFT) & PLL2550XX_M_MASK; + pdiv = (pll_con >> PLL2550XX_P_SHIFT) & PLL2550XX_P_MASK; + sdiv = (pll_con >> PLL2550XX_S_SHIFT) & PLL2550XX_S_MASK; + + fvco *= mdiv; + do_div(fvco, (pdiv << sdiv)); + + return (unsigned long)fvco; +} + +static inline bool samsung_pll2550xx_mp_change(u32 mdiv, u32 pdiv, u32 pll_con) +{ + u32 old_mdiv, old_pdiv; + + old_mdiv = (pll_con >> PLL2550XX_M_SHIFT) & PLL2550XX_M_MASK; + old_pdiv = (pll_con >> PLL2550XX_P_SHIFT) & PLL2550XX_P_MASK; + + return mdiv != old_mdiv || pdiv != old_pdiv; +} + +static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate, + unsigned long prate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + const struct samsung_pll_rate_table *rate; + u32 tmp; + + /* Get required rate settings from table */ + rate = samsung_get_pll_settings(pll, drate); + if (!rate) { + pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, + drate, __clk_get_name(hw->clk)); + return -EINVAL; + } + + tmp = __raw_readl(pll->con_reg); + + if (!(samsung_pll2550xx_mp_change(rate->mdiv, rate->pdiv, tmp))) { + /* If only s change, change just s value only*/ + tmp &= ~(PLL2550XX_S_MASK << PLL2550XX_S_SHIFT); + tmp |= rate->sdiv << PLL2550XX_S_SHIFT; + __raw_writel(tmp, pll->con_reg); + + return 0; + } + + /* Set PLL lock time. 
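+ * The manual gives the worst-case lock time as 270 PDIV cycles
+ * (PLL2550XX_LOCK_FACTOR), so pdiv * 270 reference cycles are programmed
+ * before the PMS values change; the loop below then polls LOCK_STAT.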
*/ + __raw_writel(rate->pdiv * PLL2550XX_LOCK_FACTOR, pll->lock_reg); + + /* Change PLL PMS values */ + tmp &= ~((PLL2550XX_M_MASK << PLL2550XX_M_SHIFT) | + (PLL2550XX_P_MASK << PLL2550XX_P_SHIFT) | + (PLL2550XX_S_MASK << PLL2550XX_S_SHIFT)); + tmp |= (rate->mdiv << PLL2550XX_M_SHIFT) | + (rate->pdiv << PLL2550XX_P_SHIFT) | + (rate->sdiv << PLL2550XX_S_SHIFT); + __raw_writel(tmp, pll->con_reg); + + /* wait_lock_time */ + do { + cpu_relax(); + tmp = __raw_readl(pll->con_reg); + } while (!(tmp & (PLL2550XX_LOCK_STAT_MASK + << PLL2550XX_LOCK_STAT_SHIFT))); + + return 0; +} + +static const struct clk_ops samsung_pll2550xx_clk_ops = { + .recalc_rate = samsung_pll2550xx_recalc_rate, + .round_rate = samsung_pll_round_rate, + .set_rate = samsung_pll2550xx_set_rate, +}; + +static const struct clk_ops samsung_pll2550xx_clk_min_ops = { + .recalc_rate = samsung_pll2550xx_recalc_rate, +}; + +/* + * PLL2650XX Clock Type + */ + +/* Maximum lock time can be 3000 * PDIV cycles */ +#define PLL2650XX_LOCK_FACTOR 3000 + +#define PLL2650XX_MDIV_SHIFT 9 +#define PLL2650XX_PDIV_SHIFT 3 +#define PLL2650XX_SDIV_SHIFT 0 +#define PLL2650XX_KDIV_SHIFT 0 +#define PLL2650XX_MDIV_MASK 0x1ff +#define PLL2650XX_PDIV_MASK 0x3f +#define PLL2650XX_SDIV_MASK 0x7 +#define PLL2650XX_KDIV_MASK 0xffff +#define PLL2650XX_PLL_ENABLE_SHIFT 23 +#define PLL2650XX_PLL_LOCKTIME_SHIFT 21 +#define PLL2650XX_PLL_FOUTMASK_SHIFT 31 + +static unsigned long samsung_pll2650xx_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 mdiv, pdiv, sdiv, pll_con0, pll_con2; + s16 kdiv; + u64 fvco = parent_rate; + + pll_con0 = __raw_readl(pll->con_reg); + pll_con2 = __raw_readl(pll->con_reg + 8); + mdiv = (pll_con0 >> PLL2650XX_MDIV_SHIFT) & PLL2650XX_MDIV_MASK; + pdiv = (pll_con0 >> PLL2650XX_PDIV_SHIFT) & PLL2650XX_PDIV_MASK; + sdiv = (pll_con0 >> PLL2650XX_SDIV_SHIFT) & PLL2650XX_SDIV_MASK; + kdiv = (s16)(pll_con2 & PLL2650XX_KDIV_MASK); + + fvco *= (mdiv << 16) + kdiv; + do_div(fvco, (pdiv << sdiv)); + fvco >>= 16; + + return (unsigned long)fvco; +} + +static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate, + unsigned long parent_rate) +{ + struct samsung_clk_pll *pll = to_clk_pll(hw); + u32 tmp, pll_con0, pll_con2; + const struct samsung_pll_rate_table *rate; + + rate = samsung_get_pll_settings(pll, drate); + if (!rate) { + pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__, + drate, __clk_get_name(hw->clk)); + return -EINVAL; + } + + pll_con0 = __raw_readl(pll->con_reg); + pll_con2 = __raw_readl(pll->con_reg + 8); + + /* Change PLL PMS values */ + pll_con0 &= ~(PLL2650XX_MDIV_MASK << PLL2650XX_MDIV_SHIFT | + PLL2650XX_PDIV_MASK << PLL2650XX_PDIV_SHIFT | + PLL2650XX_SDIV_MASK << PLL2650XX_SDIV_SHIFT); + pll_con0 |= rate->mdiv << PLL2650XX_MDIV_SHIFT; + pll_con0 |= rate->pdiv << PLL2650XX_PDIV_SHIFT; + pll_con0 |= rate->sdiv << PLL2650XX_SDIV_SHIFT; + pll_con0 |= 1 << PLL2650XX_PLL_ENABLE_SHIFT; + pll_con0 |= 1 << PLL2650XX_PLL_FOUTMASK_SHIFT; + + pll_con2 &= ~(PLL2650XX_KDIV_MASK << PLL2650XX_KDIV_SHIFT); + pll_con2 |= ((~(rate->kdiv) + 1) & PLL2650XX_KDIV_MASK) + << PLL2650XX_KDIV_SHIFT; + + /* Set PLL lock time. 
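+ * Worst case for this PLL type is 3000 PDIV cycles (PLL2650XX_LOCK_FACTOR).
+ * Note the KDIV written above is the two's complement of the table value;
+ * the recalc path reads it back as a signed 16-bit fraction.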
*/ + __raw_writel(PLL2650XX_LOCK_FACTOR * rate->pdiv, pll->lock_reg); + + __raw_writel(pll_con0, pll->con_reg); + __raw_writel(pll_con2, pll->con_reg + 8); + + do { + tmp = __raw_readl(pll->con_reg); + } while (!(tmp & (0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT))); + + return 0; +} + +static const struct clk_ops samsung_pll2650xx_clk_ops = { + .recalc_rate = samsung_pll2650xx_recalc_rate, + .set_rate = samsung_pll2650xx_set_rate, + .round_rate = samsung_pll_round_rate, +}; + +static const struct clk_ops samsung_pll2650xx_clk_min_ops = { + .recalc_rate = samsung_pll2650xx_recalc_rate, +}; + +static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, + struct samsung_pll_clock *pll_clk, + void __iomem *base) { struct samsung_clk_pll *pll; struct clk *clk; @@ -746,6 +1181,12 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk, } switch (pll_clk->type) { + case pll_2126: + init.ops = &samsung_pll2126_clk_ops; + break; + case pll_3000: + init.ops = &samsung_pll3000_clk_ops; + break; /* clk_ops for 35xx and 2550 are similar */ case pll_35xx: case pll_2550: @@ -773,6 +1214,7 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk, init.ops = &samsung_pll36xx_clk_ops; break; case pll_6552: + case pll_6552_s3c2416: init.ops = &samsung_pll6552_clk_ops; break; case pll_6553: @@ -786,6 +1228,36 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk, else init.ops = &samsung_pll46xx_clk_ops; break; + case pll_s3c2410_mpll: + if (!pll->rate_table) + init.ops = &samsung_s3c2410_mpll_clk_min_ops; + else + init.ops = &samsung_s3c2410_mpll_clk_ops; + break; + case pll_s3c2410_upll: + if (!pll->rate_table) + init.ops = &samsung_s3c2410_upll_clk_min_ops; + else + init.ops = &samsung_s3c2410_upll_clk_ops; + break; + case pll_s3c2440_mpll: + if (!pll->rate_table) + init.ops = &samsung_s3c2440_mpll_clk_min_ops; + else + init.ops = &samsung_s3c2440_mpll_clk_ops; + break; + case pll_2550xx: + if (!pll->rate_table) + init.ops = &samsung_pll2550xx_clk_min_ops; + else + init.ops = &samsung_pll2550xx_clk_ops; + break; + case pll_2650xx: + if (!pll->rate_table) + init.ops = &samsung_pll2650xx_clk_min_ops; + else + init.ops = &samsung_pll2650xx_clk_ops; + break; default: pr_warn("%s: Unknown pll type for pll clk %s\n", __func__, pll_clk->name); @@ -804,7 +1276,7 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk, return; } - samsung_clk_add_lookup(clk, pll_clk->id); + samsung_clk_add_lookup(ctx, clk, pll_clk->id); if (!pll_clk->alias) return; @@ -815,11 +1287,12 @@ static void __init _samsung_clk_register_pll(struct samsung_pll_clock *pll_clk, __func__, pll_clk->name, ret); } -void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list, - unsigned int nr_pll, void __iomem *base) +void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx, + struct samsung_pll_clock *pll_list, + unsigned int nr_pll, void __iomem *base) { int cnt; for (cnt = 0; cnt < nr_pll; cnt++) - _samsung_clk_register_pll(&pll_list[cnt], base); + _samsung_clk_register_pll(ctx, &pll_list[cnt], base); } diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h index 6c39030080f..c0ed4d41fd9 100644 --- a/drivers/clk/samsung/clk-pll.h +++ b/drivers/clk/samsung/clk-pll.h @@ -13,6 +13,8 @@ #define __SAMSUNG_CLK_PLL_H enum samsung_pll_type { + pll_2126, + pll_3000, pll_35xx, pll_36xx, pll_2550, @@ -24,7 +26,13 @@ enum samsung_pll_type { pll_4650, pll_4650c, pll_6552, + pll_6552_s3c2416, 
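+ /*
+ * The s3c24xx and 2550xx/2650xx types added below select read-only
+ * (min) clk_ops at registration unless a rate table is supplied;
+ * see the switch in _samsung_clk_register_pll() above.
+ */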
pll_6553,
+ pll_s3c2410_mpll,
+ pll_s3c2410_upll,
+ pll_s3c2440_mpll,
+ pll_2550xx,
+ pll_2650xx,
 };
 
 #define PLL_35XX_RATE(_rate, _m, _p, _s) \
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
new file mode 100644
index 00000000000..0449cc0458e
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c2410-dclk.c
@@ -0,0 +1,440 @@
+/*
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for s3c24xx external clock output.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include "clk.h"
+
+/* legacy access to misccr, until dt conversion is finished */
+#include <mach/hardware.h>
+#include <mach/regs-gpio.h>
+
+#define MUX_DCLK0 0
+#define MUX_DCLK1 1
+#define DIV_DCLK0 2
+#define DIV_DCLK1 3
+#define GATE_DCLK0 4
+#define GATE_DCLK1 5
+#define MUX_CLKOUT0 6
+#define MUX_CLKOUT1 7
+#define DCLK_MAX_CLKS (MUX_CLKOUT1 + 1)
+
+enum supported_socs {
+ S3C2410,
+ S3C2412,
+ S3C2440,
+ S3C2443,
+};
+
+struct s3c24xx_dclk_drv_data {
+ const char **clkout0_parent_names;
+ int clkout0_num_parents;
+ const char **clkout1_parent_names;
+ int clkout1_num_parents;
+ const char **mux_parent_names;
+ int mux_num_parents;
+};
+
+/*
+ * Clock for output-parent selection in misccr
+ */
+
+struct s3c24xx_clkout {
+ struct clk_hw hw;
+ u32 mask;
+ u8 shift;
+};
+
+#define to_s3c24xx_clkout(_hw) container_of(_hw, struct s3c24xx_clkout, hw)
+
+static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw)
+{
+ struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
+ int num_parents = __clk_get_num_parents(hw->clk);
+ u32 val;
+
+ val = readl_relaxed(S3C24XX_MISCCR) >> clkout->shift;
+ val &= clkout->mask;
+
+ if (val >= num_parents)
+ return -EINVAL;
+
+ return val;
+}
+
+static int s3c24xx_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
+ int ret = 0;
+
+ s3c2410_modify_misccr((clkout->mask << clkout->shift),
+ (index << clkout->shift));
+
+ return ret;
+}
+
+const struct clk_ops s3c24xx_clkout_ops = {
+ .get_parent = s3c24xx_clkout_get_parent,
+ .set_parent = s3c24xx_clkout_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+struct clk *s3c24xx_register_clkout(struct device *dev, const char *name,
+ const char **parent_names, u8 num_parents,
+ u8 shift, u32 mask)
+{
+ struct s3c24xx_clkout *clkout;
+ struct clk *clk;
+ struct clk_init_data init;
+
+ /* allocate the clkout */
+ clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
+ if (!clkout)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &s3c24xx_clkout_ops;
+ init.flags = CLK_IS_BASIC;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ clkout->shift = shift;
+ clkout->mask = mask;
+ clkout->hw.init = &init;
+
+ clk = clk_register(dev, &clkout->hw);
+
+ return clk;
+}
+
+/*
+ * dclk and clkout init
+ */
+
+struct s3c24xx_dclk {
+ struct device *dev;
+ void __iomem *base;
+ struct clk_onecell_data clk_data;
+ struct notifier_block dclk0_div_change_nb;
+ struct notifier_block dclk1_div_change_nb;
+ spinlock_t dclk_lock;
+ unsigned long reg_save;
+};
+
+#define to_s3c24xx_dclk0(x) \
+ container_of(x, struct s3c24xx_dclk, dclk0_div_change_nb)
+
+#define to_s3c24xx_dclk1(x) \
+ container_of(x, struct s3c24xx_dclk, dclk1_div_change_nb)
+
+static const char
*dclk_s3c2410_p[] = { "pclk", "uclk" }; +static const char *clkout0_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk", + "gate_dclk0" }; +static const char *clkout1_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk", + "gate_dclk1" }; + +static const char *clkout0_s3c2412_p[] = { "mpll", "upll", "rtc_clkout", + "hclk", "pclk", "gate_dclk0" }; +static const char *clkout1_s3c2412_p[] = { "xti", "upll", "fclk", "hclk", "pclk", + "gate_dclk1" }; + +static const char *clkout0_s3c2440_p[] = { "xti", "upll", "fclk", "hclk", "pclk", + "gate_dclk0" }; +static const char *clkout1_s3c2440_p[] = { "mpll", "upll", "rtc_clkout", + "hclk", "pclk", "gate_dclk1" }; + +static const char *dclk_s3c2443_p[] = { "pclk", "epll" }; +static const char *clkout0_s3c2443_p[] = { "xti", "epll", "armclk", "hclk", "pclk", + "gate_dclk0" }; +static const char *clkout1_s3c2443_p[] = { "dummy", "epll", "rtc_clkout", + "hclk", "pclk", "gate_dclk1" }; + +#define DCLKCON_DCLK_DIV_MASK 0xf +#define DCLKCON_DCLK0_DIV_SHIFT 4 +#define DCLKCON_DCLK0_CMP_SHIFT 8 +#define DCLKCON_DCLK1_DIV_SHIFT 20 +#define DCLKCON_DCLK1_CMP_SHIFT 24 + +static void s3c24xx_dclk_update_cmp(struct s3c24xx_dclk *s3c24xx_dclk, + int div_shift, int cmp_shift) +{ + unsigned long flags = 0; + u32 dclk_con, div, cmp; + + spin_lock_irqsave(&s3c24xx_dclk->dclk_lock, flags); + + dclk_con = readl_relaxed(s3c24xx_dclk->base); + + div = ((dclk_con >> div_shift) & DCLKCON_DCLK_DIV_MASK) + 1; + cmp = ((div + 1) / 2) - 1; + + dclk_con &= ~(DCLKCON_DCLK_DIV_MASK << cmp_shift); + dclk_con |= (cmp << cmp_shift); + + writel_relaxed(dclk_con, s3c24xx_dclk->base); + + spin_unlock_irqrestore(&s3c24xx_dclk->dclk_lock, flags); +} + +static int s3c24xx_dclk0_div_notify(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk0(nb); + + if (event == POST_RATE_CHANGE) { + s3c24xx_dclk_update_cmp(s3c24xx_dclk, + DCLKCON_DCLK0_DIV_SHIFT, DCLKCON_DCLK0_CMP_SHIFT); + } + + return NOTIFY_DONE; +} + +static int s3c24xx_dclk1_div_notify(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk1(nb); + + if (event == POST_RATE_CHANGE) { + s3c24xx_dclk_update_cmp(s3c24xx_dclk, + DCLKCON_DCLK1_DIV_SHIFT, DCLKCON_DCLK1_CMP_SHIFT); + } + + return NOTIFY_DONE; +} + +#ifdef CONFIG_PM_SLEEP +static int s3c24xx_dclk_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev); + + s3c24xx_dclk->reg_save = readl_relaxed(s3c24xx_dclk->base); + return 0; +} + +static int s3c24xx_dclk_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev); + + writel_relaxed(s3c24xx_dclk->reg_save, s3c24xx_dclk->base); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(s3c24xx_dclk_pm_ops, + s3c24xx_dclk_suspend, s3c24xx_dclk_resume); + +static int s3c24xx_dclk_probe(struct platform_device *pdev) +{ + struct s3c24xx_dclk *s3c24xx_dclk; + struct resource *mem; + struct clk **clk_table; + struct s3c24xx_dclk_drv_data *dclk_variant; + int ret, i; + + s3c24xx_dclk = devm_kzalloc(&pdev->dev, sizeof(*s3c24xx_dclk), + GFP_KERNEL); + if (!s3c24xx_dclk) + return -ENOMEM; + + s3c24xx_dclk->dev = &pdev->dev; + platform_set_drvdata(pdev, s3c24xx_dclk); + spin_lock_init(&s3c24xx_dclk->dclk_lock); + + clk_table = devm_kzalloc(&pdev->dev, + sizeof(struct clk *) * DCLK_MAX_CLKS, + GFP_KERNEL); + if (!clk_table) + 
return -ENOMEM; + + s3c24xx_dclk->clk_data.clks = clk_table; + s3c24xx_dclk->clk_data.clk_num = DCLK_MAX_CLKS; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + s3c24xx_dclk->base = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(s3c24xx_dclk->base)) + return PTR_ERR(s3c24xx_dclk->base); + + dclk_variant = (struct s3c24xx_dclk_drv_data *) + platform_get_device_id(pdev)->driver_data; + + + clk_table[MUX_DCLK0] = clk_register_mux(&pdev->dev, "mux_dclk0", + dclk_variant->mux_parent_names, + dclk_variant->mux_num_parents, 0, + s3c24xx_dclk->base, 1, 1, 0, + &s3c24xx_dclk->dclk_lock); + clk_table[MUX_DCLK1] = clk_register_mux(&pdev->dev, "mux_dclk1", + dclk_variant->mux_parent_names, + dclk_variant->mux_num_parents, 0, + s3c24xx_dclk->base, 17, 1, 0, + &s3c24xx_dclk->dclk_lock); + + clk_table[DIV_DCLK0] = clk_register_divider(&pdev->dev, "div_dclk0", + "mux_dclk0", 0, s3c24xx_dclk->base, + 4, 4, 0, &s3c24xx_dclk->dclk_lock); + clk_table[DIV_DCLK1] = clk_register_divider(&pdev->dev, "div_dclk1", + "mux_dclk1", 0, s3c24xx_dclk->base, + 20, 4, 0, &s3c24xx_dclk->dclk_lock); + + clk_table[GATE_DCLK0] = clk_register_gate(&pdev->dev, "gate_dclk0", + "div_dclk0", CLK_SET_RATE_PARENT, + s3c24xx_dclk->base, 0, 0, + &s3c24xx_dclk->dclk_lock); + clk_table[GATE_DCLK1] = clk_register_gate(&pdev->dev, "gate_dclk1", + "div_dclk1", CLK_SET_RATE_PARENT, + s3c24xx_dclk->base, 16, 0, + &s3c24xx_dclk->dclk_lock); + + clk_table[MUX_CLKOUT0] = s3c24xx_register_clkout(&pdev->dev, + "clkout0", dclk_variant->clkout0_parent_names, + dclk_variant->clkout0_num_parents, 4, 7); + clk_table[MUX_CLKOUT1] = s3c24xx_register_clkout(&pdev->dev, + "clkout1", dclk_variant->clkout1_parent_names, + dclk_variant->clkout1_num_parents, 8, 7); + + for (i = 0; i < DCLK_MAX_CLKS; i++) + if (IS_ERR(clk_table[i])) { + dev_err(&pdev->dev, "clock %d failed to register\n", i); + ret = PTR_ERR(clk_table[i]); + goto err_clk_register; + } + + ret = clk_register_clkdev(clk_table[MUX_DCLK0], "dclk0", NULL); + if (!ret) + ret = clk_register_clkdev(clk_table[MUX_DCLK1], "dclk1", NULL); + if (!ret) + ret = clk_register_clkdev(clk_table[MUX_CLKOUT0], + "clkout0", NULL); + if (!ret) + ret = clk_register_clkdev(clk_table[MUX_CLKOUT1], + "clkout1", NULL); + if (ret) { + dev_err(&pdev->dev, "failed to register aliases, %d\n", ret); + goto err_clk_register; + } + + s3c24xx_dclk->dclk0_div_change_nb.notifier_call = + s3c24xx_dclk0_div_notify; + + s3c24xx_dclk->dclk1_div_change_nb.notifier_call = + s3c24xx_dclk1_div_notify; + + ret = clk_notifier_register(clk_table[DIV_DCLK0], + &s3c24xx_dclk->dclk0_div_change_nb); + if (ret) + goto err_clk_register; + + ret = clk_notifier_register(clk_table[DIV_DCLK1], + &s3c24xx_dclk->dclk1_div_change_nb); + if (ret) + goto err_dclk_notify; + + return 0; + +err_dclk_notify: + clk_notifier_unregister(clk_table[DIV_DCLK0], + &s3c24xx_dclk->dclk0_div_change_nb); +err_clk_register: + for (i = 0; i < DCLK_MAX_CLKS; i++) + if (clk_table[i] && !IS_ERR(clk_table[i])) + clk_unregister(clk_table[i]); + + return ret; +} + +static int s3c24xx_dclk_remove(struct platform_device *pdev) +{ + struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev); + struct clk **clk_table = s3c24xx_dclk->clk_data.clks; + int i; + + clk_notifier_unregister(clk_table[DIV_DCLK1], + &s3c24xx_dclk->dclk1_div_change_nb); + clk_notifier_unregister(clk_table[DIV_DCLK0], + &s3c24xx_dclk->dclk0_div_change_nb); + + for (i = 0; i < DCLK_MAX_CLKS; i++) + clk_unregister(clk_table[i]); + + return 0; +} + +static struct s3c24xx_dclk_drv_data 
dclk_variants[] = { + [S3C2410] = { + .clkout0_parent_names = clkout0_s3c2410_p, + .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2410_p), + .clkout1_parent_names = clkout1_s3c2410_p, + .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2410_p), + .mux_parent_names = dclk_s3c2410_p, + .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p), + }, + [S3C2412] = { + .clkout0_parent_names = clkout0_s3c2412_p, + .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2412_p), + .clkout1_parent_names = clkout1_s3c2412_p, + .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2412_p), + .mux_parent_names = dclk_s3c2410_p, + .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p), + }, + [S3C2440] = { + .clkout0_parent_names = clkout0_s3c2440_p, + .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2440_p), + .clkout1_parent_names = clkout1_s3c2440_p, + .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2440_p), + .mux_parent_names = dclk_s3c2410_p, + .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p), + }, + [S3C2443] = { + .clkout0_parent_names = clkout0_s3c2443_p, + .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2443_p), + .clkout1_parent_names = clkout1_s3c2443_p, + .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2443_p), + .mux_parent_names = dclk_s3c2443_p, + .mux_num_parents = ARRAY_SIZE(dclk_s3c2443_p), + }, +}; + +static struct platform_device_id s3c24xx_dclk_driver_ids[] = { + { + .name = "s3c2410-dclk", + .driver_data = (kernel_ulong_t)&dclk_variants[S3C2410], + }, { + .name = "s3c2412-dclk", + .driver_data = (kernel_ulong_t)&dclk_variants[S3C2412], + }, { + .name = "s3c2440-dclk", + .driver_data = (kernel_ulong_t)&dclk_variants[S3C2440], + }, { + .name = "s3c2443-dclk", + .driver_data = (kernel_ulong_t)&dclk_variants[S3C2443], + }, + { } +}; + +MODULE_DEVICE_TABLE(platform, s3c24xx_dclk_driver_ids); + +static struct platform_driver s3c24xx_dclk_driver = { + .driver = { + .name = "s3c24xx-dclk", + .owner = THIS_MODULE, + .pm = &s3c24xx_dclk_pm_ops, + }, + .probe = s3c24xx_dclk_probe, + .remove = s3c24xx_dclk_remove, + .id_table = s3c24xx_dclk_driver_ids, +}; +module_platform_driver(s3c24xx_dclk_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>"); +MODULE_DESCRIPTION("Driver for the S3C24XX external clock outputs"); diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c new file mode 100644 index 00000000000..ba0716801db --- /dev/null +++ b/drivers/clk/samsung/clk-s3c2410.c @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common Clock Framework support for S3C2410 and following SoCs. + */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> + +#include <dt-bindings/clock/s3c2410.h> + +#include "clk.h" +#include "clk-pll.h" + +#define LOCKTIME 0x00 +#define MPLLCON 0x04 +#define UPLLCON 0x08 +#define CLKCON 0x0c +#define CLKSLOW 0x10 +#define CLKDIVN 0x14 +#define CAMDIVN 0x18 + +/* the soc types */ +enum supported_socs { + S3C2410, + S3C2440, + S3C2442, +}; + +/* list of PLLs to be registered */ +enum s3c2410_plls { + mpll, upll, +}; + +static void __iomem *reg_base; + +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *s3c2410_save; + +/* + * list of controller registers to be saved and restored during a + * suspend/resume cycle. 
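+ * On resume, samsung_clk_restore() writes the saved values back
+ * verbatim, which brings both PLLs up with their pre-suspend settings.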
+ */ +static unsigned long s3c2410_clk_regs[] __initdata = { + LOCKTIME, + MPLLCON, + UPLLCON, + CLKCON, + CLKSLOW, + CLKDIVN, + CAMDIVN, +}; + +static int s3c2410_clk_suspend(void) +{ + samsung_clk_save(reg_base, s3c2410_save, + ARRAY_SIZE(s3c2410_clk_regs)); + + return 0; +} + +static void s3c2410_clk_resume(void) +{ + samsung_clk_restore(reg_base, s3c2410_save, + ARRAY_SIZE(s3c2410_clk_regs)); +} + +static struct syscore_ops s3c2410_clk_syscore_ops = { + .suspend = s3c2410_clk_suspend, + .resume = s3c2410_clk_resume, +}; + +static void s3c2410_clk_sleep_init(void) +{ + s3c2410_save = samsung_clk_alloc_reg_dump(s3c2410_clk_regs, + ARRAY_SIZE(s3c2410_clk_regs)); + if (!s3c2410_save) { + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); + return; + } + + register_syscore_ops(&s3c2410_clk_syscore_ops); + return; +} +#else +static void s3c2410_clk_sleep_init(void) {} +#endif + +PNAME(fclk_p) = { "mpll", "div_slow" }; + +struct samsung_mux_clock s3c2410_common_muxes[] __initdata = { + MUX(FCLK, "fclk", fclk_p, CLKSLOW, 4, 1), +}; + +static struct clk_div_table divslow_d[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 6 }, + { .val = 4, .div = 8 }, + { .val = 5, .div = 10 }, + { .val = 6, .div = 12 }, + { .val = 7, .div = 14 }, + { /* sentinel */ }, +}; + +struct samsung_div_clock s3c2410_common_dividers[] __initdata = { + DIV_T(0, "div_slow", "xti", CLKSLOW, 0, 3, divslow_d), + DIV(PCLK, "pclk", "hclk", CLKDIVN, 0, 1), +}; + +struct samsung_gate_clock s3c2410_common_gates[] __initdata = { + GATE(PCLK_SPI, "spi", "pclk", CLKCON, 18, 0, 0), + GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 17, 0, 0), + GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 16, 0, 0), + GATE(PCLK_ADC, "adc", "pclk", CLKCON, 15, 0, 0), + GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 14, 0, 0), + GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 13, CLK_IGNORE_UNUSED, 0), + GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 12, 0, 0), + GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 11, 0, 0), + GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 10, 0, 0), + GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 9, 0, 0), + GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 8, 0, 0), + GATE(HCLK_USBD, "usb-device", "hclk", CLKCON, 7, 0, 0), + GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0), + GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0), + GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0), +}; + +/* should be added _after_ the soc-specific clocks are created */ +struct samsung_clock_alias s3c2410_common_aliases[] __initdata = { + ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"), + ALIAS(PCLK_ADC, NULL, "adc"), + ALIAS(PCLK_RTC, NULL, "rtc"), + ALIAS(PCLK_PWM, NULL, "timers"), + ALIAS(HCLK_LCD, NULL, "lcd"), + ALIAS(HCLK_USBD, NULL, "usb-device"), + ALIAS(HCLK_USBH, NULL, "usb-host"), + ALIAS(UCLK, NULL, "usb-bus-host"), + ALIAS(UCLK, NULL, "usb-bus-gadget"), + ALIAS(ARMCLK, NULL, "armclk"), + ALIAS(UCLK, NULL, "uclk"), + ALIAS(HCLK, NULL, "hclk"), + ALIAS(MPLL, NULL, "mpll"), + ALIAS(FCLK, NULL, "fclk"), +}; + +/* S3C2410 specific clocks */ + +static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = { + /* sorted in descending order */ + /* 2410A extras */ + PLL_35XX_RATE(270000000, 127, 1, 1), + PLL_35XX_RATE(268000000, 126, 1, 1), + PLL_35XX_RATE(266000000, 125, 1, 1), + PLL_35XX_RATE(226000000, 105, 1, 1), + PLL_35XX_RATE(210000000, 132, 2, 1), + /* 2410 common */ + PLL_35XX_RATE(203000000, 161, 3, 1), + PLL_35XX_RATE(192000000, 88, 1, 1), + PLL_35XX_RATE(186000000, 85, 1, 1), + PLL_35XX_RATE(180000000, 
82, 1, 1),
+ PLL_35XX_RATE(170000000, 77, 1, 1),
+ PLL_35XX_RATE(158000000, 71, 1, 1),
+ PLL_35XX_RATE(152000000, 68, 1, 1),
+ PLL_35XX_RATE(147000000, 90, 2, 1),
+ PLL_35XX_RATE(135000000, 82, 2, 1),
+ PLL_35XX_RATE(124000000, 116, 1, 2),
+ PLL_35XX_RATE(118000000, 150, 2, 2),
+ PLL_35XX_RATE(113000000, 105, 1, 2),
+ PLL_35XX_RATE(101000000, 127, 2, 2),
+ PLL_35XX_RATE(90000000, 112, 2, 2),
+ PLL_35XX_RATE(85000000, 105, 2, 2),
+ PLL_35XX_RATE(79000000, 71, 1, 2),
+ PLL_35XX_RATE(68000000, 82, 2, 2),
+ PLL_35XX_RATE(56000000, 142, 2, 3),
+ PLL_35XX_RATE(51000000, 161, 3, 3),
+ PLL_35XX_RATE(48000000, 120, 2, 3),
+ PLL_35XX_RATE(45000000, 82, 1, 3),
+ PLL_35XX_RATE(34000000, 82, 2, 3),
+ { /* sentinel */ },
+};
+
+static struct samsung_pll_clock s3c2410_plls[] __initdata = {
+ [mpll] = PLL(pll_s3c2410_mpll, MPLL, "mpll", "xti",
+ LOCKTIME, MPLLCON, NULL),
+ [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
+ LOCKTIME, UPLLCON, NULL),
+};
+
+struct samsung_div_clock s3c2410_dividers[] __initdata = {
+ DIV(HCLK, "hclk", "mpll", CLKDIVN, 1, 1),
+};
+
+struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
+ /*
+ * armclk is directly supplied by the fclk, without
+ * switching possibility like on the s3c244x below.
+ */
+ FFACTOR(ARMCLK, "armclk", "fclk", 1, 1, 0),
+
+ /* uclk is fed from the unmodified upll */
+ FFACTOR(UCLK, "uclk", "upll", 1, 1, 0),
+};
+
+struct samsung_clock_alias s3c2410_aliases[] __initdata = {
+ ALIAS(PCLK_UART0, "s3c2410-uart.0", "uart"),
+ ALIAS(PCLK_UART1, "s3c2410-uart.1", "uart"),
+ ALIAS(PCLK_UART2, "s3c2410-uart.2", "uart"),
+ ALIAS(PCLK_UART0, "s3c2410-uart.0", "clk_uart_baud0"),
+ ALIAS(PCLK_UART1, "s3c2410-uart.1", "clk_uart_baud0"),
+ ALIAS(PCLK_UART2, "s3c2410-uart.2", "clk_uart_baud0"),
+ ALIAS(UCLK, NULL, "clk_uart_baud1"),
+};
+
+/* S3C244x specific clocks */
+
+static struct samsung_pll_rate_table pll_s3c244x_12mhz_tbl[] __initdata = {
+ /* sorted in descending order */
+ PLL_35XX_RATE(400000000, 0x5c, 1, 1),
+ PLL_35XX_RATE(390000000, 0x7a, 2, 1),
+ PLL_35XX_RATE(380000000, 0x57, 1, 1),
+ PLL_35XX_RATE(370000000, 0xb1, 4, 1),
+ PLL_35XX_RATE(360000000, 0x70, 2, 1),
+ PLL_35XX_RATE(350000000, 0xa7, 4, 1),
+ PLL_35XX_RATE(340000000, 0x4d, 1, 1),
+ PLL_35XX_RATE(330000000, 0x66, 2, 1),
+ PLL_35XX_RATE(320000000, 0x98, 4, 1),
+ PLL_35XX_RATE(310000000, 0x93, 4, 1),
+ PLL_35XX_RATE(300000000, 0x75, 3, 1),
+ PLL_35XX_RATE(240000000, 0x70, 1, 2),
+ PLL_35XX_RATE(230000000, 0x6b, 1, 2),
+ PLL_35XX_RATE(220000000, 0x66, 1, 2),
+ PLL_35XX_RATE(210000000, 0x84, 2, 2),
+ PLL_35XX_RATE(200000000, 0x5c, 1, 2),
+ PLL_35XX_RATE(190000000, 0x57, 1, 2),
+ PLL_35XX_RATE(180000000, 0x70, 2, 2),
+ PLL_35XX_RATE(170000000, 0x4d, 1, 2),
+ PLL_35XX_RATE(160000000, 0x98, 4, 2),
+ PLL_35XX_RATE(150000000, 0x75, 3, 2),
+ PLL_35XX_RATE(120000000, 0x70, 1, 3),
+ PLL_35XX_RATE(110000000, 0x66, 1, 3),
+ PLL_35XX_RATE(100000000, 0x5c, 1, 3),
+ PLL_35XX_RATE(90000000, 0x70, 2, 3),
+ PLL_35XX_RATE(80000000, 0x98, 4, 3),
+ PLL_35XX_RATE(75000000, 0x75, 3, 3),
+ { /* sentinel */ },
+};
+
+static struct samsung_pll_clock s3c244x_common_plls[] __initdata = {
+ [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti",
+ LOCKTIME, MPLLCON, NULL),
+ [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
+ LOCKTIME, UPLLCON, NULL),
+};
+
+PNAME(hclk_p) = { "fclk", "div_hclk_2", "div_hclk_4", "div_hclk_3" };
+PNAME(armclk_p) = { "fclk", "hclk" };
+
+struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
+ MUX(HCLK, "hclk", hclk_p, CLKDIVN, 1, 2),
+ MUX(ARMCLK, "armclk", armclk_p,
CAMDIVN, 12, 1), +}; + +struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = { + FFACTOR(0, "div_hclk_2", "fclk", 1, 2, 0), + FFACTOR(0, "ff_cam", "div_cam", 2, 1, CLK_SET_RATE_PARENT), +}; + +static struct clk_div_table div_hclk_4_d[] = { + { .val = 0, .div = 4 }, + { .val = 1, .div = 8 }, + { /* sentinel */ }, +}; + +static struct clk_div_table div_hclk_3_d[] = { + { .val = 0, .div = 3 }, + { .val = 1, .div = 6 }, + { /* sentinel */ }, +}; + +struct samsung_div_clock s3c244x_common_dividers[] __initdata = { + DIV(UCLK, "uclk", "upll", CLKDIVN, 3, 1), + DIV(0, "div_hclk", "fclk", CLKDIVN, 1, 1), + DIV_T(0, "div_hclk_4", "fclk", CAMDIVN, 9, 1, div_hclk_4_d), + DIV_T(0, "div_hclk_3", "fclk", CAMDIVN, 8, 1, div_hclk_3_d), + DIV(0, "div_cam", "upll", CAMDIVN, 0, 3), +}; + +struct samsung_gate_clock s3c244x_common_gates[] __initdata = { + GATE(HCLK_CAM, "cam", "hclk", CLKCON, 19, 0, 0), +}; + +struct samsung_clock_alias s3c244x_common_aliases[] __initdata = { + ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"), + ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"), + ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"), + ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"), + ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"), + ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"), + ALIAS(HCLK_CAM, NULL, "camif"), + ALIAS(CAMIF, NULL, "camif-upll"), +}; + +/* S3C2440 specific clocks */ + +PNAME(s3c2440_camif_p) = { "upll", "ff_cam" }; + +struct samsung_mux_clock s3c2440_muxes[] __initdata = { + MUX(CAMIF, "camif", s3c2440_camif_p, CAMDIVN, 4, 1), +}; + +struct samsung_gate_clock s3c2440_gates[] __initdata = { + GATE(PCLK_AC97, "ac97", "pclk", CLKCON, 20, 0, 0), +}; + +/* S3C2442 specific clocks */ + +struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = { + FFACTOR(0, "upll_3", "upll", 1, 3, 0), +}; + +PNAME(s3c2442_camif_p) = { "upll", "ff_cam", "upll", "upll_3" }; + +struct samsung_mux_clock s3c2442_muxes[] __initdata = { + MUX(CAMIF, "camif", s3c2442_camif_p, CAMDIVN, 4, 2), +}; + +/* + * fixed rate clocks generated outside the soc + * Only necessary until the devicetree-move is complete + */ +#define XTI 1 +struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = { + FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0), +}; + +static void __init s3c2410_common_clk_register_fixed_ext( + struct samsung_clk_provider *ctx, + unsigned long xti_f) +{ + struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal"); + + s3c2410_common_frate_clks[0].fixed_rate = xti_f; + samsung_clk_register_fixed_rate(ctx, s3c2410_common_frate_clks, + ARRAY_SIZE(s3c2410_common_frate_clks)); + + samsung_clk_register_alias(ctx, &xti_alias, 1); +} + +void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f, + int current_soc, + void __iomem *base) +{ + struct samsung_clk_provider *ctx; + reg_base = base; + + if (np) { + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + } + + ctx = samsung_clk_init(np, reg_base, NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + + /* Register external clocks only in non-dt cases */ + if (!np) + s3c2410_common_clk_register_fixed_ext(ctx, xti_f); + + if (current_soc == 2410) { + if (_get_rate("xti") == 12 * MHZ) { + s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl; + s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl; + } + + /* Register PLLs. 
*/
+ samsung_clk_register_pll(ctx, s3c2410_plls,
+ ARRAY_SIZE(s3c2410_plls), reg_base);
+
+ } else { /* S3C2440, S3C2442 */
+ if (_get_rate("xti") == 12 * MHZ) {
+ /*
+ * plls follow different calculation schemes, with the
+ * upll following the same scheme as the s3c2410 plls
+ */
+ s3c244x_common_plls[mpll].rate_table =
+ pll_s3c244x_12mhz_tbl;
+ s3c244x_common_plls[upll].rate_table =
+ pll_s3c2410_12mhz_tbl;
+ }
+
+ /* Register PLLs. */
+ samsung_clk_register_pll(ctx, s3c244x_common_plls,
+ ARRAY_SIZE(s3c244x_common_plls), reg_base);
+ }
+
+ /* Register common internal clocks. */
+ samsung_clk_register_mux(ctx, s3c2410_common_muxes,
+ ARRAY_SIZE(s3c2410_common_muxes));
+ samsung_clk_register_div(ctx, s3c2410_common_dividers,
+ ARRAY_SIZE(s3c2410_common_dividers));
+ samsung_clk_register_gate(ctx, s3c2410_common_gates,
+ ARRAY_SIZE(s3c2410_common_gates));
+
+ if (current_soc == S3C2440 || current_soc == S3C2442) {
+ samsung_clk_register_div(ctx, s3c244x_common_dividers,
+ ARRAY_SIZE(s3c244x_common_dividers));
+ samsung_clk_register_gate(ctx, s3c244x_common_gates,
+ ARRAY_SIZE(s3c244x_common_gates));
+ samsung_clk_register_mux(ctx, s3c244x_common_muxes,
+ ARRAY_SIZE(s3c244x_common_muxes));
+ samsung_clk_register_fixed_factor(ctx, s3c244x_common_ffactor,
+ ARRAY_SIZE(s3c244x_common_ffactor));
+ }
+
+ /* Register SoC-specific clocks. */
+ switch (current_soc) {
+ case S3C2410:
+ samsung_clk_register_div(ctx, s3c2410_dividers,
+ ARRAY_SIZE(s3c2410_dividers));
+ samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
+ ARRAY_SIZE(s3c2410_ffactor));
+ samsung_clk_register_alias(ctx, s3c2410_aliases,
+ ARRAY_SIZE(s3c2410_aliases));
+ break;
+ case S3C2440:
+ samsung_clk_register_mux(ctx, s3c2440_muxes,
+ ARRAY_SIZE(s3c2440_muxes));
+ samsung_clk_register_gate(ctx, s3c2440_gates,
+ ARRAY_SIZE(s3c2440_gates));
+ break;
+ case S3C2442:
+ samsung_clk_register_mux(ctx, s3c2442_muxes,
+ ARRAY_SIZE(s3c2442_muxes));
+ samsung_clk_register_fixed_factor(ctx, s3c2442_ffactor,
+ ARRAY_SIZE(s3c2442_ffactor));
+ break;
+ }
+
+ /*
+ * Register common aliases at the end, as some of the aliased clocks
+ * are SoC specific.
+ */
+ samsung_clk_register_alias(ctx, s3c2410_common_aliases,
+ ARRAY_SIZE(s3c2410_common_aliases));
+
+ if (current_soc == S3C2440 || current_soc == S3C2442) {
+ samsung_clk_register_alias(ctx, s3c244x_common_aliases,
+ ARRAY_SIZE(s3c244x_common_aliases));
+ }
+
+ s3c2410_clk_sleep_init();
+}
+
+static void __init s3c2410_clk_init(struct device_node *np)
+{
+ s3c2410_common_clk_init(np, 0, S3C2410, 0);
+}
+CLK_OF_DECLARE(s3c2410_clk, "samsung,s3c2410-clock", s3c2410_clk_init);
+
+static void __init s3c2440_clk_init(struct device_node *np)
+{
+ s3c2410_common_clk_init(np, 0, S3C2440, 0);
+}
+CLK_OF_DECLARE(s3c2440_clk, "samsung,s3c2440-clock", s3c2440_clk_init);
+
+static void __init s3c2442_clk_init(struct device_node *np)
+{
+ s3c2410_common_clk_init(np, 0, S3C2442, 0);
+}
+CLK_OF_DECLARE(s3c2442_clk, "samsung,s3c2442-clock", s3c2442_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
new file mode 100644
index 00000000000..23e4313f625
--- /dev/null
+++ b/drivers/clk/samsung/clk-s3c2412.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for S3C2412 and S3C2413.
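+ * The S3C2412 reuses the S3C2440-style MPLL arithmetic and the S3C2410
+ * UPLL type, but feeds the UPLL from urefclk instead of xti (see the
+ * pll table below).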
+ */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> + +#include <dt-bindings/clock/s3c2412.h> + +#include "clk.h" +#include "clk-pll.h" + +#define LOCKTIME 0x00 +#define MPLLCON 0x04 +#define UPLLCON 0x08 +#define CLKCON 0x0c +#define CLKDIVN 0x14 +#define CLKSRC 0x1c + +/* list of PLLs to be registered */ +enum s3c2412_plls { + mpll, upll, +}; + +static void __iomem *reg_base; + +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *s3c2412_save; + +/* + * list of controller registers to be saved and restored during a + * suspend/resume cycle. + */ +static unsigned long s3c2412_clk_regs[] __initdata = { + LOCKTIME, + MPLLCON, + UPLLCON, + CLKCON, + CLKDIVN, + CLKSRC, +}; + +static int s3c2412_clk_suspend(void) +{ + samsung_clk_save(reg_base, s3c2412_save, + ARRAY_SIZE(s3c2412_clk_regs)); + + return 0; +} + +static void s3c2412_clk_resume(void) +{ + samsung_clk_restore(reg_base, s3c2412_save, + ARRAY_SIZE(s3c2412_clk_regs)); +} + +static struct syscore_ops s3c2412_clk_syscore_ops = { + .suspend = s3c2412_clk_suspend, + .resume = s3c2412_clk_resume, +}; + +static void s3c2412_clk_sleep_init(void) +{ + s3c2412_save = samsung_clk_alloc_reg_dump(s3c2412_clk_regs, + ARRAY_SIZE(s3c2412_clk_regs)); + if (!s3c2412_save) { + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); + return; + } + + register_syscore_ops(&s3c2412_clk_syscore_ops); + return; +} +#else +static void s3c2412_clk_sleep_init(void) {} +#endif + +static struct clk_div_table divxti_d[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 6 }, + { .val = 4, .div = 8 }, + { .val = 5, .div = 10 }, + { .val = 6, .div = 12 }, + { .val = 7, .div = 14 }, + { /* sentinel */ }, +}; + +struct samsung_div_clock s3c2412_dividers[] __initdata = { + DIV_T(0, "div_xti", "xti", CLKSRC, 0, 3, divxti_d), + DIV(0, "div_cam", "mux_cam", CLKDIVN, 16, 4), + DIV(0, "div_i2s", "mux_i2s", CLKDIVN, 12, 4), + DIV(0, "div_uart", "mux_uart", CLKDIVN, 8, 4), + DIV(0, "div_usb", "mux_usb", CLKDIVN, 6, 1), + DIV(0, "div_hclk_half", "hclk", CLKDIVN, 5, 1), + DIV(ARMDIV, "armdiv", "msysclk", CLKDIVN, 3, 1), + DIV(PCLK, "pclk", "hclk", CLKDIVN, 2, 1), + DIV(HCLK, "hclk", "armdiv", CLKDIVN, 0, 2), +}; + +struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = { + FFACTOR(0, "ff_hclk", "hclk", 2, 1, CLK_SET_RATE_PARENT), +}; + +/* + * The first two use the OM[4] setting, which is not readable from + * software, so assume it is set to xti. 
+ */ +PNAME(erefclk_p) = { "xti", "xti", "xti", "ext" }; +PNAME(urefclk_p) = { "xti", "xti", "xti", "ext" }; + +PNAME(camclk_p) = { "usysclk", "hclk" }; +PNAME(usbclk_p) = { "usysclk", "hclk" }; +PNAME(i2sclk_p) = { "erefclk", "mpll" }; +PNAME(uartclk_p) = { "erefclk", "mpll" }; +PNAME(usysclk_p) = { "urefclk", "upll" }; +PNAME(msysclk_p) = { "mdivclk", "mpll" }; +PNAME(mdivclk_p) = { "xti", "div_xti" }; +PNAME(armclk_p) = { "armdiv", "hclk" }; + +struct samsung_mux_clock s3c2412_muxes[] __initdata = { + MUX(0, "erefclk", erefclk_p, CLKSRC, 14, 2), + MUX(0, "urefclk", urefclk_p, CLKSRC, 12, 2), + MUX(0, "mux_cam", camclk_p, CLKSRC, 11, 1), + MUX(0, "mux_usb", usbclk_p, CLKSRC, 10, 1), + MUX(0, "mux_i2s", i2sclk_p, CLKSRC, 9, 1), + MUX(0, "mux_uart", uartclk_p, CLKSRC, 8, 1), + MUX(USYSCLK, "usysclk", usysclk_p, CLKSRC, 5, 1), + MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1), + MUX(MDIVCLK, "mdivclk", mdivclk_p, CLKSRC, 3, 1), + MUX(ARMCLK, "armclk", armclk_p, CLKDIVN, 4, 1), +}; + +static struct samsung_pll_clock s3c2412_plls[] __initdata = { + [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti", + LOCKTIME, MPLLCON, NULL), + [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk", + LOCKTIME, UPLLCON, NULL), +}; + +struct samsung_gate_clock s3c2412_gates[] __initdata = { + GATE(PCLK_WDT, "wdt", "pclk", CLKCON, 28, 0, 0), + GATE(PCLK_SPI, "spi", "pclk", CLKCON, 27, 0, 0), + GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 26, 0, 0), + GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 25, 0, 0), + GATE(PCLK_ADC, "adc", "pclk", CLKCON, 24, 0, 0), + GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 23, 0, 0), + GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 22, CLK_IGNORE_UNUSED, 0), + GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 21, 0, 0), + GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 20, 0, 0), + GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 19, 0, 0), + GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 18, 0, 0), + GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 17, 0, 0), + GATE(PCLK_USBD, "usb-device", "pclk", CLKCON, 16, 0, 0), + GATE(SCLK_CAM, "sclk_cam", "div_cam", CLKCON, 15, 0, 0), + GATE(SCLK_UART, "sclk_uart", "div_uart", CLKCON, 14, 0, 0), + GATE(SCLK_I2S, "sclk_i2s", "div_i2s", CLKCON, 13, 0, 0), + GATE(SCLK_USBH, "sclk_usbh", "div_usb", CLKCON, 12, 0, 0), + GATE(SCLK_USBD, "sclk_usbd", "div_usb", CLKCON, 11, 0, 0), + GATE(HCLK_HALF, "hclk_half", "div_hclk_half", CLKCON, 10, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_X2, "hclkx2", "ff_hclk", CLKCON, 9, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_SDRAM, "sdram", "hclk", CLKCON, 8, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0), + GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0), + GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0), + GATE(HCLK_DMA3, "dma3", "hclk", CLKCON, 3, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA2, "dma2", "hclk", CLKCON, 2, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA1, "dma1", "hclk", CLKCON, 1, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA0, "dma0", "hclk", CLKCON, 0, CLK_IGNORE_UNUSED, 0), +}; + +struct samsung_clock_alias s3c2412_aliases[] __initdata = { + ALIAS(PCLK_UART0, "s3c2412-uart.0", "uart"), + ALIAS(PCLK_UART1, "s3c2412-uart.1", "uart"), + ALIAS(PCLK_UART2, "s3c2412-uart.2", "uart"), + ALIAS(PCLK_UART0, "s3c2412-uart.0", "clk_uart_baud2"), + ALIAS(PCLK_UART1, "s3c2412-uart.1", "clk_uart_baud2"), + ALIAS(PCLK_UART2, "s3c2412-uart.2", "clk_uart_baud2"), + ALIAS(SCLK_UART, NULL, "clk_uart_baud3"), + ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"), + ALIAS(PCLK_ADC, NULL, "adc"), + ALIAS(PCLK_RTC, NULL, "rtc"), + ALIAS(PCLK_PWM, NULL, "timers"), + ALIAS(HCLK_LCD, NULL, "lcd"), + 
ALIAS(PCLK_USBD, NULL, "usb-device"), + ALIAS(SCLK_USBD, NULL, "usb-bus-gadget"), + ALIAS(HCLK_USBH, NULL, "usb-host"), + ALIAS(SCLK_USBH, NULL, "usb-bus-host"), + ALIAS(ARMCLK, NULL, "armclk"), + ALIAS(HCLK, NULL, "hclk"), + ALIAS(MPLL, NULL, "mpll"), + ALIAS(MSYSCLK, NULL, "fclk"), +}; + +/* + * fixed rate clocks generated outside the soc + * Only necessary until the devicetree-move is complete + */ +#define XTI 1 +struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = { + FRATE(XTI, "xti", NULL, CLK_IS_ROOT, 0), + FRATE(0, "ext", NULL, CLK_IS_ROOT, 0), +}; + +static void __init s3c2412_common_clk_register_fixed_ext( + struct samsung_clk_provider *ctx, + unsigned long xti_f, unsigned long ext_f) +{ + /* xtal alias is necessary for the current cpufreq driver */ + struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal"); + + s3c2412_common_frate_clks[0].fixed_rate = xti_f; + s3c2412_common_frate_clks[1].fixed_rate = ext_f; + samsung_clk_register_fixed_rate(ctx, s3c2412_common_frate_clks, + ARRAY_SIZE(s3c2412_common_frate_clks)); + + samsung_clk_register_alias(ctx, &xti_alias, 1); +} + +void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f, + unsigned long ext_f, void __iomem *base) +{ + struct samsung_clk_provider *ctx; + reg_base = base; + + if (np) { + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + } + + ctx = samsung_clk_init(np, reg_base, NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + + /* Register external clocks only in non-dt cases */ + if (!np) + s3c2412_common_clk_register_fixed_ext(ctx, xti_f, ext_f); + + /* Register PLLs. */ + samsung_clk_register_pll(ctx, s3c2412_plls, ARRAY_SIZE(s3c2412_plls), + reg_base); + + /* Register common internal clocks. */ + samsung_clk_register_mux(ctx, s3c2412_muxes, ARRAY_SIZE(s3c2412_muxes)); + samsung_clk_register_div(ctx, s3c2412_dividers, + ARRAY_SIZE(s3c2412_dividers)); + samsung_clk_register_gate(ctx, s3c2412_gates, + ARRAY_SIZE(s3c2412_gates)); + samsung_clk_register_fixed_factor(ctx, s3c2412_ffactor, + ARRAY_SIZE(s3c2412_ffactor)); + samsung_clk_register_alias(ctx, s3c2412_aliases, + ARRAY_SIZE(s3c2412_aliases)); + + s3c2412_clk_sleep_init(); +} + +static void __init s3c2412_clk_init(struct device_node *np) +{ + s3c2412_common_clk_init(np, 0, 0, 0); +} +CLK_OF_DECLARE(s3c2412_clk, "samsung,s3c2412-clock", s3c2412_clk_init); diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c new file mode 100644 index 00000000000..c4bbdabebaa --- /dev/null +++ b/drivers/clk/samsung/clk-s3c2443.c @@ -0,0 +1,466 @@ +/* + * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common Clock Framework support for S3C2443 and following SoCs. 
+ */ + +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/clk-provider.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/syscore_ops.h> + +#include <dt-bindings/clock/s3c2443.h> + +#include "clk.h" +#include "clk-pll.h" + +/* S3C2416 clock controller register offsets */ +#define LOCKCON0 0x00 +#define LOCKCON1 0x04 +#define MPLLCON 0x10 +#define EPLLCON 0x18 +#define EPLLCON_K 0x1C +#define CLKSRC 0x20 +#define CLKDIV0 0x24 +#define CLKDIV1 0x28 +#define CLKDIV2 0x2C +#define HCLKCON 0x30 +#define PCLKCON 0x34 +#define SCLKCON 0x38 + +/* the soc types */ +enum supported_socs { + S3C2416, + S3C2443, + S3C2450, +}; + +/* list of PLLs to be registered */ +enum s3c2443_plls { + mpll, epll, +}; + +static void __iomem *reg_base; + +#ifdef CONFIG_PM_SLEEP +static struct samsung_clk_reg_dump *s3c2443_save; + +/* + * list of controller registers to be saved and restored during a + * suspend/resume cycle. + */ +static unsigned long s3c2443_clk_regs[] __initdata = { + LOCKCON0, + LOCKCON1, + MPLLCON, + EPLLCON, + EPLLCON_K, + CLKSRC, + CLKDIV0, + CLKDIV1, + CLKDIV2, + PCLKCON, + HCLKCON, + SCLKCON, +}; + +static int s3c2443_clk_suspend(void) +{ + samsung_clk_save(reg_base, s3c2443_save, + ARRAY_SIZE(s3c2443_clk_regs)); + + return 0; +} + +static void s3c2443_clk_resume(void) +{ + samsung_clk_restore(reg_base, s3c2443_save, + ARRAY_SIZE(s3c2443_clk_regs)); +} + +static struct syscore_ops s3c2443_clk_syscore_ops = { + .suspend = s3c2443_clk_suspend, + .resume = s3c2443_clk_resume, +}; + +static void s3c2443_clk_sleep_init(void) +{ + s3c2443_save = samsung_clk_alloc_reg_dump(s3c2443_clk_regs, + ARRAY_SIZE(s3c2443_clk_regs)); + if (!s3c2443_save) { + pr_warn("%s: failed to allocate sleep save data, no sleep support!\n", + __func__); + return; + } + + register_syscore_ops(&s3c2443_clk_syscore_ops); + return; +} +#else +static void s3c2443_clk_sleep_init(void) {} +#endif + +PNAME(epllref_p) = { "mpllref", "mpllref", "xti", "ext" }; +PNAME(esysclk_p) = { "epllref", "epll" }; +PNAME(mpllref_p) = { "xti", "mdivclk" }; +PNAME(msysclk_p) = { "mpllref", "mpll" }; +PNAME(armclk_p) = { "armdiv" , "hclk" }; +PNAME(i2s0_p) = { "div_i2s0", "ext_i2s", "epllref", "epllref" }; + +struct samsung_mux_clock s3c2443_common_muxes[] __initdata = { + MUX(0, "epllref", epllref_p, CLKSRC, 7, 2), + MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1), + MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1), + MUX_A(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1, "msysclk"), + MUX_A(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1, "armclk"), + MUX(0, "mux_i2s0", i2s0_p, CLKSRC, 14, 2), +}; + +static struct clk_div_table hclk_d[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 3, .div = 4 }, + { /* sentinel */ }, +}; + +static struct clk_div_table mdivclk_d[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 3 }, + { .val = 2, .div = 5 }, + { .val = 3, .div = 7 }, + { .val = 4, .div = 9 }, + { .val = 5, .div = 11 }, + { .val = 6, .div = 13 }, + { .val = 7, .div = 15 }, + { /* sentinel */ }, +}; + +struct samsung_div_clock s3c2443_common_dividers[] __initdata = { + DIV_T(0, "mdivclk", "xti", CLKDIV0, 6, 3, mdivclk_d), + DIV(0, "prediv", "msysclk", CLKDIV0, 4, 2), + DIV_T(HCLK, "hclk", "prediv", CLKDIV0, 0, 2, hclk_d), + DIV(PCLK, "pclk", "hclk", CLKDIV0, 2, 1), + DIV(0, "div_hsspi0_epll", "esysclk", CLKDIV1, 24, 2), + DIV(0, "div_fimd", "esysclk", CLKDIV1, 16, 8), + DIV(0, "div_i2s0", "esysclk", CLKDIV1, 12, 4), + DIV(0, "div_uart", "esysclk", CLKDIV1, 8, 4), + DIV(0, "div_hsmmc1", "esysclk", 
CLKDIV1, 6, 2), + DIV(0, "div_usbhost", "esysclk", CLKDIV1, 4, 2), +}; + +struct samsung_gate_clock s3c2443_common_gates[] __initdata = { + GATE(SCLK_HSMMC_EXT, "sclk_hsmmcext", "ext", SCLKCON, 13, 0, 0), + GATE(SCLK_HSMMC1, "sclk_hsmmc1", "div_hsmmc1", SCLKCON, 12, 0, 0), + GATE(SCLK_FIMD, "sclk_fimd", "div_fimd", SCLKCON, 10, 0, 0), + GATE(SCLK_I2S0, "sclk_i2s0", "mux_i2s0", SCLKCON, 9, 0, 0), + GATE(SCLK_UART, "sclk_uart", "div_uart", SCLKCON, 8, 0, 0), + GATE(SCLK_USBH, "sclk_usbhost", "div_usbhost", SCLKCON, 1, 0, 0), + GATE(HCLK_DRAM, "dram", "hclk", HCLKCON, 19, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_SSMC, "ssmc", "hclk", HCLKCON, 18, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_HSMMC1, "hsmmc1", "hclk", HCLKCON, 16, 0, 0), + GATE(HCLK_USBD, "usb-device", "hclk", HCLKCON, 12, 0, 0), + GATE(HCLK_USBH, "usb-host", "hclk", HCLKCON, 11, 0, 0), + GATE(HCLK_LCD, "lcd", "hclk", HCLKCON, 9, 0, 0), + GATE(HCLK_DMA5, "dma5", "hclk", HCLKCON, 5, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA4, "dma4", "hclk", HCLKCON, 4, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA3, "dma3", "hclk", HCLKCON, 3, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA2, "dma2", "hclk", HCLKCON, 2, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA1, "dma1", "hclk", HCLKCON, 1, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA0, "dma0", "hclk", HCLKCON, 0, CLK_IGNORE_UNUSED, 0), + GATE(PCLK_GPIO, "gpio", "pclk", PCLKCON, 13, CLK_IGNORE_UNUSED, 0), + GATE(PCLK_RTC, "rtc", "pclk", PCLKCON, 12, 0, 0), + GATE(PCLK_WDT, "wdt", "pclk", PCLKCON, 11, 0, 0), + GATE(PCLK_PWM, "pwm", "pclk", PCLKCON, 10, 0, 0), + GATE(PCLK_I2S0, "i2s0", "pclk", PCLKCON, 9, 0, 0), + GATE(PCLK_AC97, "ac97", "pclk", PCLKCON, 8, 0, 0), + GATE(PCLK_ADC, "adc", "pclk", PCLKCON, 7, 0, 0), + GATE(PCLK_SPI0, "spi0", "pclk", PCLKCON, 6, 0, 0), + GATE(PCLK_I2C0, "i2c0", "pclk", PCLKCON, 4, 0, 0), + GATE(PCLK_UART3, "uart3", "pclk", PCLKCON, 3, 0, 0), + GATE(PCLK_UART2, "uart2", "pclk", PCLKCON, 2, 0, 0), + GATE(PCLK_UART1, "uart1", "pclk", PCLKCON, 1, 0, 0), + GATE(PCLK_UART0, "uart0", "pclk", PCLKCON, 0, 0, 0), +}; + +struct samsung_clock_alias s3c2443_common_aliases[] __initdata = { + ALIAS(HCLK, NULL, "hclk"), + ALIAS(HCLK_SSMC, NULL, "nand"), + ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"), + ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"), + ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"), + ALIAS(PCLK_UART3, "s3c2440-uart.3", "uart"), + ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"), + ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"), + ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"), + ALIAS(PCLK_UART3, "s3c2440-uart.3", "clk_uart_baud2"), + ALIAS(SCLK_UART, NULL, "clk_uart_baud3"), + ALIAS(PCLK_PWM, NULL, "timers"), + ALIAS(PCLK_RTC, NULL, "rtc"), + ALIAS(PCLK_WDT, NULL, "watchdog"), + ALIAS(PCLK_ADC, NULL, "adc"), + ALIAS(PCLK_I2C0, "s3c2410-i2c.0", "i2c"), + ALIAS(HCLK_USBD, NULL, "usb-device"), + ALIAS(HCLK_USBH, NULL, "usb-host"), + ALIAS(SCLK_USBH, NULL, "usb-bus-host"), + ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi"), + ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi_busclk0"), + ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "hsmmc"), + ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"), + ALIAS(PCLK_I2S0, "samsung-i2s.0", "iis"), + ALIAS(SCLK_I2S0, NULL, "i2s-if"), + ALIAS(HCLK_LCD, NULL, "lcd"), + ALIAS(SCLK_FIMD, NULL, "sclk_fimd"), +}; + +/* S3C2416 specific clocks */ + +static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = { + [mpll] = PLL(pll_6552_s3c2416, 0, "mpll", "mpllref", + LOCKCON0, MPLLCON, NULL), + [epll] = PLL(pll_6553, 0, "epll", "epllref", + LOCKCON1, EPLLCON, NULL), +}; + 
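
[Editor's note: the clk_div_table arrays in this file (hclk_d, mdivclk_d, and the armdiv tables) describe non-linear mappings from a raw register field value to a divisor; clk_register_divider_table() walks such a table and treats an entry with .div == 0 as the sentinel. Below is a minimal standalone sketch of that lookup, with the struct redeclared locally so it compiles outside the kernel tree; it is an illustration, not the kernel's implementation.]

/* Standalone sketch of the value->divisor lookup for table-based dividers. */
struct clk_div_table {
	unsigned int val;	/* raw register field value */
	unsigned int div;	/* resulting divisor; 0 terminates the table */
};

static const struct clk_div_table hclk_d_example[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 2 },
	{ .val = 3, .div = 4 },
	{ /* sentinel */ },
};

/* Return the divisor for a register value, or 0 for values the table
 * does not list (e.g. val == 2 above, which the hardware cannot use). */
static unsigned int table_val_to_div(const struct clk_div_table *t,
				     unsigned int val)
{
	for (; t->div; t++)
		if (t->val == val)
			return t->div;
	return 0;
}
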
+PNAME(s3c2416_hsmmc0_p) = { "sclk_hsmmc0", "sclk_hsmmcext" }; +PNAME(s3c2416_hsmmc1_p) = { "sclk_hsmmc1", "sclk_hsmmcext" }; +PNAME(s3c2416_hsspi0_p) = { "hsspi0_epll", "hsspi0_mpll" }; + +static struct clk_div_table armdiv_s3c2416_d[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 3 }, + { .val = 3, .div = 4 }, + { .val = 5, .div = 6 }, + { .val = 7, .div = 8 }, + { /* sentinel */ }, +}; + +struct samsung_div_clock s3c2416_dividers[] __initdata = { + DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 3, armdiv_s3c2416_d), + DIV(0, "div_hsspi0_mpll", "msysclk", CLKDIV2, 0, 4), + DIV(0, "div_hsmmc0", "esysclk", CLKDIV2, 6, 2), +}; + +struct samsung_mux_clock s3c2416_muxes[] __initdata = { + MUX(MUX_HSMMC0, "mux_hsmmc0", s3c2416_hsmmc0_p, CLKSRC, 16, 1), + MUX(MUX_HSMMC1, "mux_hsmmc1", s3c2416_hsmmc1_p, CLKSRC, 17, 1), + MUX(MUX_HSSPI0, "mux_hsspi0", s3c2416_hsspi0_p, CLKSRC, 18, 1), +}; + +struct samsung_gate_clock s3c2416_gates[] __initdata = { + GATE(0, "hsspi0_mpll", "div_hsspi0_mpll", SCLKCON, 19, 0, 0), + GATE(0, "hsspi0_epll", "div_hsspi0_epll", SCLKCON, 14, 0, 0), + GATE(0, "sclk_hsmmc0", "div_hsmmc0", SCLKCON, 6, 0, 0), + GATE(HCLK_2D, "2d", "hclk", HCLKCON, 20, 0, 0), + GATE(HCLK_HSMMC0, "hsmmc0", "hclk", HCLKCON, 15, 0, 0), + GATE(HCLK_IROM, "irom", "hclk", HCLKCON, 13, CLK_IGNORE_UNUSED, 0), + GATE(PCLK_PCM, "pcm", "pclk", PCLKCON, 19, 0, 0), +}; + +struct samsung_clock_alias s3c2416_aliases[] __initdata = { + ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"), + ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"), + ALIAS(MUX_HSMMC0, "s3c-sdhci.0", "mmc_busclk.2"), + ALIAS(MUX_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"), + ALIAS(MUX_HSSPI0, "s3c2443-spi.0", "spi_busclk2"), + ALIAS(ARMDIV, NULL, "armdiv"), +}; + +/* S3C2443 specific clocks */ + +static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = { + [mpll] = PLL(pll_3000, 0, "mpll", "mpllref", + LOCKCON0, MPLLCON, NULL), + [epll] = PLL(pll_2126, 0, "epll", "epllref", + LOCKCON1, EPLLCON, NULL), +}; + +static struct clk_div_table armdiv_s3c2443_d[] = { + { .val = 0, .div = 1 }, + { .val = 8, .div = 2 }, + { .val = 2, .div = 3 }, + { .val = 9, .div = 4 }, + { .val = 10, .div = 6 }, + { .val = 11, .div = 8 }, + { .val = 13, .div = 12 }, + { .val = 15, .div = 16 }, + { /* sentinel */ }, +}; + +struct samsung_div_clock s3c2443_dividers[] __initdata = { + DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 4, armdiv_s3c2443_d), + DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4), +}; + +struct samsung_gate_clock s3c2443_gates[] __initdata = { + GATE(SCLK_HSSPI0, "sclk_hsspi0", "div_hsspi0_epll", SCLKCON, 14, 0, 0), + GATE(SCLK_CAM, "sclk_cam", "div_cam", SCLKCON, 11, 0, 0), + GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0), + GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 15, 0, 0), + GATE(PCLK_SDI, "sdi", "pclk", PCLKCON, 5, 0, 0), +}; + +struct samsung_clock_alias s3c2443_aliases[] __initdata = { + ALIAS(SCLK_HSSPI0, "s3c2443-spi.0", "spi_busclk2"), + ALIAS(SCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"), + ALIAS(SCLK_CAM, NULL, "camif-upll"), + ALIAS(PCLK_SPI1, "s3c2410-spi.0", "spi"), + ALIAS(PCLK_SDI, NULL, "sdi"), + ALIAS(HCLK_CFC, NULL, "cfc"), + ALIAS(ARMDIV, NULL, "armdiv"), +}; + +/* S3C2450 specific clocks */ + +PNAME(s3c2450_cam_p) = { "div_cam", "hclk" }; +PNAME(s3c2450_hsspi1_p) = { "hsspi1_epll", "hsspi1_mpll" }; +PNAME(i2s1_p) = { "div_i2s1", "ext_i2s", "epllref", "epllref" }; + +struct samsung_div_clock s3c2450_dividers[] __initdata = { + DIV(0, 
"div_cam", "esysclk", CLKDIV1, 26, 4), + DIV(0, "div_hsspi1_epll", "esysclk", CLKDIV2, 24, 2), + DIV(0, "div_hsspi1_mpll", "msysclk", CLKDIV2, 16, 4), + DIV(0, "div_i2s1", "esysclk", CLKDIV2, 12, 4), +}; + +struct samsung_mux_clock s3c2450_muxes[] __initdata = { + MUX(0, "mux_cam", s3c2450_cam_p, CLKSRC, 20, 1), + MUX(MUX_HSSPI1, "mux_hsspi1", s3c2450_hsspi1_p, CLKSRC, 19, 1), + MUX(0, "mux_i2s1", i2s1_p, CLKSRC, 12, 2), +}; + +struct samsung_gate_clock s3c2450_gates[] __initdata = { + GATE(SCLK_I2S1, "sclk_i2s1", "div_i2s1", SCLKCON, 5, 0, 0), + GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, 0, 0), + GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0), + GATE(HCLK_DMA7, "dma7", "hclk", HCLKCON, 7, CLK_IGNORE_UNUSED, 0), + GATE(HCLK_DMA6, "dma6", "hclk", HCLKCON, 6, CLK_IGNORE_UNUSED, 0), + GATE(PCLK_I2S1, "i2s1", "pclk", PCLKCON, 17, 0, 0), + GATE(PCLK_I2C1, "i2c1", "pclk", PCLKCON, 16, 0, 0), + GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 14, 0, 0), +}; + +struct samsung_clock_alias s3c2450_aliases[] __initdata = { + ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi"), + ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi_busclk0"), + ALIAS(MUX_HSSPI1, "s3c2443-spi.1", "spi_busclk2"), + ALIAS(PCLK_I2C1, "s3c2410-i2c.1", "i2c"), +}; + +/* + * fixed rate clocks generated outside the soc + * Only necessary until the devicetree-move is complete + */ +struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = { + FRATE(0, "xti", NULL, CLK_IS_ROOT, 0), + FRATE(0, "ext", NULL, CLK_IS_ROOT, 0), + FRATE(0, "ext_i2s", NULL, CLK_IS_ROOT, 0), + FRATE(0, "ext_uart", NULL, CLK_IS_ROOT, 0), +}; + +static void __init s3c2443_common_clk_register_fixed_ext( + struct samsung_clk_provider *ctx, unsigned long xti_f) +{ + s3c2443_common_frate_clks[0].fixed_rate = xti_f; + samsung_clk_register_fixed_rate(ctx, s3c2443_common_frate_clks, + ARRAY_SIZE(s3c2443_common_frate_clks)); +} + +void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f, + int current_soc, + void __iomem *base) +{ + struct samsung_clk_provider *ctx; + reg_base = base; + + if (np) { + reg_base = of_iomap(np, 0); + if (!reg_base) + panic("%s: failed to map registers\n", __func__); + } + + ctx = samsung_clk_init(np, reg_base, NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); + + /* Register external clocks only in non-dt cases */ + if (!np) + s3c2443_common_clk_register_fixed_ext(ctx, xti_f); + + /* Register PLLs. */ + if (current_soc == S3C2416 || current_soc == S3C2450) + samsung_clk_register_pll(ctx, s3c2416_pll_clks, + ARRAY_SIZE(s3c2416_pll_clks), reg_base); + else + samsung_clk_register_pll(ctx, s3c2443_pll_clks, + ARRAY_SIZE(s3c2443_pll_clks), reg_base); + + /* Register common internal clocks. */ + samsung_clk_register_mux(ctx, s3c2443_common_muxes, + ARRAY_SIZE(s3c2443_common_muxes)); + samsung_clk_register_div(ctx, s3c2443_common_dividers, + ARRAY_SIZE(s3c2443_common_dividers)); + samsung_clk_register_gate(ctx, s3c2443_common_gates, + ARRAY_SIZE(s3c2443_common_gates)); + samsung_clk_register_alias(ctx, s3c2443_common_aliases, + ARRAY_SIZE(s3c2443_common_aliases)); + + /* Register SoC-specific clocks. 
*/ + switch (current_soc) { + case S3C2450: + samsung_clk_register_div(ctx, s3c2450_dividers, + ARRAY_SIZE(s3c2450_dividers)); + samsung_clk_register_mux(ctx, s3c2450_muxes, + ARRAY_SIZE(s3c2450_muxes)); + samsung_clk_register_gate(ctx, s3c2450_gates, + ARRAY_SIZE(s3c2450_gates)); + samsung_clk_register_alias(ctx, s3c2450_aliases, + ARRAY_SIZE(s3c2450_aliases)); + /* fall through, as s3c2450 extends the s3c2416 clocks */ + case S3C2416: + samsung_clk_register_div(ctx, s3c2416_dividers, + ARRAY_SIZE(s3c2416_dividers)); + samsung_clk_register_mux(ctx, s3c2416_muxes, + ARRAY_SIZE(s3c2416_muxes)); + samsung_clk_register_gate(ctx, s3c2416_gates, + ARRAY_SIZE(s3c2416_gates)); + samsung_clk_register_alias(ctx, s3c2416_aliases, + ARRAY_SIZE(s3c2416_aliases)); + break; + case S3C2443: + samsung_clk_register_div(ctx, s3c2443_dividers, + ARRAY_SIZE(s3c2443_dividers)); + samsung_clk_register_gate(ctx, s3c2443_gates, + ARRAY_SIZE(s3c2443_gates)); + samsung_clk_register_alias(ctx, s3c2443_aliases, + ARRAY_SIZE(s3c2443_aliases)); + break; + } + + s3c2443_clk_sleep_init(); +} + +static void __init s3c2416_clk_init(struct device_node *np) +{ + s3c2443_common_clk_init(np, 0, S3C2416, 0); +} +CLK_OF_DECLARE(s3c2416_clk, "samsung,s3c2416-clock", s3c2416_clk_init); + +static void __init s3c2443_clk_init(struct device_node *np) +{ + s3c2443_common_clk_init(np, 0, S3C2443, 0); +} +CLK_OF_DECLARE(s3c2443_clk, "samsung,s3c2443-clock", s3c2443_clk_init); + +static void __init s3c2450_clk_init(struct device_node *np) +{ + s3c2443_common_clk_init(np, 0, S3C2450, 0); +} +CLK_OF_DECLARE(s3c2450_clk, "samsung,s3c2450-clock", s3c2450_clk_init); diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c index 8bda658137a..efa16ee592c 100644 --- a/drivers/clk/samsung/clk-s3c64xx.c +++ b/drivers/clk/samsung/clk-s3c64xx.c @@ -442,12 +442,14 @@ static struct samsung_clock_alias s3c6410_clock_aliases[] = { ALIAS(MEM0_SROM, NULL, "srom"), }; -static void __init s3c64xx_clk_register_fixed_ext(unsigned long fin_pll_f, - unsigned long xusbxti_f) +static void __init s3c64xx_clk_register_fixed_ext( + struct samsung_clk_provider *ctx, + unsigned long fin_pll_f, + unsigned long xusbxti_f) { s3c64xx_fixed_rate_ext_clks[0].fixed_rate = fin_pll_f; s3c64xx_fixed_rate_ext_clks[1].fixed_rate = xusbxti_f; - samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_ext_clks, + samsung_clk_register_fixed_rate(ctx, s3c64xx_fixed_rate_ext_clks, ARRAY_SIZE(s3c64xx_fixed_rate_ext_clks)); } @@ -456,6 +458,8 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, unsigned long xusbxti_f, bool s3c6400, void __iomem *base) { + struct samsung_clk_provider *ctx; + reg_base = base; is_s3c6400 = s3c6400; @@ -465,48 +469,50 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f, panic("%s: failed to map registers\n", __func__); } - samsung_clk_init(np, reg_base, NR_CLKS); + ctx = samsung_clk_init(np, reg_base, NR_CLKS); + if (!ctx) + panic("%s: unable to allocate context.\n", __func__); /* Register external clocks. */ if (!np) - s3c64xx_clk_register_fixed_ext(xtal_f, xusbxti_f); + s3c64xx_clk_register_fixed_ext(ctx, xtal_f, xusbxti_f); /* Register PLLs. */ - samsung_clk_register_pll(s3c64xx_pll_clks, + samsung_clk_register_pll(ctx, s3c64xx_pll_clks, ARRAY_SIZE(s3c64xx_pll_clks), reg_base); /* Register common internal clocks. 
*/ - samsung_clk_register_fixed_rate(s3c64xx_fixed_rate_clks, + samsung_clk_register_fixed_rate(ctx, s3c64xx_fixed_rate_clks, ARRAY_SIZE(s3c64xx_fixed_rate_clks)); - samsung_clk_register_mux(s3c64xx_mux_clks, + samsung_clk_register_mux(ctx, s3c64xx_mux_clks, ARRAY_SIZE(s3c64xx_mux_clks)); - samsung_clk_register_div(s3c64xx_div_clks, + samsung_clk_register_div(ctx, s3c64xx_div_clks, ARRAY_SIZE(s3c64xx_div_clks)); - samsung_clk_register_gate(s3c64xx_gate_clks, + samsung_clk_register_gate(ctx, s3c64xx_gate_clks, ARRAY_SIZE(s3c64xx_gate_clks)); /* Register SoC-specific clocks. */ if (is_s3c6400) { - samsung_clk_register_mux(s3c6400_mux_clks, + samsung_clk_register_mux(ctx, s3c6400_mux_clks, ARRAY_SIZE(s3c6400_mux_clks)); - samsung_clk_register_div(s3c6400_div_clks, + samsung_clk_register_div(ctx, s3c6400_div_clks, ARRAY_SIZE(s3c6400_div_clks)); - samsung_clk_register_gate(s3c6400_gate_clks, + samsung_clk_register_gate(ctx, s3c6400_gate_clks, ARRAY_SIZE(s3c6400_gate_clks)); - samsung_clk_register_alias(s3c6400_clock_aliases, + samsung_clk_register_alias(ctx, s3c6400_clock_aliases, ARRAY_SIZE(s3c6400_clock_aliases)); } else { - samsung_clk_register_mux(s3c6410_mux_clks, + samsung_clk_register_mux(ctx, s3c6410_mux_clks, ARRAY_SIZE(s3c6410_mux_clks)); - samsung_clk_register_div(s3c6410_div_clks, + samsung_clk_register_div(ctx, s3c6410_div_clks, ARRAY_SIZE(s3c6410_div_clks)); - samsung_clk_register_gate(s3c6410_gate_clks, + samsung_clk_register_gate(ctx, s3c6410_gate_clks, ARRAY_SIZE(s3c6410_gate_clks)); - samsung_clk_register_alias(s3c6410_clock_aliases, + samsung_clk_register_alias(ctx, s3c6410_clock_aliases, ARRAY_SIZE(s3c6410_clock_aliases)); } - samsung_clk_register_alias(s3c64xx_clock_aliases, + samsung_clk_register_alias(ctx, s3c64xx_clock_aliases, ARRAY_SIZE(s3c64xx_clock_aliases)); s3c64xx_clk_sleep_init(); diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index 91bec3ebdc8..49629c71c9e 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -14,13 +14,6 @@ #include <linux/syscore_ops.h> #include "clk.h" -static DEFINE_SPINLOCK(lock); -static struct clk **clk_table; -static void __iomem *reg_base; -#ifdef CONFIG_OF -static struct clk_onecell_data clk_data; -#endif - void samsung_clk_save(void __iomem *base, struct samsung_clk_reg_dump *rd, unsigned int num_regs) @@ -55,40 +48,58 @@ struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump( } /* setup the essentials required to support clock lookup using ccf */ -void __init samsung_clk_init(struct device_node *np, void __iomem *base, - unsigned long nr_clks) +struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np, + void __iomem *base, unsigned long nr_clks) { - reg_base = base; + struct samsung_clk_provider *ctx; + struct clk **clk_table; + int ret; + int i; - clk_table = kzalloc(sizeof(struct clk *) * nr_clks, GFP_KERNEL); + ctx = kzalloc(sizeof(struct samsung_clk_provider), GFP_KERNEL); + if (!ctx) + panic("could not allocate clock provider context.\n"); + + clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL); if (!clk_table) panic("could not allocate clock lookup table\n"); + for (i = 0; i < nr_clks; ++i) + clk_table[i] = ERR_PTR(-ENOENT); + + ctx->reg_base = base; + ctx->clk_data.clks = clk_table; + ctx->clk_data.clk_num = nr_clks; + spin_lock_init(&ctx->lock); + if (!np) - return; + return ctx; -#ifdef CONFIG_OF - clk_data.clks = clk_table; - clk_data.clk_num = nr_clks; - of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); -#endif + ret = of_clk_add_provider(np, 
of_clk_src_onecell_get, + &ctx->clk_data); + if (ret) + panic("could not register clock provider\n"); + + return ctx; } /* add a clock instance to the clock lookup table used for dt based lookup */ -void samsung_clk_add_lookup(struct clk *clk, unsigned int id) +void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk, + unsigned int id) { - if (clk_table && id) - clk_table[id] = clk; + if (ctx->clk_data.clks && id) + ctx->clk_data.clks[id] = clk; } /* register a list of aliases */ -void __init samsung_clk_register_alias(struct samsung_clock_alias *list, - unsigned int nr_clk) +void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx, + struct samsung_clock_alias *list, + unsigned int nr_clk) { struct clk *clk; unsigned int idx, ret; - if (!clk_table) { + if (!ctx->clk_data.clks) { pr_err("%s: clock table missing\n", __func__); return; } @@ -100,7 +111,7 @@ void __init samsung_clk_register_alias(struct samsung_clock_alias *list, continue; } - clk = clk_table[list->id]; + clk = ctx->clk_data.clks[list->id]; if (!clk) { pr_err("%s: failed to find clock %d\n", __func__, list->id); @@ -115,7 +126,7 @@ } /* register a list of fixed clocks */ -void __init samsung_clk_register_fixed_rate( +void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx, struct samsung_fixed_rate_clock *list, unsigned int nr_clk) { struct clk *clk; @@ -130,7 +141,7 @@ continue; } - samsung_clk_add_lookup(clk, list->id); + samsung_clk_add_lookup(ctx, clk, list->id); /* * Unconditionally add a clock lookup for the fixed rate clocks. @@ -144,7 +155,7 @@ } /* register a list of fixed factor clocks */ -void __init samsung_clk_register_fixed_factor( +void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx, struct samsung_fixed_factor_clock *list, unsigned int nr_clk) { struct clk *clk; @@ -159,28 +170,30 @@ continue; } - samsung_clk_add_lookup(clk, list->id); + samsung_clk_add_lookup(ctx, clk, list->id); } } /* register a list of mux clocks */ -void __init samsung_clk_register_mux(struct samsung_mux_clock *list, - unsigned int nr_clk) +void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx, + struct samsung_mux_clock *list, + unsigned int nr_clk) { struct clk *clk; unsigned int idx, ret; for (idx = 0; idx < nr_clk; idx++, list++) { clk = clk_register_mux(NULL, list->name, list->parent_names, - list->num_parents, list->flags, reg_base + list->offset, - list->shift, list->width, list->mux_flags, &lock); + list->num_parents, list->flags, + ctx->reg_base + list->offset, + list->shift, list->width, list->mux_flags, &ctx->lock); if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, list->name); continue; } - samsung_clk_add_lookup(clk, list->id); + samsung_clk_add_lookup(ctx, clk, list->id); /* register a clock lookup only if a clock alias is specified */ if (list->alias) { @@ -194,8 +207,9 @@ void __init samsung_clk_register_mux(struct samsung_mux_clock *list, } /* register a list of div clocks */ -void __init samsung_clk_register_div(struct samsung_div_clock *list, - unsigned int nr_clk) +void __init samsung_clk_register_div(struct samsung_clk_provider *ctx, + struct samsung_div_clock *list, + unsigned int nr_clk) { struct clk *clk; unsigned int idx, ret; @@ -203,22 +217,22 @@ void __init
samsung_clk_register_div(struct samsung_div_clock *list, for (idx = 0; idx < nr_clk; idx++, list++) { if (list->table) clk = clk_register_divider_table(NULL, list->name, - list->parent_name, list->flags, - reg_base + list->offset, list->shift, - list->width, list->div_flags, - list->table, &lock); + list->parent_name, list->flags, + ctx->reg_base + list->offset, + list->shift, list->width, list->div_flags, + list->table, &ctx->lock); else clk = clk_register_divider(NULL, list->name, - list->parent_name, list->flags, - reg_base + list->offset, list->shift, - list->width, list->div_flags, &lock); + list->parent_name, list->flags, + ctx->reg_base + list->offset, list->shift, + list->width, list->div_flags, &ctx->lock); if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, list->name); continue; } - samsung_clk_add_lookup(clk, list->id); + samsung_clk_add_lookup(ctx, clk, list->id); /* register a clock lookup only if a clock alias is specified */ if (list->alias) { @@ -232,16 +246,17 @@ void __init samsung_clk_register_div(struct samsung_div_clock *list, } /* register a list of gate clocks */ -void __init samsung_clk_register_gate(struct samsung_gate_clock *list, - unsigned int nr_clk) +void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx, + struct samsung_gate_clock *list, + unsigned int nr_clk) { struct clk *clk; unsigned int idx, ret; for (idx = 0; idx < nr_clk; idx++, list++) { clk = clk_register_gate(NULL, list->name, list->parent_name, - list->flags, reg_base + list->offset, - list->bit_idx, list->gate_flags, &lock); + list->flags, ctx->reg_base + list->offset, + list->bit_idx, list->gate_flags, &ctx->lock); if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, list->name); @@ -257,7 +272,7 @@ void __init samsung_clk_register_gate(struct samsung_gate_clock *list, __func__, list->alias); } - samsung_clk_add_lookup(clk, list->id); + samsung_clk_add_lookup(ctx, clk, list->id); } } @@ -266,21 +281,21 @@ void __init samsung_clk_register_gate(struct samsung_gate_clock *list, * tree and register it */ #ifdef CONFIG_OF -void __init samsung_clk_of_register_fixed_ext( +void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx, struct samsung_fixed_rate_clock *fixed_rate_clk, unsigned int nr_fixed_rate_clk, struct of_device_id *clk_matches) { const struct of_device_id *match; - struct device_node *np; + struct device_node *clk_np; u32 freq; - for_each_matching_node_and_match(np, clk_matches, &match) { - if (of_property_read_u32(np, "clock-frequency", &freq)) + for_each_matching_node_and_match(clk_np, clk_matches, &match) { + if (of_property_read_u32(clk_np, "clock-frequency", &freq)) continue; - fixed_rate_clk[(u32)match->data].fixed_rate = freq; + fixed_rate_clk[(unsigned long)match->data].fixed_rate = freq; } - samsung_clk_register_fixed_rate(fixed_rate_clk, nr_fixed_rate_clk); + samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk); } #endif diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index c7141ba826e..9693b80d924 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -22,6 +22,18 @@ #include "clk-pll.h" /** + * struct samsung_clk_provider: information about clock provider + * @reg_base: virtual address for the register base. + * @clk_data: holds clock related data like clk* and number of clocks. + * @lock: maintains exclusion between callbacks for a given clock-provider. 
+ */ +struct samsung_clk_provider { + void __iomem *reg_base; + struct clk_onecell_data clk_data; + spinlock_t lock; +}; + +/** * struct samsung_clock_alias: information about mux clock * @id: platform specific id of the clock. * @dev_name: name of the device to which this clock belongs. @@ -312,40 +324,52 @@ struct samsung_pll_clock { __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE, \ _lock, _con, _rtable, _alias) -extern void __init samsung_clk_init(struct device_node *np, void __iomem *base, - unsigned long nr_clks); +extern struct samsung_clk_provider *__init samsung_clk_init( + struct device_node *np, void __iomem *base, + unsigned long nr_clks); extern void __init samsung_clk_of_register_fixed_ext( - struct samsung_fixed_rate_clock *fixed_rate_clk, - unsigned int nr_fixed_rate_clk, - struct of_device_id *clk_matches); + struct samsung_clk_provider *ctx, + struct samsung_fixed_rate_clock *fixed_rate_clk, + unsigned int nr_fixed_rate_clk, + struct of_device_id *clk_matches); -extern void samsung_clk_add_lookup(struct clk *clk, unsigned int id); +extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, + struct clk *clk, unsigned int id); -extern void samsung_clk_register_alias(struct samsung_clock_alias *list, - unsigned int nr_clk); +extern void samsung_clk_register_alias(struct samsung_clk_provider *ctx, + struct samsung_clock_alias *list, + unsigned int nr_clk); extern void __init samsung_clk_register_fixed_rate( - struct samsung_fixed_rate_clock *clk_list, unsigned int nr_clk); + struct samsung_clk_provider *ctx, + struct samsung_fixed_rate_clock *clk_list, + unsigned int nr_clk); extern void __init samsung_clk_register_fixed_factor( - struct samsung_fixed_factor_clock *list, unsigned int nr_clk); -extern void __init samsung_clk_register_mux(struct samsung_mux_clock *clk_list, - unsigned int nr_clk); -extern void __init samsung_clk_register_div(struct samsung_div_clock *clk_list, - unsigned int nr_clk); -extern void __init samsung_clk_register_gate( - struct samsung_gate_clock *clk_list, unsigned int nr_clk); -extern void __init samsung_clk_register_pll(struct samsung_pll_clock *pll_list, - unsigned int nr_clk, void __iomem *base); + struct samsung_clk_provider *ctx, + struct samsung_fixed_factor_clock *list, + unsigned int nr_clk); +extern void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx, + struct samsung_mux_clock *clk_list, + unsigned int nr_clk); +extern void __init samsung_clk_register_div(struct samsung_clk_provider *ctx, + struct samsung_div_clock *clk_list, + unsigned int nr_clk); +extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx, + struct samsung_gate_clock *clk_list, + unsigned int nr_clk); +extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx, + struct samsung_pll_clock *pll_list, + unsigned int nr_clk, void __iomem *base); extern unsigned long _get_rate(const char *clk_name); extern void samsung_clk_save(void __iomem *base, - struct samsung_clk_reg_dump *rd, - unsigned int num_regs); + struct samsung_clk_reg_dump *rd, + unsigned int num_regs); extern void samsung_clk_restore(void __iomem *base, - const struct samsung_clk_reg_dump *rd, - unsigned int num_regs); + const struct samsung_clk_reg_dump *rd, + unsigned int num_regs); extern struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump( - const unsigned long *rdump, - unsigned long nr_rdump); + const unsigned long *rdump, + unsigned long nr_rdump); #endif /* __SAMSUNG_CLK_H */ diff --git a/drivers/clk/shmobile/clk-mstp.c 
b/drivers/clk/shmobile/clk-mstp.c index 2e5810c88d1..1f6324e29a8 100644 --- a/drivers/clk/shmobile/clk-mstp.c +++ b/drivers/clk/shmobile/clk-mstp.c @@ -156,6 +156,7 @@ cpg_mstp_clock_register(const char *name, const char *parent_name, static void __init cpg_mstp_clocks_init(struct device_node *np) { struct mstp_clock_group *group; + const char *idxname; struct clk **clks; unsigned int i; @@ -184,6 +185,11 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) for (i = 0; i < MSTP_MAX_CLOCKS; ++i) clks[i] = ERR_PTR(-ENOENT); + if (of_find_property(np, "clock-indices", &i)) + idxname = "clock-indices"; + else + idxname = "renesas,clock-indices"; + for (i = 0; i < MSTP_MAX_CLOCKS; ++i) { const char *parent_name; const char *name; @@ -197,8 +203,7 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) continue; parent_name = of_clk_get_parent_name(np, i); - ret = of_property_read_u32_index(np, "renesas,clock-indices", i, - &clkidx); + ret = of_property_read_u32_index(np, idxname, i, &clkidx); if (parent_name == NULL || ret < 0) break; diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index 88dafb5e962..de6da957a09 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -20,6 +20,7 @@ #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_address.h> #include "clk.h" @@ -43,6 +44,8 @@ #define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) +void __iomem *clk_mgr_base_addr; + static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, unsigned long parent_rate) { @@ -87,6 +90,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, const char *clk_name = node->name; const char *parent_name[SOCFPGA_MAX_PARENTS]; struct clk_init_data init; + struct device_node *clkmgr_np; int rc; int i = 0; @@ -96,6 +100,9 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, if (WARN_ON(!pll_clk)) return NULL; + clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); + clk_mgr_base_addr = of_iomap(clkmgr_np, 0); + BUG_ON(!clk_mgr_base_addr); pll_clk->hw.reg = clk_mgr_base_addr + reg; of_property_read_string(node, "clock-output-names", &clk_name); diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c index 35a960a993f..43db947e5f0 100644 --- a/drivers/clk/socfpga/clk.c +++ b/drivers/clk/socfpga/clk.c @@ -17,28 +17,11 @@ * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ -#include <linux/clk.h> -#include <linux/clkdev.h> -#include <linux/clk-provider.h> -#include <linux/io.h> #include <linux/of.h> -#include <linux/of_address.h> #include "clk.h" -void __iomem *clk_mgr_base_addr; - -static const struct of_device_id socfpga_child_clocks[] __initconst = { - { .compatible = "altr,socfpga-pll-clock", socfpga_pll_init, }, - { .compatible = "altr,socfpga-perip-clk", socfpga_periph_init, }, - { .compatible = "altr,socfpga-gate-clk", socfpga_gate_init, }, - {}, -}; - -static void __init socfpga_clkmgr_init(struct device_node *node) -{ - clk_mgr_base_addr = of_iomap(node, 0); - of_clk_init(socfpga_child_clocks); -} -CLK_OF_DECLARE(socfpga_mgr, "altr,clk-mgr", socfpga_clkmgr_init); +CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init); +CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init); +CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init); diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index bca0a0badbf..a886702f7c8 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -521,8 +521,10 @@ static struct clk * __init clkgen_odf_register(const char *parent_name, gate->lock = odf_lock; div = kzalloc(sizeof(*div), GFP_KERNEL); - if (!div) + if (!div) { + kfree(gate); return ERR_PTR(-ENOMEM); + } div->flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; div->reg = reg + pll_data->odf[odf].offset; diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 0d20241e077..6aad8abc69a 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -58,9 +58,9 @@ #define PLLDU_LFCON_SET_DIVN 600 #define PLLE_BASE_DIVCML_SHIFT 24 -#define PLLE_BASE_DIVCML_WIDTH 4 +#define PLLE_BASE_DIVCML_MASK 0xf #define PLLE_BASE_DIVP_SHIFT 16 -#define PLLE_BASE_DIVP_WIDTH 7 +#define PLLE_BASE_DIVP_WIDTH 6 #define PLLE_BASE_DIVN_SHIFT 8 #define PLLE_BASE_DIVN_WIDTH 8 #define PLLE_BASE_DIVM_SHIFT 0 @@ -183,6 +183,14 @@ #define divp_mask(p) (p->params->flags & TEGRA_PLLU ? 
PLLU_POST_DIVP_MASK :\ mask(p->params->div_nmp->divp_width)) +#define divm_shift(p) (p)->params->div_nmp->divm_shift +#define divn_shift(p) (p)->params->div_nmp->divn_shift +#define divp_shift(p) (p)->params->div_nmp->divp_shift + +#define divm_mask_shifted(p) (divm_mask(p) << divm_shift(p)) +#define divn_mask_shifted(p) (divn_mask(p) << divn_shift(p)) +#define divp_mask_shifted(p) (divp_mask(p) << divp_shift(p)) + #define divm_max(p) (divm_mask(p)) #define divn_max(p) (divn_mask(p)) #define divp_max(p) (1 << (divp_mask(p))) @@ -476,13 +484,12 @@ static void _update_pll_mnp(struct tegra_clk_pll *pll, } else { val = pll_readl_base(pll); - val &= ~((divm_mask(pll) << div_nmp->divm_shift) | - (divn_mask(pll) << div_nmp->divn_shift) | - (divp_mask(pll) << div_nmp->divp_shift)); + val &= ~(divm_mask_shifted(pll) | divn_mask_shifted(pll) | + divp_mask_shifted(pll)); - val |= ((cfg->m << div_nmp->divm_shift) | - (cfg->n << div_nmp->divn_shift) | - (cfg->p << div_nmp->divp_shift)); + val |= (cfg->m << divm_shift(pll)) | + (cfg->n << divn_shift(pll)) | + (cfg->p << divp_shift(pll)); pll_writel_base(val, pll); } @@ -730,11 +737,12 @@ static int clk_plle_enable(struct clk_hw *hw) if (pll->params->flags & TEGRA_PLLE_CONFIGURE) { /* configure dividers */ val = pll_readl_base(pll); - val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); - val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); - val |= sel.m << pll->params->div_nmp->divm_shift; - val |= sel.n << pll->params->div_nmp->divn_shift; - val |= sel.p << pll->params->div_nmp->divp_shift; + val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) | + divm_mask_shifted(pll)); + val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT); + val |= sel.m << divm_shift(pll); + val |= sel.n << divn_shift(pll); + val |= sel.p << divp_shift(pll); val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; pll_writel_base(val, pll); } @@ -745,10 +753,11 @@ static int clk_plle_enable(struct clk_hw *hw) pll_writel_misc(val, pll); val = readl(pll->clk_base + PLLE_SS_CTRL); + val &= ~PLLE_SS_COEFFICIENTS_MASK; val |= PLLE_SS_DISABLE; writel(val, pll->clk_base + PLLE_SS_CTRL); - val |= pll_readl_base(pll); + val = pll_readl_base(pll); val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE); pll_writel_base(val, pll); @@ -1292,10 +1301,11 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) pll_writel(val, PLLE_SS_CTRL, pll); val = pll_readl_base(pll); - val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll)); - val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT); - val |= sel.m << pll->params->div_nmp->divm_shift; - val |= sel.n << pll->params->div_nmp->divn_shift; + val &= ~(divp_mask_shifted(pll) | divn_mask_shifted(pll) | + divm_mask_shifted(pll)); + val &= ~(PLLE_BASE_DIVCML_MASK << PLLE_BASE_DIVCML_SHIFT); + val |= sel.m << divm_shift(pll); + val |= sel.n << divn_shift(pll); val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT; pll_writel_base(val, pll); udelay(1); @@ -1410,6 +1420,15 @@ struct clk *tegra_clk_register_pll(const char *name, const char *parent_name, return clk; } +static struct div_nmp pll_e_nmp = { + .divn_shift = PLLE_BASE_DIVN_SHIFT, + .divn_width = PLLE_BASE_DIVN_WIDTH, + .divm_shift = PLLE_BASE_DIVM_SHIFT, + .divm_width = PLLE_BASE_DIVM_WIDTH, + .divp_shift = PLLE_BASE_DIVP_SHIFT, + .divp_width = PLLE_BASE_DIVP_WIDTH, +}; + struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, void __iomem *clk_base, void __iomem *pmc, unsigned long flags, struct tegra_clk_pll_params *pll_params, @@ -1420,6 +1439,10 @@ struct clk 
*tegra_clk_register_plle(const char *name, const char *parent_name, pll_params->flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS; pll_params->flags |= TEGRA_PLL_HAS_LOCK_ENABLE; + + if (!pll_params->div_nmp) + pll_params->div_nmp = &pll_e_nmp; + pll = _tegra_init_pll(clk_base, pmc, pll_params, lock); if (IS_ERR(pll)) return ERR_CAST(pll); @@ -1557,9 +1580,8 @@ struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name, int m; m = _pll_fixed_mdiv(pll_params, parent_rate); - val = m << PLL_BASE_DIVM_SHIFT; - val |= (pll_params->vco_min / parent_rate) - << PLL_BASE_DIVN_SHIFT; + val = m << divm_shift(pll); + val |= (pll_params->vco_min / parent_rate) << divn_shift(pll); pll_writel_base(val, pll); } @@ -1718,7 +1740,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name, "pll_re_vco"); } else { val_aux &= ~(PLLE_AUX_PLLRE_SEL | PLLE_AUX_PLLP_SEL); - pll_writel(val, pll_params->aux_reg, pll); + pll_writel(val_aux, pll_params->aux_reg, pll); } clk = _tegra_clk_register_pll(pll, name, parent_name, flags, diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c index 67c8de572c5..527a43da3d3 100644 --- a/drivers/clk/ti/clk-43xx.c +++ b/drivers/clk/ti/clk-43xx.c @@ -105,6 +105,12 @@ static struct ti_dt_clk am43xx_clks[] = { DT_CLK(NULL, "func_12m_clk", "func_12m_clk"), DT_CLK(NULL, "vtp_clk_div", "vtp_clk_div"), DT_CLK(NULL, "usbphy_32khz_clkmux", "usbphy_32khz_clkmux"), + DT_CLK("48300200.ehrpwm", "tbclk", "ehrpwm0_tbclk"), + DT_CLK("48302200.ehrpwm", "tbclk", "ehrpwm1_tbclk"), + DT_CLK("48304200.ehrpwm", "tbclk", "ehrpwm2_tbclk"), + DT_CLK("48306200.ehrpwm", "tbclk", "ehrpwm3_tbclk"), + DT_CLK("48308200.ehrpwm", "tbclk", "ehrpwm4_tbclk"), + DT_CLK("4830a200.ehrpwm", "tbclk", "ehrpwm5_tbclk"), { .node_name = NULL }, }; diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig new file mode 100644 index 00000000000..1530c9352a7 --- /dev/null +++ b/drivers/clk/versatile/Kconfig @@ -0,0 +1,26 @@ +config COMMON_CLK_VERSATILE + bool "Clock driver for ARM Reference designs" + depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARM64 + ---help--- + Supports clocking on ARM Reference designs: + - Integrator/AP and Integrator/CP + - RealView PB1176, EB, PB11MP and PBX + - Versatile Express + +config CLK_SP810 + bool "Clock driver for ARM SP810 System Controller" + depends on COMMON_CLK_VERSATILE + default y if ARCH_VEXPRESS + ---help--- + Supports clock muxing (REFCLK/TIMCLK to TIMERCLKEN0-3) capabilities + of the ARM SP810 System Controller cell. + +config CLK_VEXPRESS_OSC + bool "Clock driver for Versatile Express OSC clock generators" + depends on COMMON_CLK_VERSATILE + depends on VEXPRESS_CONFIG + default y if ARCH_VEXPRESS + ---help--- + Simple regmap-based driver driving clock generators on Versatile + Express platforms hidden behind its configuration infrastructure, + commonly known as OSCs. 
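
[Editor's note: the Tegra clk-pll.c hunks above introduce divm/divn/divp_mask_shifted() because the old read-modify-write sequences cleared divider fields with unshifted masks (and misused PLLE_BASE_DIVCML_WIDTH where a mask belonged), so the wrong bits were wiped before the new m/n/p values were OR-ed in. The following self-contained toy program shows the difference; the field layout and values are invented for the demonstration.]

#include <stdint.h>
#include <stdio.h>

#define DIVN_SHIFT	8
#define DIVN_MASK	0xffu	/* unshifted, like divn_mask() */

int main(void)
{
	uint32_t reg = 0x0000ffffu;	/* DIVN field = 0xff, low byte = 0xff */
	uint32_t n = 0x42;

	/* Old pattern: clears bits 7..0 instead of the DIVN field */
	uint32_t buggy = (reg & ~DIVN_MASK) | (n << DIVN_SHIFT);

	/* New pattern, what divn_mask_shifted() expands to: shift the mask
	 * into field position before clearing */
	uint32_t fixed = (reg & ~(DIVN_MASK << DIVN_SHIFT)) | (n << DIVN_SHIFT);

	/* prints buggy=0x0000ff00 (field unchanged, low byte lost),
	 * fixed=0x000042ff (field updated, low byte preserved) */
	printf("buggy=0x%08x fixed=0x%08x\n", buggy, fixed);
	return 0;
}
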
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile index c16ca787170..fd449f9b006 100644 --- a/drivers/clk/versatile/Makefile +++ b/drivers/clk/versatile/Makefile @@ -3,5 +3,6 @@ obj-$(CONFIG_ICST) += clk-icst.o obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o -obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o clk-sp810.o -obj-$(CONFIG_VEXPRESS_CONFIG) += clk-vexpress-osc.o +obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o +obj-$(CONFIG_CLK_SP810) += clk-sp810.o +obj-$(CONFIG_CLK_VEXPRESS_OSC) += clk-vexpress-osc.o diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c index a535c7bf857..529a59c0fbf 100644 --- a/drivers/clk/versatile/clk-vexpress-osc.c +++ b/drivers/clk/versatile/clk-vexpress-osc.c @@ -11,8 +11,6 @@ * Copyright (C) 2012 ARM Limited */ -#define pr_fmt(fmt) "vexpress-osc: " fmt - #include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/err.h> @@ -22,7 +20,7 @@ #include <linux/vexpress.h> struct vexpress_osc { - struct vexpress_config_func *func; + struct regmap *reg; struct clk_hw hw; unsigned long rate_min; unsigned long rate_max; @@ -36,7 +34,7 @@ static unsigned long vexpress_osc_recalc_rate(struct clk_hw *hw, struct vexpress_osc *osc = to_vexpress_osc(hw); u32 rate; - vexpress_config_read(osc->func, 0, &rate); + regmap_read(osc->reg, 0, &rate); return rate; } @@ -60,7 +58,7 @@ static int vexpress_osc_set_rate(struct clk_hw *hw, unsigned long rate, { struct vexpress_osc *osc = to_vexpress_osc(hw); - return vexpress_config_write(osc->func, 0, rate); + return regmap_write(osc->reg, 0, rate); } static struct clk_ops vexpress_osc_ops = { @@ -70,56 +68,31 @@ static struct clk_ops vexpress_osc_ops = { }; -struct clk * __init vexpress_osc_setup(struct device *dev) -{ - struct clk_init_data init; - struct vexpress_osc *osc = kzalloc(sizeof(*osc), GFP_KERNEL); - - if (!osc) - return NULL; - - osc->func = vexpress_config_func_get_by_dev(dev); - if (!osc->func) { - kfree(osc); - return NULL; - } - - init.name = dev_name(dev); - init.ops = &vexpress_osc_ops; - init.flags = CLK_IS_ROOT; - init.num_parents = 0; - osc->hw.init = &init; - - return clk_register(NULL, &osc->hw); -} - -void __init vexpress_osc_of_setup(struct device_node *node) +static int vexpress_osc_probe(struct platform_device *pdev) { + struct clk_lookup *cl = pdev->dev.platform_data; /* Non-DT lookup */ struct clk_init_data init; struct vexpress_osc *osc; struct clk *clk; u32 range[2]; - osc = kzalloc(sizeof(*osc), GFP_KERNEL); + osc = devm_kzalloc(&pdev->dev, sizeof(*osc), GFP_KERNEL); if (!osc) - return; + return -ENOMEM; - osc->func = vexpress_config_func_get_by_node(node); - if (!osc->func) { - pr_err("Failed to obtain config func for node '%s'!\n", - node->full_name); - goto error; - } + osc->reg = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(osc->reg)) + return PTR_ERR(osc->reg); - if (of_property_read_u32_array(node, "freq-range", range, + if (of_property_read_u32_array(pdev->dev.of_node, "freq-range", range, ARRAY_SIZE(range)) == 0) { osc->rate_min = range[0]; osc->rate_max = range[1]; } - of_property_read_string(node, "clock-output-names", &init.name); - if (!init.name) - init.name = node->full_name; + if (of_property_read_string(pdev->dev.of_node, "clock-output-names", + &init.name) != 0) + init.name = dev_name(&pdev->dev); init.ops = &vexpress_osc_ops; init.flags = CLK_IS_ROOT; @@ -128,20 +101,37 @@ void __init 
vexpress_osc_of_setup(struct device_node *node) osc->hw.init = &init; clk = clk_register(NULL, &osc->hw); - if (IS_ERR(clk)) { - pr_err("Failed to register clock '%s'!\n", init.name); - goto error; + if (IS_ERR(clk)) + return PTR_ERR(clk); + + of_clk_add_provider(pdev->dev.of_node, of_clk_src_simple_get, clk); + + /* Only happens for non-DT cases */ + if (cl) { + cl->clk = clk; + clkdev_add(cl); } - of_clk_add_provider(node, of_clk_src_simple_get, clk); + dev_dbg(&pdev->dev, "Registered clock '%s'\n", init.name); - pr_debug("Registered clock '%s'\n", init.name); + return 0; +} - return; +static struct of_device_id vexpress_osc_of_match[] = { + { .compatible = "arm,vexpress-osc", }, + {} +}; -error: - if (osc->func) - vexpress_config_func_put(osc->func); - kfree(osc); +static struct platform_driver vexpress_osc_driver = { + .driver = { + .name = "vexpress-osc", + .of_match_table = vexpress_osc_of_match, + }, + .probe = vexpress_osc_probe, +}; + +static int __init vexpress_osc_init(void) +{ + return platform_driver_register(&vexpress_osc_driver); } -CLK_OF_DECLARE(vexpress_soc, "arm,vexpress-osc", vexpress_osc_of_setup); +core_initcall(vexpress_osc_init); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 96918e1f26a..43f1acf0d1d 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -191,3 +191,14 @@ config EM_TIMER_STI config CLKSRC_QCOM bool + +config CLKSRC_VERSATILE + bool "ARM Versatile (Express) reference platforms clock source" + depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET + select CLKSRC_OF + default y if MFD_VEXPRESS_SYSREG + help + This option enables clock source based on free running + counter available in the "System Registers" block of + ARM Versatile, RealView and Versatile Express reference + platforms. 
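
[Editor's note: after the vexpress-osc conversion above, each OSC probes as an ordinary platform device and exposes its clock through of_clk_add_provider()/of_clk_src_simple_get(), so consumers use the generic clk API. A hypothetical consumer probe follows; the function name and the 63.5 MHz rate are illustrative only, not taken from the patch.]

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_consumer_probe(struct platform_device *pdev)
{
	struct clk *osc;
	int ret;

	/* Resolved through the provider registered in vexpress_osc_probe() */
	osc = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(osc))
		return PTR_ERR(osc);

	/* clk_set_rate() ends up in vexpress_osc_set_rate() -> regmap_write() */
	ret = clk_set_rate(osc, 63500000);
	if (ret)
		return ret;

	return clk_prepare_enable(osc);
}
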
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 98cb6c51aa8..6f25bdffc17 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -39,3 +39,4 @@ obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o +obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 57e823c44d2..5163ec13429 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI]; static struct clock_event_device __percpu *arch_timer_evt; static bool arch_timer_use_virtual = true; +static bool arch_timer_c3stop; static bool arch_timer_mem_use_virtual; /* @@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type, clk->features = CLOCK_EVT_FEAT_ONESHOT; if (type == ARCH_CP15_TIMER) { - clk->features |= CLOCK_EVT_FEAT_C3STOP; + if (arch_timer_c3stop) + clk->features |= CLOCK_EVT_FEAT_C3STOP; clk->name = "arch_sys_timer"; clk->rating = 450; clk->cpumask = cpumask_of(smp_processor_id()); @@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np) } } + arch_timer_c3stop = !of_property_read_bool(np, "always-on"); + arch_timer_register(); arch_timer_common_init(); } diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index 49fbe2847c8..7a08811df9a 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -118,11 +118,11 @@ static void ttc_set_interval(struct ttc_timer *timer, u32 ctrl_reg; /* Disable the counter, set the counter value and re-enable counter */ - ctrl_reg = __raw_readl(timer->base_addr + TTC_CNT_CNTRL_OFFSET); + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; - __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); - __raw_writel(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET); + writel_relaxed(cycles, timer->base_addr + TTC_INTR_VAL_OFFSET); /* * Reset the counter (0x10) so that it starts from 0, one-shot @@ -130,7 +130,7 @@ static void ttc_set_interval(struct ttc_timer *timer, */ ctrl_reg |= CNT_CNTRL_RESET; ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; - __raw_writel(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); } /** @@ -147,7 +147,7 @@ static irqreturn_t ttc_clock_event_interrupt(int irq, void *dev_id) struct ttc_timer *timer = &ttce->ttc; /* Acknowledge the interrupt and call event handler */ - __raw_readl(timer->base_addr + TTC_ISR_OFFSET); + readl_relaxed(timer->base_addr + TTC_ISR_OFFSET); ttce->ce.event_handler(&ttce->ce); @@ -163,13 +163,13 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs) { struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; - return (cycle_t)__raw_readl(timer->base_addr + + return (cycle_t)readl_relaxed(timer->base_addr + TTC_COUNT_VAL_OFFSET); } static u64 notrace ttc_sched_clock_read(void) { - return __raw_readl(ttc_sched_clock_val_reg); + return readl_relaxed(ttc_sched_clock_val_reg); } /** @@ -211,17 +211,17 @@ static void ttc_set_mode(enum clock_event_mode mode, case CLOCK_EVT_MODE_ONESHOT: case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: - ctrl_reg = 
__raw_readl(timer->base_addr + + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); ctrl_reg |= TTC_CNT_CNTRL_DISABLE_MASK; - __raw_writel(ctrl_reg, + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); break; case CLOCK_EVT_MODE_RESUME: - ctrl_reg = __raw_readl(timer->base_addr + + ctrl_reg = readl_relaxed(timer->base_addr + TTC_CNT_CNTRL_OFFSET); ctrl_reg &= ~TTC_CNT_CNTRL_DISABLE_MASK; - __raw_writel(ctrl_reg, + writel_relaxed(ctrl_reg, timer->base_addr + TTC_CNT_CNTRL_OFFSET); break; } @@ -266,8 +266,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, * of an abort. */ ttccs->scale_clk_ctrl_reg_old = - __raw_readl(ttccs->ttc.base_addr + - TTC_CLK_CNTRL_OFFSET); + readl_relaxed(ttccs->ttc.base_addr + + TTC_CLK_CNTRL_OFFSET); psv = (ttccs->scale_clk_ctrl_reg_old & TTC_CLK_CNTRL_PSV_MASK) >> @@ -291,8 +291,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, return NOTIFY_DONE; /* scale up: adjust divider now - before frequency change */ - __raw_writel(ttccs->scale_clk_ctrl_reg_new, - ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + writel_relaxed(ttccs->scale_clk_ctrl_reg_new, + ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); break; } case POST_RATE_CHANGE: @@ -301,8 +301,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, return NOTIFY_OK; /* scale down: adjust divider now - after frequency change */ - __raw_writel(ttccs->scale_clk_ctrl_reg_new, - ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + writel_relaxed(ttccs->scale_clk_ctrl_reg_new, + ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); break; case ABORT_RATE_CHANGE: @@ -311,8 +311,8 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb, return NOTIFY_OK; /* restore original register value */ - __raw_writel(ttccs->scale_clk_ctrl_reg_old, - ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); + writel_relaxed(ttccs->scale_clk_ctrl_reg_old, + ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); /* fall through */ default: return NOTIFY_DONE; @@ -359,10 +359,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base) * with no interrupt and it rolls over at 0xFFFF. Pre-scale * it by 32 also. Let it start running now. */ - __raw_writel(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET); - __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, + writel_relaxed(0x0, ttccs->ttc.base_addr + TTC_IER_OFFSET); + writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, ttccs->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - __raw_writel(CNT_CNTRL_RESET, + writel_relaxed(CNT_CNTRL_RESET, ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE); @@ -438,10 +438,10 @@ static void __init ttc_setup_clockevent(struct clk *clk, * is prescaled by 32 using the interval interrupt. Leave it * disabled for now. 
*/ - __raw_writel(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); - __raw_writel(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, + writel_relaxed(0x23, ttcce->ttc.base_addr + TTC_CNT_CNTRL_OFFSET); + writel_relaxed(CLK_CNTRL_PRESCALE | CLK_CNTRL_PRESCALE_EN, ttcce->ttc.base_addr + TTC_CLK_CNTRL_OFFSET); - __raw_writel(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET); + writel_relaxed(0x1, ttcce->ttc.base_addr + TTC_IER_OFFSET); err = request_irq(irq, ttc_clock_event_interrupt, IRQF_TIMER, ttcce->ce.name, ttcce); @@ -490,7 +490,7 @@ static void __init ttc_timer_init(struct device_node *timer) BUG(); } - clksel = __raw_readl(timer_baseaddr + TTC_CLK_CNTRL_OFFSET); + clksel = readl_relaxed(timer_baseaddr + TTC_CLK_CNTRL_OFFSET); clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); clk_cs = of_clk_get(timer, clksel); if (IS_ERR(clk_cs)) { @@ -498,7 +498,7 @@ static void __init ttc_timer_init(struct device_node *timer) BUG(); } - clksel = __raw_readl(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); + clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET); clksel = !!(clksel & TTC_CLK_CNTRL_CSRC_MASK); clk_ce = of_clk_get(timer, clksel); if (IS_ERR(clk_ce)) { diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index acf5a329d53..8d6420013a0 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -24,6 +24,7 @@ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/clocksource.h> +#include <linux/sched_clock.h> #define EXYNOS4_MCTREG(x) (x) #define EXYNOS4_MCT_G_CNT_L EXYNOS4_MCTREG(0x100) @@ -192,12 +193,19 @@ struct clocksource mct_frc = { .resume = exynos4_frc_resume, }; +static u64 notrace exynos4_read_sched_clock(void) +{ + return exynos4_frc_read(&mct_frc); +} + static void __init exynos4_clocksource_init(void) { exynos4_mct_frc_start(0, 0); if (clocksource_register_hz(&mct_frc, clk_rate)) panic("%s: can't register clocksource\n", mct_frc.name); + + sched_clock_register(exynos4_read_sched_clock, 64, clk_rate); } static void exynos4_mct_comp0_stop(void) diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 0b1836a6c53..bc8d025ce86 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -11,40 +11,93 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/delay.h> +#include <linux/err.h> #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> #include <linux/interrupt.h> -#include <linux/ioport.h> #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h> #include <linux/irq.h> -#include <linux/err.h> -#include <linux/delay.h> -#include <linux/clocksource.h> -#include <linux/clockchips.h> -#include <linux/sh_timer.h> -#include <linux/slab.h> #include <linux/module.h> +#include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/sh_timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +struct sh_cmt_device; + +/* + * The CMT comes in 5 different identified flavours, depending not only on the + * SoC but also on the particular instance. The following table lists the main + * characteristics of those flavours. + * + * 16B 32B 32B-F 48B 48B-2 + * ----------------------------------------------------------------------------- + * Channels 2 1/4 1 6 2/8 + * Control Width 16 16 16 16 32 + * Counter Width 16 32 32 32/48 32/48 + * Shared Start/Stop Y Y Y Y N + * + * The 48-bit gen2 version has a per-channel start/stop register located in the + * channel registers block. All other versions have a shared start/stop register + * located in the global space. + * + * Channels are indexed from 0 to N-1 in the documentation. The channel index + * infers the start/stop bit position in the control register and the channel + * registers block address. Some CMT instances have a subset of channels + * available, in which case the index in the documentation doesn't match the + * "real" index as implemented in hardware. This is for instance the case with + * CMT0 on r8a7740, which is a 32-bit variant with a single channel numbered 0 + * in the documentation but using start/stop bit 5 and having its registers + * block at 0x60. + * + * Similarly CMT0 on r8a73a4, r8a7790 and r8a7791, while implementing 32-bit + * channels only, is a 48-bit gen2 CMT with the 48-bit channels unavailable. 
+ */ + +enum sh_cmt_model { + SH_CMT_16BIT, + SH_CMT_32BIT, + SH_CMT_32BIT_FAST, + SH_CMT_48BIT, + SH_CMT_48BIT_GEN2, +}; + +struct sh_cmt_info { + enum sh_cmt_model model; -struct sh_cmt_priv { - void __iomem *mapbase; - void __iomem *mapbase_str; - struct clk *clk; unsigned long width; /* 16 or 32 bit version of hardware block */ unsigned long overflow_bit; unsigned long clear_bits; - struct irqaction irqaction; - struct platform_device *pdev; + /* callbacks for CMSTR and CMCSR access */ + unsigned long (*read_control)(void __iomem *base, unsigned long offs); + void (*write_control)(void __iomem *base, unsigned long offs, + unsigned long value); + + /* callbacks for CMCNT and CMCOR access */ + unsigned long (*read_count)(void __iomem *base, unsigned long offs); + void (*write_count)(void __iomem *base, unsigned long offs, + unsigned long value); +}; + +struct sh_cmt_channel { + struct sh_cmt_device *cmt; + + unsigned int index; /* Index in the documentation */ + unsigned int hwidx; /* Real hardware index */ + + void __iomem *iostart; + void __iomem *ioctrl; + + unsigned int timer_bit; unsigned long flags; unsigned long match_value; unsigned long next_match_value; @@ -55,38 +108,52 @@ struct sh_cmt_priv { struct clocksource cs; unsigned long total_cycles; bool cs_enabled; +}; - /* callbacks for CMSTR and CMCSR access */ - unsigned long (*read_control)(void __iomem *base, unsigned long offs); - void (*write_control)(void __iomem *base, unsigned long offs, - unsigned long value); +struct sh_cmt_device { + struct platform_device *pdev; - /* callbacks for CMCNT and CMCOR access */ - unsigned long (*read_count)(void __iomem *base, unsigned long offs); - void (*write_count)(void __iomem *base, unsigned long offs, - unsigned long value); + const struct sh_cmt_info *info; + bool legacy; + + void __iomem *mapbase_ch; + void __iomem *mapbase; + struct clk *clk; + + struct sh_cmt_channel *channels; + unsigned int num_channels; + + bool has_clockevent; + bool has_clocksource; }; -/* Examples of supported CMT timer register layouts and I/O access widths: - * - * "16-bit counter and 16-bit control" as found on sh7263: - * CMSTR 0xfffec000 16-bit - * CMCSR 0xfffec002 16-bit - * CMCNT 0xfffec004 16-bit - * CMCOR 0xfffec006 16-bit - * - * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740: - * CMSTR 0xffca0000 16-bit - * CMCSR 0xffca0060 16-bit - * CMCNT 0xffca0064 32-bit - * CMCOR 0xffca0068 32-bit - * - * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790: - * CMSTR 0xffca0500 32-bit - * CMCSR 0xffca0510 32-bit - * CMCNT 0xffca0514 32-bit - * CMCOR 0xffca0518 32-bit - */ +#define SH_CMT16_CMCSR_CMF (1 << 7) +#define SH_CMT16_CMCSR_CMIE (1 << 6) +#define SH_CMT16_CMCSR_CKS8 (0 << 0) +#define SH_CMT16_CMCSR_CKS32 (1 << 0) +#define SH_CMT16_CMCSR_CKS128 (2 << 0) +#define SH_CMT16_CMCSR_CKS512 (3 << 0) +#define SH_CMT16_CMCSR_CKS_MASK (3 << 0) + +#define SH_CMT32_CMCSR_CMF (1 << 15) +#define SH_CMT32_CMCSR_OVF (1 << 14) +#define SH_CMT32_CMCSR_WRFLG (1 << 13) +#define SH_CMT32_CMCSR_STTF (1 << 12) +#define SH_CMT32_CMCSR_STPF (1 << 11) +#define SH_CMT32_CMCSR_SSIE (1 << 10) +#define SH_CMT32_CMCSR_CMS (1 << 9) +#define SH_CMT32_CMCSR_CMM (1 << 8) +#define SH_CMT32_CMCSR_CMTOUT_IE (1 << 7) +#define SH_CMT32_CMCSR_CMR_NONE (0 << 4) +#define SH_CMT32_CMCSR_CMR_DMA (1 << 4) +#define SH_CMT32_CMCSR_CMR_IRQ (2 << 4) +#define SH_CMT32_CMCSR_CMR_MASK (3 << 4) +#define SH_CMT32_CMCSR_DBGIVD (1 << 3) +#define SH_CMT32_CMCSR_CKS_RCLK8 (4 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK32 (5 
<< 0) +#define SH_CMT32_CMCSR_CKS_RCLK128 (6 << 0) +#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0) +#define SH_CMT32_CMCSR_CKS_MASK (7 << 0) static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs) { @@ -110,64 +177,123 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs, iowrite32(value, base + (offs << 2)); } +static const struct sh_cmt_info sh_cmt_info[] = { + [SH_CMT_16BIT] = { + .model = SH_CMT_16BIT, + .width = 16, + .overflow_bit = SH_CMT16_CMCSR_CMF, + .clear_bits = ~SH_CMT16_CMCSR_CMF, + .read_control = sh_cmt_read16, + .write_control = sh_cmt_write16, + .read_count = sh_cmt_read16, + .write_count = sh_cmt_write16, + }, + [SH_CMT_32BIT] = { + .model = SH_CMT_32BIT, + .width = 32, + .overflow_bit = SH_CMT32_CMCSR_CMF, + .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), + .read_control = sh_cmt_read16, + .write_control = sh_cmt_write16, + .read_count = sh_cmt_read32, + .write_count = sh_cmt_write32, + }, + [SH_CMT_32BIT_FAST] = { + .model = SH_CMT_32BIT_FAST, + .width = 32, + .overflow_bit = SH_CMT32_CMCSR_CMF, + .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), + .read_control = sh_cmt_read16, + .write_control = sh_cmt_write16, + .read_count = sh_cmt_read32, + .write_count = sh_cmt_write32, + }, + [SH_CMT_48BIT] = { + .model = SH_CMT_48BIT, + .width = 32, + .overflow_bit = SH_CMT32_CMCSR_CMF, + .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), + .read_control = sh_cmt_read32, + .write_control = sh_cmt_write32, + .read_count = sh_cmt_read32, + .write_count = sh_cmt_write32, + }, + [SH_CMT_48BIT_GEN2] = { + .model = SH_CMT_48BIT_GEN2, + .width = 32, + .overflow_bit = SH_CMT32_CMCSR_CMF, + .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), + .read_control = sh_cmt_read32, + .write_control = sh_cmt_write32, + .read_count = sh_cmt_read32, + .write_count = sh_cmt_write32, + }, +}; + #define CMCSR 0 /* channel register */ #define CMCNT 1 /* channel register */ #define CMCOR 2 /* channel register */ -static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p) +static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch) { - return p->read_control(p->mapbase_str, 0); + if (ch->iostart) + return ch->cmt->info->read_control(ch->iostart, 0); + else + return ch->cmt->info->read_control(ch->cmt->mapbase, 0); } -static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p) +static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, + unsigned long value) { - return p->read_control(p->mapbase, CMCSR); + if (ch->iostart) + ch->cmt->info->write_control(ch->iostart, 0, value); + else + ch->cmt->info->write_control(ch->cmt->mapbase, 0, value); } -static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p) +static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch) { - return p->read_count(p->mapbase, CMCNT); + return ch->cmt->info->read_control(ch->ioctrl, CMCSR); } -static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p, +static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, unsigned long value) { - p->write_control(p->mapbase_str, 0, value); + ch->cmt->info->write_control(ch->ioctrl, CMCSR, value); } -static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p, - unsigned long value) +static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch) { - p->write_control(p->mapbase, CMCSR, value); + return ch->cmt->info->read_count(ch->ioctrl, CMCNT); } -static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p, +static inline void sh_cmt_write_cmcnt(struct 
sh_cmt_channel *ch, unsigned long value) { - p->write_count(p->mapbase, CMCNT, value); + ch->cmt->info->write_count(ch->ioctrl, CMCNT, value); } -static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p, +static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, unsigned long value) { - p->write_count(p->mapbase, CMCOR, value); + ch->cmt->info->write_count(ch->ioctrl, CMCOR, value); } -static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, +static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch, int *has_wrapped) { unsigned long v1, v2, v3; int o1, o2; - o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit; + o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit; /* Make sure the timer value is stable. Stolen from acpi_pm.c */ do { o2 = o1; - v1 = sh_cmt_read_cmcnt(p); - v2 = sh_cmt_read_cmcnt(p); - v3 = sh_cmt_read_cmcnt(p); - o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit; + v1 = sh_cmt_read_cmcnt(ch); + v2 = sh_cmt_read_cmcnt(ch); + v3 = sh_cmt_read_cmcnt(ch); + o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit; } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); @@ -177,52 +303,56 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, static DEFINE_RAW_SPINLOCK(sh_cmt_lock); -static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) +static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start) { - struct sh_timer_config *cfg = p->pdev->dev.platform_data; unsigned long flags, value; /* start stop register shared by multiple timer channels */ raw_spin_lock_irqsave(&sh_cmt_lock, flags); - value = sh_cmt_read_cmstr(p); + value = sh_cmt_read_cmstr(ch); if (start) - value |= 1 << cfg->timer_bit; + value |= 1 << ch->timer_bit; else - value &= ~(1 << cfg->timer_bit); + value &= ~(1 << ch->timer_bit); - sh_cmt_write_cmstr(p, value); + sh_cmt_write_cmstr(ch, value); raw_spin_unlock_irqrestore(&sh_cmt_lock, flags); } -static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) +static int sh_cmt_enable(struct sh_cmt_channel *ch, unsigned long *rate) { int k, ret; - pm_runtime_get_sync(&p->pdev->dev); - dev_pm_syscore_device(&p->pdev->dev, true); + pm_runtime_get_sync(&ch->cmt->pdev->dev); + dev_pm_syscore_device(&ch->cmt->pdev->dev, true); /* enable clock */ - ret = clk_enable(p->clk); + ret = clk_enable(ch->cmt->clk); if (ret) { - dev_err(&p->pdev->dev, "cannot enable clock\n"); + dev_err(&ch->cmt->pdev->dev, "ch%u: cannot enable clock\n", + ch->index); goto err0; } /* make sure channel is disabled */ - sh_cmt_start_stop_ch(p, 0); + sh_cmt_start_stop_ch(ch, 0); /* configure channel, periodic mode and maximum timeout */ - if (p->width == 16) { - *rate = clk_get_rate(p->clk) / 512; - sh_cmt_write_cmcsr(p, 0x43); + if (ch->cmt->info->width == 16) { + *rate = clk_get_rate(ch->cmt->clk) / 512; + sh_cmt_write_cmcsr(ch, SH_CMT16_CMCSR_CMIE | + SH_CMT16_CMCSR_CKS512); } else { - *rate = clk_get_rate(p->clk) / 8; - sh_cmt_write_cmcsr(p, 0x01a4); + *rate = clk_get_rate(ch->cmt->clk) / 8; + sh_cmt_write_cmcsr(ch, SH_CMT32_CMCSR_CMM | + SH_CMT32_CMCSR_CMTOUT_IE | + SH_CMT32_CMCSR_CMR_IRQ | + SH_CMT32_CMCSR_CKS_RCLK8); } - sh_cmt_write_cmcor(p, 0xffffffff); - sh_cmt_write_cmcnt(p, 0); + sh_cmt_write_cmcor(ch, 0xffffffff); + sh_cmt_write_cmcnt(ch, 0); /* * According to the sh73a0 user's manual, as CMCNT can be operated @@ -236,41 +366,42 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) * take RCLKx2 at maximum. 
*/ for (k = 0; k < 100; k++) { - if (!sh_cmt_read_cmcnt(p)) + if (!sh_cmt_read_cmcnt(ch)) break; udelay(1); } - if (sh_cmt_read_cmcnt(p)) { - dev_err(&p->pdev->dev, "cannot clear CMCNT\n"); + if (sh_cmt_read_cmcnt(ch)) { + dev_err(&ch->cmt->pdev->dev, "ch%u: cannot clear CMCNT\n", + ch->index); ret = -ETIMEDOUT; goto err1; } /* enable channel */ - sh_cmt_start_stop_ch(p, 1); + sh_cmt_start_stop_ch(ch, 1); return 0; err1: /* stop clock */ - clk_disable(p->clk); + clk_disable(ch->cmt->clk); err0: return ret; } -static void sh_cmt_disable(struct sh_cmt_priv *p) +static void sh_cmt_disable(struct sh_cmt_channel *ch) { /* disable channel */ - sh_cmt_start_stop_ch(p, 0); + sh_cmt_start_stop_ch(ch, 0); /* disable interrupts in CMT block */ - sh_cmt_write_cmcsr(p, 0); + sh_cmt_write_cmcsr(ch, 0); /* stop clock */ - clk_disable(p->clk); + clk_disable(ch->cmt->clk); - dev_pm_syscore_device(&p->pdev->dev, false); - pm_runtime_put(&p->pdev->dev); + dev_pm_syscore_device(&ch->cmt->pdev->dev, false); + pm_runtime_put(&ch->cmt->pdev->dev); } /* private flags */ @@ -280,24 +411,24 @@ static void sh_cmt_disable(struct sh_cmt_priv *p) #define FLAG_SKIPEVENT (1 << 3) #define FLAG_IRQCONTEXT (1 << 4) -static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, +static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch, int absolute) { unsigned long new_match; - unsigned long value = p->next_match_value; + unsigned long value = ch->next_match_value; unsigned long delay = 0; unsigned long now = 0; int has_wrapped; - now = sh_cmt_get_counter(p, &has_wrapped); - p->flags |= FLAG_REPROGRAM; /* force reprogram */ + now = sh_cmt_get_counter(ch, &has_wrapped); + ch->flags |= FLAG_REPROGRAM; /* force reprogram */ if (has_wrapped) { /* we're competing with the interrupt handler. * -> let the interrupt handler reprogram the timer. * -> interrupt number two handles the event. */ - p->flags |= FLAG_SKIPEVENT; + ch->flags |= FLAG_SKIPEVENT; return; } @@ -309,20 +440,20 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, * but don't save the new match value yet. */ new_match = now + value + delay; - if (new_match > p->max_match_value) - new_match = p->max_match_value; + if (new_match > ch->max_match_value) + new_match = ch->max_match_value; - sh_cmt_write_cmcor(p, new_match); + sh_cmt_write_cmcor(ch, new_match); - now = sh_cmt_get_counter(p, &has_wrapped); - if (has_wrapped && (new_match > p->match_value)) { + now = sh_cmt_get_counter(ch, &has_wrapped); + if (has_wrapped && (new_match > ch->match_value)) { /* we are changing to a greater match value, * so this wrap must be caused by the counter * matching the old value. * -> first interrupt reprograms the timer. * -> interrupt number two handles the event. */ - p->flags |= FLAG_SKIPEVENT; + ch->flags |= FLAG_SKIPEVENT; break; } @@ -333,7 +464,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, * -> save programmed match value. * -> let isr handle the event. */ - p->match_value = new_match; + ch->match_value = new_match; break; } @@ -344,7 +475,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, * -> save programmed match value. * -> let isr handle the event. 
*/ - p->match_value = new_match; + ch->match_value = new_match; break; } @@ -360,138 +491,141 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, delay = 1; if (!delay) - dev_warn(&p->pdev->dev, "too long delay\n"); + dev_warn(&ch->cmt->pdev->dev, "ch%u: too long delay\n", + ch->index); } while (delay); } -static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +static void __sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) { - if (delta > p->max_match_value) - dev_warn(&p->pdev->dev, "delta out of range\n"); + if (delta > ch->max_match_value) + dev_warn(&ch->cmt->pdev->dev, "ch%u: delta out of range\n", + ch->index); - p->next_match_value = delta; - sh_cmt_clock_event_program_verify(p, 0); + ch->next_match_value = delta; + sh_cmt_clock_event_program_verify(ch, 0); } -static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +static void sh_cmt_set_next(struct sh_cmt_channel *ch, unsigned long delta) { unsigned long flags; - raw_spin_lock_irqsave(&p->lock, flags); - __sh_cmt_set_next(p, delta); - raw_spin_unlock_irqrestore(&p->lock, flags); + raw_spin_lock_irqsave(&ch->lock, flags); + __sh_cmt_set_next(ch, delta); + raw_spin_unlock_irqrestore(&ch->lock, flags); } static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) { - struct sh_cmt_priv *p = dev_id; + struct sh_cmt_channel *ch = dev_id; /* clear flags */ - sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits); + sh_cmt_write_cmcsr(ch, sh_cmt_read_cmcsr(ch) & + ch->cmt->info->clear_bits); /* update clock source counter to begin with if enabled * the wrap flag should be cleared by the timer specific * isr before we end up here. */ - if (p->flags & FLAG_CLOCKSOURCE) - p->total_cycles += p->match_value + 1; + if (ch->flags & FLAG_CLOCKSOURCE) + ch->total_cycles += ch->match_value + 1; - if (!(p->flags & FLAG_REPROGRAM)) - p->next_match_value = p->max_match_value; + if (!(ch->flags & FLAG_REPROGRAM)) + ch->next_match_value = ch->max_match_value; - p->flags |= FLAG_IRQCONTEXT; + ch->flags |= FLAG_IRQCONTEXT; - if (p->flags & FLAG_CLOCKEVENT) { - if (!(p->flags & FLAG_SKIPEVENT)) { - if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) { - p->next_match_value = p->max_match_value; - p->flags |= FLAG_REPROGRAM; + if (ch->flags & FLAG_CLOCKEVENT) { + if (!(ch->flags & FLAG_SKIPEVENT)) { + if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) { + ch->next_match_value = ch->max_match_value; + ch->flags |= FLAG_REPROGRAM; } - p->ced.event_handler(&p->ced); + ch->ced.event_handler(&ch->ced); } } - p->flags &= ~FLAG_SKIPEVENT; + ch->flags &= ~FLAG_SKIPEVENT; - if (p->flags & FLAG_REPROGRAM) { - p->flags &= ~FLAG_REPROGRAM; - sh_cmt_clock_event_program_verify(p, 1); + if (ch->flags & FLAG_REPROGRAM) { + ch->flags &= ~FLAG_REPROGRAM; + sh_cmt_clock_event_program_verify(ch, 1); - if (p->flags & FLAG_CLOCKEVENT) - if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) - || (p->match_value == p->next_match_value)) - p->flags &= ~FLAG_REPROGRAM; + if (ch->flags & FLAG_CLOCKEVENT) + if ((ch->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) + || (ch->match_value == ch->next_match_value)) + ch->flags &= ~FLAG_REPROGRAM; } - p->flags &= ~FLAG_IRQCONTEXT; + ch->flags &= ~FLAG_IRQCONTEXT; return IRQ_HANDLED; } -static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) +static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag) { int ret = 0; unsigned long flags; - raw_spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&ch->lock, flags); - if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) - 
ret = sh_cmt_enable(p, &p->rate); + if (!(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) + ret = sh_cmt_enable(ch, &ch->rate); if (ret) goto out; - p->flags |= flag; + ch->flags |= flag; /* setup timeout if no clockevent */ - if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) - __sh_cmt_set_next(p, p->max_match_value); + if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT))) + __sh_cmt_set_next(ch, ch->max_match_value); out: - raw_spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&ch->lock, flags); return ret; } -static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) +static void sh_cmt_stop(struct sh_cmt_channel *ch, unsigned long flag) { unsigned long flags; unsigned long f; - raw_spin_lock_irqsave(&p->lock, flags); + raw_spin_lock_irqsave(&ch->lock, flags); - f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); - p->flags &= ~flag; + f = ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); + ch->flags &= ~flag; - if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) - sh_cmt_disable(p); + if (f && !(ch->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) + sh_cmt_disable(ch); /* adjust the timeout to maximum if only clocksource left */ - if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) - __sh_cmt_set_next(p, p->max_match_value); + if ((flag == FLAG_CLOCKEVENT) && (ch->flags & FLAG_CLOCKSOURCE)) + __sh_cmt_set_next(ch, ch->max_match_value); - raw_spin_unlock_irqrestore(&p->lock, flags); + raw_spin_unlock_irqrestore(&ch->lock, flags); } -static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) +static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs) { - return container_of(cs, struct sh_cmt_priv, cs); + return container_of(cs, struct sh_cmt_channel, cs); } static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) { - struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); unsigned long flags, raw; unsigned long value; int has_wrapped; - raw_spin_lock_irqsave(&p->lock, flags); - value = p->total_cycles; - raw = sh_cmt_get_counter(p, &has_wrapped); + raw_spin_lock_irqsave(&ch->lock, flags); + value = ch->total_cycles; + raw = sh_cmt_get_counter(ch, &has_wrapped); if (unlikely(has_wrapped)) - raw += p->match_value + 1; - raw_spin_unlock_irqrestore(&p->lock, flags); + raw += ch->match_value + 1; + raw_spin_unlock_irqrestore(&ch->lock, flags); return value + raw; } @@ -499,53 +633,53 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) static int sh_cmt_clocksource_enable(struct clocksource *cs) { int ret; - struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); - WARN_ON(p->cs_enabled); + WARN_ON(ch->cs_enabled); - p->total_cycles = 0; + ch->total_cycles = 0; - ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); + ret = sh_cmt_start(ch, FLAG_CLOCKSOURCE); if (!ret) { - __clocksource_updatefreq_hz(cs, p->rate); - p->cs_enabled = true; + __clocksource_updatefreq_hz(cs, ch->rate); + ch->cs_enabled = true; } return ret; } static void sh_cmt_clocksource_disable(struct clocksource *cs) { - struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); - WARN_ON(!p->cs_enabled); + WARN_ON(!ch->cs_enabled); - sh_cmt_stop(p, FLAG_CLOCKSOURCE); - p->cs_enabled = false; + sh_cmt_stop(ch, FLAG_CLOCKSOURCE); + ch->cs_enabled = false; } static void sh_cmt_clocksource_suspend(struct clocksource *cs) { - struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); - sh_cmt_stop(p, 
FLAG_CLOCKSOURCE); - pm_genpd_syscore_poweroff(&p->pdev->dev); + sh_cmt_stop(ch, FLAG_CLOCKSOURCE); + pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); } static void sh_cmt_clocksource_resume(struct clocksource *cs) { - struct sh_cmt_priv *p = cs_to_sh_cmt(cs); + struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); - pm_genpd_syscore_poweron(&p->pdev->dev); - sh_cmt_start(p, FLAG_CLOCKSOURCE); + pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); + sh_cmt_start(ch, FLAG_CLOCKSOURCE); } -static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, - char *name, unsigned long rating) +static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch, + const char *name) { - struct clocksource *cs = &p->cs; + struct clocksource *cs = &ch->cs; cs->name = name; - cs->rating = rating; + cs->rating = 125; cs->read = sh_cmt_clocksource_read; cs->enable = sh_cmt_clocksource_enable; cs->disable = sh_cmt_clocksource_disable; @@ -554,47 +688,48 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; - dev_info(&p->pdev->dev, "used as clock source\n"); + dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n", + ch->index); /* Register with dummy 1 Hz value, gets updated in ->enable() */ clocksource_register_hz(cs, 1); return 0; } -static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) +static struct sh_cmt_channel *ced_to_sh_cmt(struct clock_event_device *ced) { - return container_of(ced, struct sh_cmt_priv, ced); + return container_of(ced, struct sh_cmt_channel, ced); } -static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) +static void sh_cmt_clock_event_start(struct sh_cmt_channel *ch, int periodic) { - struct clock_event_device *ced = &p->ced; + struct clock_event_device *ced = &ch->ced; - sh_cmt_start(p, FLAG_CLOCKEVENT); + sh_cmt_start(ch, FLAG_CLOCKEVENT); /* TODO: calculate good shift from rate and counter bit width */ ced->shift = 32; - ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); - ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); + ced->mult = div_sc(ch->rate, NSEC_PER_SEC, ced->shift); + ced->max_delta_ns = clockevent_delta2ns(ch->max_match_value, ced); ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); if (periodic) - sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1); + sh_cmt_set_next(ch, ((ch->rate + HZ/2) / HZ) - 1); else - sh_cmt_set_next(p, p->max_match_value); + sh_cmt_set_next(ch, ch->max_match_value); } static void sh_cmt_clock_event_mode(enum clock_event_mode mode, struct clock_event_device *ced) { - struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); /* deal with old setting first */ switch (ced->mode) { case CLOCK_EVT_MODE_PERIODIC: case CLOCK_EVT_MODE_ONESHOT: - sh_cmt_stop(p, FLAG_CLOCKEVENT); + sh_cmt_stop(ch, FLAG_CLOCKEVENT); break; default: break; @@ -602,16 +737,18 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode, switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - dev_info(&p->pdev->dev, "used for periodic clock events\n"); - sh_cmt_clock_event_start(p, 1); + dev_info(&ch->cmt->pdev->dev, + "ch%u: used for periodic clock events\n", ch->index); + sh_cmt_clock_event_start(ch, 1); break; case CLOCK_EVT_MODE_ONESHOT: - dev_info(&p->pdev->dev, "used for oneshot clock events\n"); - sh_cmt_clock_event_start(p, 0); + dev_info(&ch->cmt->pdev->dev, + "ch%u: used for oneshot clock events\n", ch->index); + sh_cmt_clock_event_start(ch, 0); break; case 
CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: - sh_cmt_stop(p, FLAG_CLOCKEVENT); + sh_cmt_stop(ch, FLAG_CLOCKEVENT); break; default: break; @@ -621,196 +758,341 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode, static int sh_cmt_clock_event_next(unsigned long delta, struct clock_event_device *ced) { - struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); - if (likely(p->flags & FLAG_IRQCONTEXT)) - p->next_match_value = delta - 1; + if (likely(ch->flags & FLAG_IRQCONTEXT)) + ch->next_match_value = delta - 1; else - sh_cmt_set_next(p, delta - 1); + sh_cmt_set_next(ch, delta - 1); return 0; } static void sh_cmt_clock_event_suspend(struct clock_event_device *ced) { - struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); - pm_genpd_syscore_poweroff(&p->pdev->dev); - clk_unprepare(p->clk); + pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); + clk_unprepare(ch->cmt->clk); } static void sh_cmt_clock_event_resume(struct clock_event_device *ced) { - struct sh_cmt_priv *p = ced_to_sh_cmt(ced); + struct sh_cmt_channel *ch = ced_to_sh_cmt(ced); - clk_prepare(p->clk); - pm_genpd_syscore_poweron(&p->pdev->dev); + clk_prepare(ch->cmt->clk); + pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); } -static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, - char *name, unsigned long rating) +static int sh_cmt_register_clockevent(struct sh_cmt_channel *ch, + const char *name) { - struct clock_event_device *ced = &p->ced; + struct clock_event_device *ced = &ch->ced; + int irq; + int ret; - memset(ced, 0, sizeof(*ced)); + irq = platform_get_irq(ch->cmt->pdev, ch->cmt->legacy ? 0 : ch->index); + if (irq < 0) { + dev_err(&ch->cmt->pdev->dev, "ch%u: failed to get irq\n", + ch->index); + return irq; + } + + ret = request_irq(irq, sh_cmt_interrupt, + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, + dev_name(&ch->cmt->pdev->dev), ch); + if (ret) { + dev_err(&ch->cmt->pdev->dev, "ch%u: failed to request irq %d\n", + ch->index, irq); + return ret; + } ced->name = name; ced->features = CLOCK_EVT_FEAT_PERIODIC; ced->features |= CLOCK_EVT_FEAT_ONESHOT; - ced->rating = rating; - ced->cpumask = cpumask_of(0); + ced->rating = 125; + ced->cpumask = cpu_possible_mask; ced->set_next_event = sh_cmt_clock_event_next; ced->set_mode = sh_cmt_clock_event_mode; ced->suspend = sh_cmt_clock_event_suspend; ced->resume = sh_cmt_clock_event_resume; - dev_info(&p->pdev->dev, "used for clock events\n"); + dev_info(&ch->cmt->pdev->dev, "ch%u: used for clock events\n", + ch->index); clockevents_register_device(ced); + + return 0; } -static int sh_cmt_register(struct sh_cmt_priv *p, char *name, - unsigned long clockevent_rating, - unsigned long clocksource_rating) +static int sh_cmt_register(struct sh_cmt_channel *ch, const char *name, + bool clockevent, bool clocksource) { - if (clockevent_rating) - sh_cmt_register_clockevent(p, name, clockevent_rating); + int ret; + + if (clockevent) { + ch->cmt->has_clockevent = true; + ret = sh_cmt_register_clockevent(ch, name); + if (ret < 0) + return ret; + } - if (clocksource_rating) - sh_cmt_register_clocksource(p, name, clocksource_rating); + if (clocksource) { + ch->cmt->has_clocksource = true; + sh_cmt_register_clocksource(ch, name); + } return 0; } -static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) +static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, + unsigned int hwidx, bool clockevent, + bool 
clocksource, struct sh_cmt_device *cmt) { - struct sh_timer_config *cfg = pdev->dev.platform_data; - struct resource *res, *res2; - int irq, ret; - ret = -ENXIO; + int ret; - memset(p, 0, sizeof(*p)); - p->pdev = pdev; + /* Skip unused channels. */ + if (!clockevent && !clocksource) + return 0; - if (!cfg) { - dev_err(&p->pdev->dev, "missing platform data\n"); - goto err0; + ch->cmt = cmt; + ch->index = index; + ch->hwidx = hwidx; + + /* + * Compute the address of the channel control register block. For the + * timers with a per-channel start/stop register, compute its address + * as well. + * + * For legacy configuration the address has been mapped explicitly. + */ + if (cmt->legacy) { + ch->ioctrl = cmt->mapbase_ch; + } else { + switch (cmt->info->model) { + case SH_CMT_16BIT: + ch->ioctrl = cmt->mapbase + 2 + ch->hwidx * 6; + break; + case SH_CMT_32BIT: + case SH_CMT_48BIT: + ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; + break; + case SH_CMT_32BIT_FAST: + /* + * The 32-bit "fast" timer has a single channel at hwidx + * 5 but is located at offset 0x40 instead of 0x60 for + * some reason. + */ + ch->ioctrl = cmt->mapbase + 0x40; + break; + case SH_CMT_48BIT_GEN2: + ch->iostart = cmt->mapbase + ch->hwidx * 0x100; + ch->ioctrl = ch->iostart + 0x10; + break; + } } - res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&p->pdev->dev, "failed to get I/O memory\n"); - goto err0; + if (cmt->info->width == (sizeof(ch->max_match_value) * 8)) + ch->max_match_value = ~0; + else + ch->max_match_value = (1 << cmt->info->width) - 1; + + ch->match_value = ch->max_match_value; + raw_spin_lock_init(&ch->lock); + + if (cmt->legacy) { + ch->timer_bit = ch->hwidx; + } else { + ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 + ? 0 : ch->hwidx; } - /* optional resource for the shared timer start/stop register */ - res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1); + ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), + clockevent, clocksource); + if (ret) { + dev_err(&cmt->pdev->dev, "ch%u: registration failed\n", + ch->index); + return ret; + } + ch->cs_enabled = false; - irq = platform_get_irq(p->pdev, 0); - if (irq < 0) { - dev_err(&p->pdev->dev, "failed to get irq\n"); - goto err0; + return 0; +} + +static int sh_cmt_map_memory(struct sh_cmt_device *cmt) +{ + struct resource *mem; + + mem = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); + return -ENXIO; } - /* map memory, let mapbase point to our channel */ - p->mapbase = ioremap_nocache(res->start, resource_size(res)); - if (p->mapbase == NULL) { - dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); - goto err0; + cmt->mapbase = ioremap_nocache(mem->start, resource_size(mem)); + if (cmt->mapbase == NULL) { + dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); + return -ENXIO; } - /* map second resource for CMSTR */ - p->mapbase_str = ioremap_nocache(res2 ? res2->start : - res->start - cfg->channel_offset, - res2 ? 
resource_size(res2) : 2); - if (p->mapbase_str == NULL) { - dev_err(&p->pdev->dev, "failed to remap I/O second memory\n"); - goto err1; + return 0; +} + +static int sh_cmt_map_memory_legacy(struct sh_cmt_device *cmt) +{ + struct sh_timer_config *cfg = cmt->pdev->dev.platform_data; + struct resource *res, *res2; + + /* map memory, let mapbase_ch point to our channel */ + res = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&cmt->pdev->dev, "failed to get I/O memory\n"); + return -ENXIO; } - /* request irq using setup_irq() (too early for request_irq()) */ - p->irqaction.name = dev_name(&p->pdev->dev); - p->irqaction.handler = sh_cmt_interrupt; - p->irqaction.dev_id = p; - p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING; - - /* get hold of clock */ - p->clk = clk_get(&p->pdev->dev, "cmt_fck"); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "cannot get clock\n"); - ret = PTR_ERR(p->clk); - goto err2; + cmt->mapbase_ch = ioremap_nocache(res->start, resource_size(res)); + if (cmt->mapbase_ch == NULL) { + dev_err(&cmt->pdev->dev, "failed to remap I/O memory\n"); + return -ENXIO; } - ret = clk_prepare(p->clk); - if (ret < 0) - goto err3; + /* optional resource for the shared timer start/stop register */ + res2 = platform_get_resource(cmt->pdev, IORESOURCE_MEM, 1); - if (res2 && (resource_size(res2) == 4)) { - /* assume both CMSTR and CMCSR to be 32-bit */ - p->read_control = sh_cmt_read32; - p->write_control = sh_cmt_write32; - } else { - p->read_control = sh_cmt_read16; - p->write_control = sh_cmt_write16; + /* map second resource for CMSTR */ + cmt->mapbase = ioremap_nocache(res2 ? res2->start : + res->start - cfg->channel_offset, + res2 ? resource_size(res2) : 2); + if (cmt->mapbase == NULL) { + dev_err(&cmt->pdev->dev, "failed to remap I/O second memory\n"); + iounmap(cmt->mapbase_ch); + return -ENXIO; } - if (resource_size(res) == 6) { - p->width = 16; - p->read_count = sh_cmt_read16; - p->write_count = sh_cmt_write16; - p->overflow_bit = 0x80; - p->clear_bits = ~0x80; - } else { - p->width = 32; - p->read_count = sh_cmt_read32; - p->write_count = sh_cmt_write32; - p->overflow_bit = 0x8000; - p->clear_bits = ~0xc000; + /* identify the model based on the resources */ + if (resource_size(res) == 6) + cmt->info = &sh_cmt_info[SH_CMT_16BIT]; + else if (res2 && (resource_size(res2) == 4)) + cmt->info = &sh_cmt_info[SH_CMT_48BIT_GEN2]; + else + cmt->info = &sh_cmt_info[SH_CMT_32BIT]; + + return 0; +} + +static void sh_cmt_unmap_memory(struct sh_cmt_device *cmt) +{ + iounmap(cmt->mapbase); + if (cmt->mapbase_ch) + iounmap(cmt->mapbase_ch); +} + +static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) +{ + struct sh_timer_config *cfg = pdev->dev.platform_data; + const struct platform_device_id *id = pdev->id_entry; + unsigned int hw_channels; + int ret; + + memset(cmt, 0, sizeof(*cmt)); + cmt->pdev = pdev; + + if (!cfg) { + dev_err(&cmt->pdev->dev, "missing platform data\n"); + return -ENXIO; } - if (p->width == (sizeof(p->max_match_value) * 8)) - p->max_match_value = ~0; + cmt->info = (const struct sh_cmt_info *)id->driver_data; + cmt->legacy = cmt->info ? false : true; + + /* Get hold of clock. */ + cmt->clk = clk_get(&cmt->pdev->dev, cmt->legacy ? "cmt_fck" : "fck"); + if (IS_ERR(cmt->clk)) { + dev_err(&cmt->pdev->dev, "cannot get clock\n"); + return PTR_ERR(cmt->clk); + } + + ret = clk_prepare(cmt->clk); + if (ret < 0) + goto err_clk_put; + + /* + * Map the memory resource(s). 
We need to support both the legacy + * platform device configuration (with one device per channel) and the + * new version (with multiple channels per device). + */ + if (cmt->legacy) + ret = sh_cmt_map_memory_legacy(cmt); else - p->max_match_value = (1 << p->width) - 1; + ret = sh_cmt_map_memory(cmt); - p->match_value = p->max_match_value; - raw_spin_lock_init(&p->lock); + if (ret < 0) + goto err_clk_unprepare; - ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev), - cfg->clockevent_rating, - cfg->clocksource_rating); - if (ret) { - dev_err(&p->pdev->dev, "registration failed\n"); - goto err4; + /* Allocate and setup the channels. */ + if (cmt->legacy) { + cmt->num_channels = 1; + hw_channels = 0; + } else { + cmt->num_channels = hweight8(cfg->channels_mask); + hw_channels = cfg->channels_mask; } - p->cs_enabled = false; - ret = setup_irq(irq, &p->irqaction); - if (ret) { - dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); - goto err4; + cmt->channels = kzalloc(cmt->num_channels * sizeof(*cmt->channels), + GFP_KERNEL); + if (cmt->channels == NULL) { + ret = -ENOMEM; + goto err_unmap; } - platform_set_drvdata(pdev, p); + if (cmt->legacy) { + ret = sh_cmt_setup_channel(&cmt->channels[0], + cfg->timer_bit, cfg->timer_bit, + cfg->clockevent_rating != 0, + cfg->clocksource_rating != 0, cmt); + if (ret < 0) + goto err_unmap; + } else { + unsigned int mask = hw_channels; + unsigned int i; + + /* + * Use the first channel as a clock event device and the second + * channel as a clock source. If only one channel is available + * use it for both. + */ + for (i = 0; i < cmt->num_channels; ++i) { + unsigned int hwidx = ffs(mask) - 1; + bool clocksource = i == 1 || cmt->num_channels == 1; + bool clockevent = i == 0; + + ret = sh_cmt_setup_channel(&cmt->channels[i], i, hwidx, + clockevent, clocksource, + cmt); + if (ret < 0) + goto err_unmap; + + mask &= ~(1 << hwidx); + } + } + + platform_set_drvdata(pdev, cmt); return 0; -err4: - clk_unprepare(p->clk); -err3: - clk_put(p->clk); -err2: - iounmap(p->mapbase_str); -err1: - iounmap(p->mapbase); -err0: + +err_unmap: + kfree(cmt->channels); + sh_cmt_unmap_memory(cmt); +err_clk_unprepare: + clk_unprepare(cmt->clk); +err_clk_put: + clk_put(cmt->clk); return ret; } static int sh_cmt_probe(struct platform_device *pdev) { - struct sh_cmt_priv *p = platform_get_drvdata(pdev); - struct sh_timer_config *cfg = pdev->dev.platform_data; + struct sh_cmt_device *cmt = platform_get_drvdata(pdev); int ret; if (!is_early_platform_device(pdev)) { @@ -818,20 +1100,20 @@ static int sh_cmt_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); } - if (p) { + if (cmt) { dev_info(&pdev->dev, "kept as earlytimer\n"); goto out; } - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (p == NULL) { + cmt = kzalloc(sizeof(*cmt), GFP_KERNEL); + if (cmt == NULL) { dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } - ret = sh_cmt_setup(p, pdev); + ret = sh_cmt_setup(cmt, pdev); if (ret) { - kfree(p); + kfree(cmt); pm_runtime_idle(&pdev->dev); return ret; } @@ -839,7 +1121,7 @@ static int sh_cmt_probe(struct platform_device *pdev) return 0; out: - if (cfg->clockevent_rating || cfg->clocksource_rating) + if (cmt->has_clockevent || cmt->has_clocksource) pm_runtime_irq_safe(&pdev->dev); else pm_runtime_idle(&pdev->dev); @@ -852,12 +1134,24 @@ static int sh_cmt_remove(struct platform_device *pdev) return -EBUSY; /* cannot unregister clockevent and clocksource */ } +static const struct platform_device_id sh_cmt_id_table[] = { + { "sh_cmt", 0 }, + { 
"sh-cmt-16", (kernel_ulong_t)&sh_cmt_info[SH_CMT_16BIT] }, + { "sh-cmt-32", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT] }, + { "sh-cmt-32-fast", (kernel_ulong_t)&sh_cmt_info[SH_CMT_32BIT_FAST] }, + { "sh-cmt-48", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT] }, + { "sh-cmt-48-gen2", (kernel_ulong_t)&sh_cmt_info[SH_CMT_48BIT_GEN2] }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); + static struct platform_driver sh_cmt_device_driver = { .probe = sh_cmt_probe, .remove = sh_cmt_remove, .driver = { .name = "sh_cmt", - } + }, + .id_table = sh_cmt_id_table, }; static int __init sh_cmt_init(void) diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index e30d76e0a6f..f2c1c36139e 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -11,37 +11,48 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/delay.h> +#include <linux/err.h> #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> #include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/delay.h> #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h> #include <linux/irq.h> -#include <linux/err.h> -#include <linux/clockchips.h> -#include <linux/sh_timer.h> -#include <linux/slab.h> #include <linux/module.h> +#include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/sh_timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +struct sh_mtu2_device; + +struct sh_mtu2_channel { + struct sh_mtu2_device *mtu; + unsigned int index; + + void __iomem *base; + int irq; + + struct clock_event_device ced; +}; + +struct sh_mtu2_device { + struct platform_device *pdev; -struct sh_mtu2_priv { void __iomem *mapbase; struct clk *clk; - struct irqaction irqaction; - struct platform_device *pdev; - unsigned long rate; - unsigned long periodic; - struct clock_event_device ced; + + struct sh_mtu2_channel *channels; + unsigned int num_channels; + + bool legacy; + bool has_clockevent; }; static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); @@ -55,6 +66,88 @@ static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); #define TCNT 5 /* channel register */ #define TGR 6 /* channel register */ +#define TCR_CCLR_NONE (0 << 5) +#define TCR_CCLR_TGRA (1 << 5) +#define TCR_CCLR_TGRB (2 << 5) +#define TCR_CCLR_SYNC (3 << 5) +#define TCR_CCLR_TGRC (5 << 5) +#define TCR_CCLR_TGRD (6 << 5) +#define TCR_CCLR_MASK (7 << 5) +#define TCR_CKEG_RISING (0 << 3) +#define TCR_CKEG_FALLING (1 << 3) +#define TCR_CKEG_BOTH (2 << 3) +#define TCR_CKEG_MASK (3 << 3) +/* Values 4 to 7 are channel-dependent */ +#define TCR_TPSC_P1 (0 << 0) +#define TCR_TPSC_P4 (1 << 0) +#define TCR_TPSC_P16 (2 << 0) +#define TCR_TPSC_P64 (3 << 0) +#define TCR_TPSC_CH0_TCLKA (4 << 0) +#define TCR_TPSC_CH0_TCLKB (5 << 0) +#define TCR_TPSC_CH0_TCLKC (6 << 0) +#define TCR_TPSC_CH0_TCLKD (7 << 0) +#define TCR_TPSC_CH1_TCLKA (4 << 0) +#define TCR_TPSC_CH1_TCLKB (5 << 0) +#define TCR_TPSC_CH1_P256 (6 << 0) +#define TCR_TPSC_CH1_TCNT2 (7 << 0) +#define TCR_TPSC_CH2_TCLKA (4 << 0) +#define TCR_TPSC_CH2_TCLKB (5 << 0) +#define TCR_TPSC_CH2_TCLKC (6 << 0) 
+#define TCR_TPSC_CH2_P1024 (7 << 0) +#define TCR_TPSC_CH34_P256 (4 << 0) +#define TCR_TPSC_CH34_P1024 (5 << 0) +#define TCR_TPSC_CH34_TCLKA (6 << 0) +#define TCR_TPSC_CH34_TCLKB (7 << 0) +#define TCR_TPSC_MASK (7 << 0) + +#define TMDR_BFE (1 << 6) +#define TMDR_BFB (1 << 5) +#define TMDR_BFA (1 << 4) +#define TMDR_MD_NORMAL (0 << 0) +#define TMDR_MD_PWM_1 (2 << 0) +#define TMDR_MD_PWM_2 (3 << 0) +#define TMDR_MD_PHASE_1 (4 << 0) +#define TMDR_MD_PHASE_2 (5 << 0) +#define TMDR_MD_PHASE_3 (6 << 0) +#define TMDR_MD_PHASE_4 (7 << 0) +#define TMDR_MD_PWM_SYNC (8 << 0) +#define TMDR_MD_PWM_COMP_CREST (13 << 0) +#define TMDR_MD_PWM_COMP_TROUGH (14 << 0) +#define TMDR_MD_PWM_COMP_BOTH (15 << 0) +#define TMDR_MD_MASK (15 << 0) + +#define TIOC_IOCH(n) ((n) << 4) +#define TIOC_IOCL(n) ((n) << 0) +#define TIOR_OC_RETAIN (0 << 0) +#define TIOR_OC_0_CLEAR (1 << 0) +#define TIOR_OC_0_SET (2 << 0) +#define TIOR_OC_0_TOGGLE (3 << 0) +#define TIOR_OC_1_CLEAR (5 << 0) +#define TIOR_OC_1_SET (6 << 0) +#define TIOR_OC_1_TOGGLE (7 << 0) +#define TIOR_IC_RISING (8 << 0) +#define TIOR_IC_FALLING (9 << 0) +#define TIOR_IC_BOTH (10 << 0) +#define TIOR_IC_TCNT (12 << 0) +#define TIOR_MASK (15 << 0) + +#define TIER_TTGE (1 << 7) +#define TIER_TTGE2 (1 << 6) +#define TIER_TCIEU (1 << 5) +#define TIER_TCIEV (1 << 4) +#define TIER_TGIED (1 << 3) +#define TIER_TGIEC (1 << 2) +#define TIER_TGIEB (1 << 1) +#define TIER_TGIEA (1 << 0) + +#define TSR_TCFD (1 << 7) +#define TSR_TCFU (1 << 5) +#define TSR_TCFV (1 << 4) +#define TSR_TGFD (1 << 3) +#define TSR_TGFC (1 << 2) +#define TSR_TGFB (1 << 1) +#define TSR_TGFA (1 << 0) + static unsigned long mtu2_reg_offs[] = { [TCR] = 0, [TMDR] = 1, @@ -65,135 +158,143 @@ static unsigned long mtu2_reg_offs[] = { [TGR] = 8, }; -static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr) +static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr) { - struct sh_timer_config *cfg = p->pdev->dev.platform_data; - void __iomem *base = p->mapbase; unsigned long offs; - if (reg_nr == TSTR) - return ioread8(base + cfg->channel_offset); + if (reg_nr == TSTR) { + if (ch->mtu->legacy) + return ioread8(ch->mtu->mapbase); + else + return ioread8(ch->mtu->mapbase + 0x280); + } offs = mtu2_reg_offs[reg_nr]; if ((reg_nr == TCNT) || (reg_nr == TGR)) - return ioread16(base + offs); + return ioread16(ch->base + offs); else - return ioread8(base + offs); + return ioread8(ch->base + offs); } -static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr, +static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr, unsigned long value) { - struct sh_timer_config *cfg = p->pdev->dev.platform_data; - void __iomem *base = p->mapbase; unsigned long offs; if (reg_nr == TSTR) { - iowrite8(value, base + cfg->channel_offset); - return; + if (ch->mtu->legacy) + return iowrite8(value, ch->mtu->mapbase); + else + return iowrite8(value, ch->mtu->mapbase + 0x280); } offs = mtu2_reg_offs[reg_nr]; if ((reg_nr == TCNT) || (reg_nr == TGR)) - iowrite16(value, base + offs); + iowrite16(value, ch->base + offs); else - iowrite8(value, base + offs); + iowrite8(value, ch->base + offs); } -static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) +static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) { - struct sh_timer_config *cfg = p->pdev->dev.platform_data; unsigned long flags, value; /* start stop register shared by multiple timer channels */ raw_spin_lock_irqsave(&sh_mtu2_lock, flags); - value = sh_mtu2_read(p, TSTR); + value = 
sh_mtu2_read(ch, TSTR); if (start) - value |= 1 << cfg->timer_bit; + value |= 1 << ch->index; else - value &= ~(1 << cfg->timer_bit); + value &= ~(1 << ch->index); - sh_mtu2_write(p, TSTR, value); + sh_mtu2_write(ch, TSTR, value); raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); } -static int sh_mtu2_enable(struct sh_mtu2_priv *p) +static int sh_mtu2_enable(struct sh_mtu2_channel *ch) { + unsigned long periodic; + unsigned long rate; int ret; - pm_runtime_get_sync(&p->pdev->dev); - dev_pm_syscore_device(&p->pdev->dev, true); + pm_runtime_get_sync(&ch->mtu->pdev->dev); + dev_pm_syscore_device(&ch->mtu->pdev->dev, true); /* enable clock */ - ret = clk_enable(p->clk); + ret = clk_enable(ch->mtu->clk); if (ret) { - dev_err(&p->pdev->dev, "cannot enable clock\n"); + dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n", + ch->index); return ret; } /* make sure channel is disabled */ - sh_mtu2_start_stop_ch(p, 0); - - p->rate = clk_get_rate(p->clk) / 64; - p->periodic = (p->rate + HZ/2) / HZ; - - /* "Periodic Counter Operation" */ - sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */ - sh_mtu2_write(p, TIOR, 0); - sh_mtu2_write(p, TGR, p->periodic); - sh_mtu2_write(p, TCNT, 0); - sh_mtu2_write(p, TMDR, 0); - sh_mtu2_write(p, TIER, 0x01); + sh_mtu2_start_stop_ch(ch, 0); + + rate = clk_get_rate(ch->mtu->clk) / 64; + periodic = (rate + HZ/2) / HZ; + + /* + * "Periodic Counter Operation" + * Clear on TGRA compare match, divide clock by 64. + */ + sh_mtu2_write(ch, TCR, TCR_CCLR_TGRA | TCR_TPSC_P64); + sh_mtu2_write(ch, TIOR, TIOC_IOCH(TIOR_OC_0_CLEAR) | + TIOC_IOCL(TIOR_OC_0_CLEAR)); + sh_mtu2_write(ch, TGR, periodic); + sh_mtu2_write(ch, TCNT, 0); + sh_mtu2_write(ch, TMDR, TMDR_MD_NORMAL); + sh_mtu2_write(ch, TIER, TIER_TGIEA); /* enable channel */ - sh_mtu2_start_stop_ch(p, 1); + sh_mtu2_start_stop_ch(ch, 1); return 0; } -static void sh_mtu2_disable(struct sh_mtu2_priv *p) +static void sh_mtu2_disable(struct sh_mtu2_channel *ch) { /* disable channel */ - sh_mtu2_start_stop_ch(p, 0); + sh_mtu2_start_stop_ch(ch, 0); /* stop clock */ - clk_disable(p->clk); + clk_disable(ch->mtu->clk); - dev_pm_syscore_device(&p->pdev->dev, false); - pm_runtime_put(&p->pdev->dev); + dev_pm_syscore_device(&ch->mtu->pdev->dev, false); + pm_runtime_put(&ch->mtu->pdev->dev); } static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id) { - struct sh_mtu2_priv *p = dev_id; + struct sh_mtu2_channel *ch = dev_id; /* acknowledge interrupt */ - sh_mtu2_read(p, TSR); - sh_mtu2_write(p, TSR, 0xfe); + sh_mtu2_read(ch, TSR); + sh_mtu2_write(ch, TSR, ~TSR_TGFA); /* notify clockevent layer */ - p->ced.event_handler(&p->ced); + ch->ced.event_handler(&ch->ced); return IRQ_HANDLED; } -static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced) +static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced) { - return container_of(ced, struct sh_mtu2_priv, ced); + return container_of(ced, struct sh_mtu2_channel, ced); } static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, struct clock_event_device *ced) { - struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced); + struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced); int disabled = 0; /* deal with old setting first */ switch (ced->mode) { case CLOCK_EVT_MODE_PERIODIC: - sh_mtu2_disable(p); + sh_mtu2_disable(ch); disabled = 1; break; default: @@ -202,12 +303,13 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - dev_info(&p->pdev->dev, "used for periodic clock events\n"); - 
sh_mtu2_enable(p); + dev_info(&ch->mtu->pdev->dev, + "ch%u: used for periodic clock events\n", ch->index); + sh_mtu2_enable(ch); break; case CLOCK_EVT_MODE_UNUSED: if (!disabled) - sh_mtu2_disable(p); + sh_mtu2_disable(ch); break; case CLOCK_EVT_MODE_SHUTDOWN: default: @@ -217,125 +319,207 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced) { - pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev); + pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev); } static void sh_mtu2_clock_event_resume(struct clock_event_device *ced) { - pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev); + pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev); } -static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, - char *name, unsigned long rating) +static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, + const char *name) { - struct clock_event_device *ced = &p->ced; + struct clock_event_device *ced = &ch->ced; int ret; - memset(ced, 0, sizeof(*ced)); - ced->name = name; ced->features = CLOCK_EVT_FEAT_PERIODIC; - ced->rating = rating; - ced->cpumask = cpumask_of(0); + ced->rating = 200; + ced->cpumask = cpu_possible_mask; ced->set_mode = sh_mtu2_clock_event_mode; ced->suspend = sh_mtu2_clock_event_suspend; ced->resume = sh_mtu2_clock_event_resume; - dev_info(&p->pdev->dev, "used for clock events\n"); + dev_info(&ch->mtu->pdev->dev, "ch%u: used for clock events\n", + ch->index); clockevents_register_device(ced); - ret = setup_irq(p->irqaction.irq, &p->irqaction); + ret = request_irq(ch->irq, sh_mtu2_interrupt, + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, + dev_name(&ch->mtu->pdev->dev), ch); if (ret) { - dev_err(&p->pdev->dev, "failed to request irq %d\n", - p->irqaction.irq); + dev_err(&ch->mtu->pdev->dev, "ch%u: failed to request irq %d\n", + ch->index, ch->irq); return; } } -static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name, - unsigned long clockevent_rating) +static int sh_mtu2_register(struct sh_mtu2_channel *ch, const char *name, + bool clockevent) { - if (clockevent_rating) - sh_mtu2_register_clockevent(p, name, clockevent_rating); + if (clockevent) { + ch->mtu->has_clockevent = true; + sh_mtu2_register_clockevent(ch, name); + } return 0; } -static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) +static int sh_mtu2_setup_channel(struct sh_mtu2_channel *ch, unsigned int index, + struct sh_mtu2_device *mtu) { - struct sh_timer_config *cfg = pdev->dev.platform_data; - struct resource *res; - int irq, ret; - ret = -ENXIO; + static const unsigned int channel_offsets[] = { + 0x300, 0x380, 0x000, + }; + bool clockevent; + + ch->mtu = mtu; + + if (mtu->legacy) { + struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; + + clockevent = cfg->clockevent_rating != 0; - memset(p, 0, sizeof(*p)); - p->pdev = pdev; + ch->irq = platform_get_irq(mtu->pdev, 0); + ch->base = mtu->mapbase - cfg->channel_offset; + ch->index = cfg->timer_bit; + } else { + char name[6]; - if (!cfg) { - dev_err(&p->pdev->dev, "missing platform data\n"); - goto err0; + clockevent = true; + + sprintf(name, "tgi%ua", index); + ch->irq = platform_get_irq_byname(mtu->pdev, name); + ch->base = mtu->mapbase + channel_offsets[index]; + ch->index = index; } - platform_set_drvdata(pdev, p); + if (ch->irq < 0) { + /* Skip channels with no declared interrupt. 
*/ + if (!mtu->legacy) + return 0; + + dev_err(&mtu->pdev->dev, "ch%u: failed to get irq\n", + ch->index); + return ch->irq; + } + + return sh_mtu2_register(ch, dev_name(&mtu->pdev->dev), clockevent); +} - res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); +static int sh_mtu2_map_memory(struct sh_mtu2_device *mtu) +{ + struct resource *res; + + res = platform_get_resource(mtu->pdev, IORESOURCE_MEM, 0); if (!res) { - dev_err(&p->pdev->dev, "failed to get I/O memory\n"); - goto err0; + dev_err(&mtu->pdev->dev, "failed to get I/O memory\n"); + return -ENXIO; } - irq = platform_get_irq(p->pdev, 0); - if (irq < 0) { - dev_err(&p->pdev->dev, "failed to get irq\n"); - goto err0; + mtu->mapbase = ioremap_nocache(res->start, resource_size(res)); + if (mtu->mapbase == NULL) + return -ENXIO; + + /* + * In legacy platform device configuration (with one device per channel) + * the resource points to the channel base address. + */ + if (mtu->legacy) { + struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; + mtu->mapbase += cfg->channel_offset; } - /* map memory, let mapbase point to our channel */ - p->mapbase = ioremap_nocache(res->start, resource_size(res)); - if (p->mapbase == NULL) { - dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); - goto err0; + return 0; +} + +static void sh_mtu2_unmap_memory(struct sh_mtu2_device *mtu) +{ + if (mtu->legacy) { + struct sh_timer_config *cfg = mtu->pdev->dev.platform_data; + mtu->mapbase -= cfg->channel_offset; } - /* setup data for setup_irq() (too early for request_irq()) */ - p->irqaction.name = dev_name(&p->pdev->dev); - p->irqaction.handler = sh_mtu2_interrupt; - p->irqaction.dev_id = p; - p->irqaction.irq = irq; - p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING; - - /* get hold of clock */ - p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "cannot get clock\n"); - ret = PTR_ERR(p->clk); - goto err1; + iounmap(mtu->mapbase); +} + +static int sh_mtu2_setup(struct sh_mtu2_device *mtu, + struct platform_device *pdev) +{ + struct sh_timer_config *cfg = pdev->dev.platform_data; + const struct platform_device_id *id = pdev->id_entry; + unsigned int i; + int ret; + + mtu->pdev = pdev; + mtu->legacy = id->driver_data; + + if (mtu->legacy && !cfg) { + dev_err(&mtu->pdev->dev, "missing platform data\n"); + return -ENXIO; } - ret = clk_prepare(p->clk); - if (ret < 0) - goto err2; + /* Get hold of clock. */ + mtu->clk = clk_get(&mtu->pdev->dev, mtu->legacy ? "mtu2_fck" : "fck"); + if (IS_ERR(mtu->clk)) { + dev_err(&mtu->pdev->dev, "cannot get clock\n"); + return PTR_ERR(mtu->clk); + } - ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), - cfg->clockevent_rating); + ret = clk_prepare(mtu->clk); if (ret < 0) - goto err3; + goto err_clk_put; + + /* Map the memory resource. */ + ret = sh_mtu2_map_memory(mtu); + if (ret < 0) { + dev_err(&mtu->pdev->dev, "failed to remap I/O memory\n"); + goto err_clk_unprepare; + } + + /* Allocate and setup the channels. 
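+ * Legacy platform data describes a single channel per device,
+ * while the non-legacy binding exposes all three channels mapped
+ * through channel_offsets[] above (0x300, 0x380 and 0x000).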
*/ + if (mtu->legacy) + mtu->num_channels = 1; + else + mtu->num_channels = 3; + + mtu->channels = kzalloc(sizeof(*mtu->channels) * mtu->num_channels, + GFP_KERNEL); + if (mtu->channels == NULL) { + ret = -ENOMEM; + goto err_unmap; + } + + if (mtu->legacy) { + ret = sh_mtu2_setup_channel(&mtu->channels[0], 0, mtu); + if (ret < 0) + goto err_unmap; + } else { + for (i = 0; i < mtu->num_channels; ++i) { + ret = sh_mtu2_setup_channel(&mtu->channels[i], i, mtu); + if (ret < 0) + goto err_unmap; + } + } + + platform_set_drvdata(pdev, mtu); return 0; - err3: - clk_unprepare(p->clk); - err2: - clk_put(p->clk); - err1: - iounmap(p->mapbase); - err0: + +err_unmap: + kfree(mtu->channels); + sh_mtu2_unmap_memory(mtu); +err_clk_unprepare: + clk_unprepare(mtu->clk); +err_clk_put: + clk_put(mtu->clk); return ret; } static int sh_mtu2_probe(struct platform_device *pdev) { - struct sh_mtu2_priv *p = platform_get_drvdata(pdev); - struct sh_timer_config *cfg = pdev->dev.platform_data; + struct sh_mtu2_device *mtu = platform_get_drvdata(pdev); int ret; if (!is_early_platform_device(pdev)) { @@ -343,20 +527,20 @@ static int sh_mtu2_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); } - if (p) { + if (mtu) { dev_info(&pdev->dev, "kept as earlytimer\n"); goto out; } - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (p == NULL) { + mtu = kzalloc(sizeof(*mtu), GFP_KERNEL); + if (mtu == NULL) { dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } - ret = sh_mtu2_setup(p, pdev); + ret = sh_mtu2_setup(mtu, pdev); if (ret) { - kfree(p); + kfree(mtu); pm_runtime_idle(&pdev->dev); return ret; } @@ -364,7 +548,7 @@ static int sh_mtu2_probe(struct platform_device *pdev) return 0; out: - if (cfg->clockevent_rating) + if (mtu->has_clockevent) pm_runtime_irq_safe(&pdev->dev); else pm_runtime_idle(&pdev->dev); @@ -377,12 +561,20 @@ static int sh_mtu2_remove(struct platform_device *pdev) return -EBUSY; /* cannot unregister clockevent */ } +static const struct platform_device_id sh_mtu2_id_table[] = { + { "sh_mtu2", 1 }, + { "sh-mtu2", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(platform, sh_mtu2_id_table); + static struct platform_driver sh_mtu2_device_driver = { .probe = sh_mtu2_probe, .remove = sh_mtu2_remove, .driver = { .name = "sh_mtu2", - } + }, + .id_table = sh_mtu2_id_table, }; static int __init sh_mtu2_init(void) diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index ecd7b60bfdf..4ba2c0fea58 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -11,35 +11,41 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/clocksource.h> +#include <linux/delay.h> +#include <linux/err.h> #include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/spinlock.h> #include <linux/interrupt.h> -#include <linux/ioport.h> -#include <linux/delay.h> #include <linux/io.h> -#include <linux/clk.h> +#include <linux/ioport.h> #include <linux/irq.h> -#include <linux/err.h> -#include <linux/clocksource.h> -#include <linux/clockchips.h> -#include <linux/sh_timer.h> -#include <linux/slab.h> #include <linux/module.h> +#include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/sh_timer.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +enum sh_tmu_model { + SH_TMU_LEGACY, + SH_TMU, + SH_TMU_SH3, +}; + +struct sh_tmu_device; + +struct sh_tmu_channel { + struct sh_tmu_device *tmu; + unsigned int index; + + void __iomem *base; + int irq; -struct sh_tmu_priv { - void __iomem *mapbase; - struct clk *clk; - struct irqaction irqaction; - struct platform_device *pdev; unsigned long rate; unsigned long periodic; struct clock_event_device ced; @@ -48,6 +54,21 @@ struct sh_tmu_priv { unsigned int enable_count; }; +struct sh_tmu_device { + struct platform_device *pdev; + + void __iomem *mapbase; + struct clk *clk; + + enum sh_tmu_model model; + + struct sh_tmu_channel *channels; + unsigned int num_channels; + + bool has_clockevent; + bool has_clocksource; +}; + static DEFINE_RAW_SPINLOCK(sh_tmu_lock); #define TSTR -1 /* shared register */ @@ -55,189 +76,208 @@ static DEFINE_RAW_SPINLOCK(sh_tmu_lock); #define TCNT 1 /* channel register */ #define TCR 2 /* channel register */ -static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr) +#define TCR_UNF (1 << 8) +#define TCR_UNIE (1 << 5) +#define TCR_TPSC_CLK4 (0 << 0) +#define TCR_TPSC_CLK16 (1 << 0) +#define TCR_TPSC_CLK64 (2 << 0) +#define TCR_TPSC_CLK256 (3 << 0) +#define TCR_TPSC_CLK1024 (4 << 0) +#define TCR_TPSC_MASK (7 << 0) + +static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr) { - struct sh_timer_config *cfg = p->pdev->dev.platform_data; - void __iomem *base = p->mapbase; unsigned long offs; - if (reg_nr == TSTR) - return ioread8(base - cfg->channel_offset); + if (reg_nr == TSTR) { + switch (ch->tmu->model) { + case SH_TMU_LEGACY: + return ioread8(ch->tmu->mapbase); + case SH_TMU_SH3: + return ioread8(ch->tmu->mapbase + 2); + case SH_TMU: + return ioread8(ch->tmu->mapbase + 4); + } + } offs = reg_nr << 2; if (reg_nr == TCR) - return ioread16(base + offs); + return ioread16(ch->base + offs); else - return ioread32(base + offs); + return ioread32(ch->base + offs); } -static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr, +static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr, unsigned long value) { - struct sh_timer_config *cfg = p->pdev->dev.platform_data; - void __iomem *base = p->mapbase; unsigned long offs; if (reg_nr == TSTR) { - iowrite8(value, base - cfg->channel_offset); - return; + switch (ch->tmu->model) { + case SH_TMU_LEGACY: + return iowrite8(value, ch->tmu->mapbase); + case SH_TMU_SH3: + return iowrite8(value, ch->tmu->mapbase + 2); + case SH_TMU: + return iowrite8(value, ch->tmu->mapbase + 4); + } } offs = reg_nr << 2; if (reg_nr == TCR) - 
iowrite16(value, base + offs); + iowrite16(value, ch->base + offs); else - iowrite32(value, base + offs); + iowrite32(value, ch->base + offs); } -static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start) +static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start) { - struct sh_timer_config *cfg = p->pdev->dev.platform_data; unsigned long flags, value; /* start stop register shared by multiple timer channels */ raw_spin_lock_irqsave(&sh_tmu_lock, flags); - value = sh_tmu_read(p, TSTR); + value = sh_tmu_read(ch, TSTR); if (start) - value |= 1 << cfg->timer_bit; + value |= 1 << ch->index; else - value &= ~(1 << cfg->timer_bit); + value &= ~(1 << ch->index); - sh_tmu_write(p, TSTR, value); + sh_tmu_write(ch, TSTR, value); raw_spin_unlock_irqrestore(&sh_tmu_lock, flags); } -static int __sh_tmu_enable(struct sh_tmu_priv *p) +static int __sh_tmu_enable(struct sh_tmu_channel *ch) { int ret; /* enable clock */ - ret = clk_enable(p->clk); + ret = clk_enable(ch->tmu->clk); if (ret) { - dev_err(&p->pdev->dev, "cannot enable clock\n"); + dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n", + ch->index); return ret; } /* make sure channel is disabled */ - sh_tmu_start_stop_ch(p, 0); + sh_tmu_start_stop_ch(ch, 0); /* maximum timeout */ - sh_tmu_write(p, TCOR, 0xffffffff); - sh_tmu_write(p, TCNT, 0xffffffff); + sh_tmu_write(ch, TCOR, 0xffffffff); + sh_tmu_write(ch, TCNT, 0xffffffff); /* configure channel to parent clock / 4, irq off */ - p->rate = clk_get_rate(p->clk) / 4; - sh_tmu_write(p, TCR, 0x0000); + ch->rate = clk_get_rate(ch->tmu->clk) / 4; + sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); /* enable channel */ - sh_tmu_start_stop_ch(p, 1); + sh_tmu_start_stop_ch(ch, 1); return 0; } -static int sh_tmu_enable(struct sh_tmu_priv *p) +static int sh_tmu_enable(struct sh_tmu_channel *ch) { - if (p->enable_count++ > 0) + if (ch->enable_count++ > 0) return 0; - pm_runtime_get_sync(&p->pdev->dev); - dev_pm_syscore_device(&p->pdev->dev, true); + pm_runtime_get_sync(&ch->tmu->pdev->dev); + dev_pm_syscore_device(&ch->tmu->pdev->dev, true); - return __sh_tmu_enable(p); + return __sh_tmu_enable(ch); } -static void __sh_tmu_disable(struct sh_tmu_priv *p) +static void __sh_tmu_disable(struct sh_tmu_channel *ch) { /* disable channel */ - sh_tmu_start_stop_ch(p, 0); + sh_tmu_start_stop_ch(ch, 0); /* disable interrupts in TMU block */ - sh_tmu_write(p, TCR, 0x0000); + sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); /* stop clock */ - clk_disable(p->clk); + clk_disable(ch->tmu->clk); } -static void sh_tmu_disable(struct sh_tmu_priv *p) +static void sh_tmu_disable(struct sh_tmu_channel *ch) { - if (WARN_ON(p->enable_count == 0)) + if (WARN_ON(ch->enable_count == 0)) return; - if (--p->enable_count > 0) + if (--ch->enable_count > 0) return; - __sh_tmu_disable(p); + __sh_tmu_disable(ch); - dev_pm_syscore_device(&p->pdev->dev, false); - pm_runtime_put(&p->pdev->dev); + dev_pm_syscore_device(&ch->tmu->pdev->dev, false); + pm_runtime_put(&ch->tmu->pdev->dev); } -static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta, +static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta, int periodic) { /* stop timer */ - sh_tmu_start_stop_ch(p, 0); + sh_tmu_start_stop_ch(ch, 0); /* acknowledge interrupt */ - sh_tmu_read(p, TCR); + sh_tmu_read(ch, TCR); /* enable interrupt */ - sh_tmu_write(p, TCR, 0x0020); + sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4); /* reload delta value in case of periodic timer */ if (periodic) - sh_tmu_write(p, TCOR, delta); + sh_tmu_write(ch, TCOR, delta); else - 
sh_tmu_write(p, TCOR, 0xffffffff); + sh_tmu_write(ch, TCOR, 0xffffffff); - sh_tmu_write(p, TCNT, delta); + sh_tmu_write(ch, TCNT, delta); /* start timer */ - sh_tmu_start_stop_ch(p, 1); + sh_tmu_start_stop_ch(ch, 1); } static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id) { - struct sh_tmu_priv *p = dev_id; + struct sh_tmu_channel *ch = dev_id; /* disable or acknowledge interrupt */ - if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) - sh_tmu_write(p, TCR, 0x0000); + if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT) + sh_tmu_write(ch, TCR, TCR_TPSC_CLK4); else - sh_tmu_write(p, TCR, 0x0020); + sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4); /* notify clockevent layer */ - p->ced.event_handler(&p->ced); + ch->ced.event_handler(&ch->ced); return IRQ_HANDLED; } -static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs) +static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs) { - return container_of(cs, struct sh_tmu_priv, cs); + return container_of(cs, struct sh_tmu_channel, cs); } static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) { - struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); - return sh_tmu_read(p, TCNT) ^ 0xffffffff; + return sh_tmu_read(ch, TCNT) ^ 0xffffffff; } static int sh_tmu_clocksource_enable(struct clocksource *cs) { - struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); int ret; - if (WARN_ON(p->cs_enabled)) + if (WARN_ON(ch->cs_enabled)) return 0; - ret = sh_tmu_enable(p); + ret = sh_tmu_enable(ch); if (!ret) { - __clocksource_updatefreq_hz(cs, p->rate); - p->cs_enabled = true; + __clocksource_updatefreq_hz(cs, ch->rate); + ch->cs_enabled = true; } return ret; @@ -245,48 +285,48 @@ static int sh_tmu_clocksource_enable(struct clocksource *cs) static void sh_tmu_clocksource_disable(struct clocksource *cs) { - struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); - if (WARN_ON(!p->cs_enabled)) + if (WARN_ON(!ch->cs_enabled)) return; - sh_tmu_disable(p); - p->cs_enabled = false; + sh_tmu_disable(ch); + ch->cs_enabled = false; } static void sh_tmu_clocksource_suspend(struct clocksource *cs) { - struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); - if (!p->cs_enabled) + if (!ch->cs_enabled) return; - if (--p->enable_count == 0) { - __sh_tmu_disable(p); - pm_genpd_syscore_poweroff(&p->pdev->dev); + if (--ch->enable_count == 0) { + __sh_tmu_disable(ch); + pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev); } } static void sh_tmu_clocksource_resume(struct clocksource *cs) { - struct sh_tmu_priv *p = cs_to_sh_tmu(cs); + struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); - if (!p->cs_enabled) + if (!ch->cs_enabled) return; - if (p->enable_count++ == 0) { - pm_genpd_syscore_poweron(&p->pdev->dev); - __sh_tmu_enable(p); + if (ch->enable_count++ == 0) { + pm_genpd_syscore_poweron(&ch->tmu->pdev->dev); + __sh_tmu_enable(ch); } } -static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, - char *name, unsigned long rating) +static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch, + const char *name) { - struct clocksource *cs = &p->cs; + struct clocksource *cs = &ch->cs; cs->name = name; - cs->rating = rating; + cs->rating = 200; cs->read = sh_tmu_clocksource_read; cs->enable = sh_tmu_clocksource_enable; cs->disable = sh_tmu_clocksource_disable; @@ -295,43 +335,44 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, cs->mask = CLOCKSOURCE_MASK(32); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; - 
dev_info(&p->pdev->dev, "used as clock source\n"); + dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n", + ch->index); /* Register with dummy 1 Hz value, gets updated in ->enable() */ clocksource_register_hz(cs, 1); return 0; } -static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced) +static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced) { - return container_of(ced, struct sh_tmu_priv, ced); + return container_of(ced, struct sh_tmu_channel, ced); } -static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic) +static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic) { - struct clock_event_device *ced = &p->ced; + struct clock_event_device *ced = &ch->ced; - sh_tmu_enable(p); + sh_tmu_enable(ch); - clockevents_config(ced, p->rate); + clockevents_config(ced, ch->rate); if (periodic) { - p->periodic = (p->rate + HZ/2) / HZ; - sh_tmu_set_next(p, p->periodic, 1); + ch->periodic = (ch->rate + HZ/2) / HZ; + sh_tmu_set_next(ch, ch->periodic, 1); } } static void sh_tmu_clock_event_mode(enum clock_event_mode mode, struct clock_event_device *ced) { - struct sh_tmu_priv *p = ced_to_sh_tmu(ced); + struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); int disabled = 0; /* deal with old setting first */ switch (ced->mode) { case CLOCK_EVT_MODE_PERIODIC: case CLOCK_EVT_MODE_ONESHOT: - sh_tmu_disable(p); + sh_tmu_disable(ch); disabled = 1; break; default: @@ -340,16 +381,18 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode, switch (mode) { case CLOCK_EVT_MODE_PERIODIC: - dev_info(&p->pdev->dev, "used for periodic clock events\n"); - sh_tmu_clock_event_start(p, 1); + dev_info(&ch->tmu->pdev->dev, + "ch%u: used for periodic clock events\n", ch->index); + sh_tmu_clock_event_start(ch, 1); break; case CLOCK_EVT_MODE_ONESHOT: - dev_info(&p->pdev->dev, "used for oneshot clock events\n"); - sh_tmu_clock_event_start(p, 0); + dev_info(&ch->tmu->pdev->dev, + "ch%u: used for oneshot clock events\n", ch->index); + sh_tmu_clock_event_start(ch, 0); break; case CLOCK_EVT_MODE_UNUSED: if (!disabled) - sh_tmu_disable(p); + sh_tmu_disable(ch); break; case CLOCK_EVT_MODE_SHUTDOWN: default: @@ -360,147 +403,234 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode, static int sh_tmu_clock_event_next(unsigned long delta, struct clock_event_device *ced) { - struct sh_tmu_priv *p = ced_to_sh_tmu(ced); + struct sh_tmu_channel *ch = ced_to_sh_tmu(ced); BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); /* program new delta value */ - sh_tmu_set_next(p, delta, 0); + sh_tmu_set_next(ch, delta, 0); return 0; } static void sh_tmu_clock_event_suspend(struct clock_event_device *ced) { - pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->pdev->dev); + pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev); } static void sh_tmu_clock_event_resume(struct clock_event_device *ced) { - pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->pdev->dev); + pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev); } -static void sh_tmu_register_clockevent(struct sh_tmu_priv *p, - char *name, unsigned long rating) +static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch, + const char *name) { - struct clock_event_device *ced = &p->ced; + struct clock_event_device *ced = &ch->ced; int ret; - memset(ced, 0, sizeof(*ced)); - ced->name = name; ced->features = CLOCK_EVT_FEAT_PERIODIC; ced->features |= CLOCK_EVT_FEAT_ONESHOT; - ced->rating = rating; + ced->rating = 200; ced->cpumask = cpumask_of(0); ced->set_next_event = sh_tmu_clock_event_next; 
ced->set_mode = sh_tmu_clock_event_mode; ced->suspend = sh_tmu_clock_event_suspend; ced->resume = sh_tmu_clock_event_resume; - dev_info(&p->pdev->dev, "used for clock events\n"); + dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n", + ch->index); clockevents_config_and_register(ced, 1, 0x300, 0xffffffff); - ret = setup_irq(p->irqaction.irq, &p->irqaction); + ret = request_irq(ch->irq, sh_tmu_interrupt, + IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, + dev_name(&ch->tmu->pdev->dev), ch); if (ret) { - dev_err(&p->pdev->dev, "failed to request irq %d\n", - p->irqaction.irq); + dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n", + ch->index, ch->irq); return; } } -static int sh_tmu_register(struct sh_tmu_priv *p, char *name, - unsigned long clockevent_rating, - unsigned long clocksource_rating) +static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name, + bool clockevent, bool clocksource) { - if (clockevent_rating) - sh_tmu_register_clockevent(p, name, clockevent_rating); - else if (clocksource_rating) - sh_tmu_register_clocksource(p, name, clocksource_rating); + if (clockevent) { + ch->tmu->has_clockevent = true; + sh_tmu_register_clockevent(ch, name); + } else if (clocksource) { + ch->tmu->has_clocksource = true; + sh_tmu_register_clocksource(ch, name); + } return 0; } -static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) +static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index, + bool clockevent, bool clocksource, + struct sh_tmu_device *tmu) { - struct sh_timer_config *cfg = pdev->dev.platform_data; - struct resource *res; - int irq, ret; - ret = -ENXIO; + /* Skip unused channels. */ + if (!clockevent && !clocksource) + return 0; - memset(p, 0, sizeof(*p)); - p->pdev = pdev; + ch->tmu = tmu; + + if (tmu->model == SH_TMU_LEGACY) { + struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; + + /* + * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps + * channel registers blocks at base + 2 + 12 * index, while all + * other variants map them at base + 4 + 12 * index. We can + * compute the index by just dividing by 12, the 2 bytes or 4 + * bytes offset being hidden by the integer division. + */ + ch->index = cfg->channel_offset / 12; + ch->base = tmu->mapbase + cfg->channel_offset; + } else { + ch->index = index; + + if (tmu->model == SH_TMU_SH3) + ch->base = tmu->mapbase + 4 + ch->index * 12; + else + ch->base = tmu->mapbase + 8 + ch->index * 12; + } - if (!cfg) { - dev_err(&p->pdev->dev, "missing platform data\n"); - goto err0; + ch->irq = platform_get_irq(tmu->pdev, ch->index); + if (ch->irq < 0) { + dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n", + ch->index); + return ch->irq; } - platform_set_drvdata(pdev, p); + ch->cs_enabled = false; + ch->enable_count = 0; + + return sh_tmu_register(ch, dev_name(&tmu->pdev->dev), + clockevent, clocksource); +} - res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); +static int sh_tmu_map_memory(struct sh_tmu_device *tmu) +{ + struct resource *res; + + res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0); if (!res) { - dev_err(&p->pdev->dev, "failed to get I/O memory\n"); - goto err0; + dev_err(&tmu->pdev->dev, "failed to get I/O memory\n"); + return -ENXIO; + } + + tmu->mapbase = ioremap_nocache(res->start, resource_size(res)); + if (tmu->mapbase == NULL) + return -ENXIO; + + /* + * In legacy platform device configuration (with one device per channel) + * the resource points to the channel base address. 
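+ * Subtracting cfg->channel_offset below therefore recovers the
+ * device base address that the TSTR accessors expect. Worked
+ * example with hypothetical legacy platform data for an SH3
+ * channel 1: the resource starts at base + 2 + 12 * 1 = base + 14,
+ * so mapbase - 14 yields the device base, and 14 / 12 = 1 recovers
+ * the channel index in sh_tmu_channel_setup().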
+ */ + if (tmu->model == SH_TMU_LEGACY) { + struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; + tmu->mapbase -= cfg->channel_offset; } - irq = platform_get_irq(p->pdev, 0); - if (irq < 0) { - dev_err(&p->pdev->dev, "failed to get irq\n"); - goto err0; + return 0; +} + +static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu) +{ + if (tmu->model == SH_TMU_LEGACY) { + struct sh_timer_config *cfg = tmu->pdev->dev.platform_data; + tmu->mapbase += cfg->channel_offset; } - /* map memory, let mapbase point to our channel */ - p->mapbase = ioremap_nocache(res->start, resource_size(res)); - if (p->mapbase == NULL) { - dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); - goto err0; + iounmap(tmu->mapbase); +} + +static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev) +{ + struct sh_timer_config *cfg = pdev->dev.platform_data; + const struct platform_device_id *id = pdev->id_entry; + unsigned int i; + int ret; + + if (!cfg) { + dev_err(&tmu->pdev->dev, "missing platform data\n"); + return -ENXIO; } - /* setup data for setup_irq() (too early for request_irq()) */ - p->irqaction.name = dev_name(&p->pdev->dev); - p->irqaction.handler = sh_tmu_interrupt; - p->irqaction.dev_id = p; - p->irqaction.irq = irq; - p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING; - - /* get hold of clock */ - p->clk = clk_get(&p->pdev->dev, "tmu_fck"); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "cannot get clock\n"); - ret = PTR_ERR(p->clk); - goto err1; + tmu->pdev = pdev; + tmu->model = id->driver_data; + + /* Get hold of clock. */ + tmu->clk = clk_get(&tmu->pdev->dev, + tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck"); + if (IS_ERR(tmu->clk)) { + dev_err(&tmu->pdev->dev, "cannot get clock\n"); + return PTR_ERR(tmu->clk); } - ret = clk_prepare(p->clk); + ret = clk_prepare(tmu->clk); if (ret < 0) - goto err2; + goto err_clk_put; - p->cs_enabled = false; - p->enable_count = 0; + /* Map the memory resource. */ + ret = sh_tmu_map_memory(tmu); + if (ret < 0) { + dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n"); + goto err_clk_unprepare; + } - ret = sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), - cfg->clockevent_rating, - cfg->clocksource_rating); - if (ret < 0) - goto err3; + /* Allocate and setup the channels. */ + if (tmu->model == SH_TMU_LEGACY) + tmu->num_channels = 1; + else + tmu->num_channels = hweight8(cfg->channels_mask); + + tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels, + GFP_KERNEL); + if (tmu->channels == NULL) { + ret = -ENOMEM; + goto err_unmap; + } + + if (tmu->model == SH_TMU_LEGACY) { + ret = sh_tmu_channel_setup(&tmu->channels[0], 0, + cfg->clockevent_rating != 0, + cfg->clocksource_rating != 0, tmu); + if (ret < 0) + goto err_unmap; + } else { + /* + * Use the first channel as a clock event device and the second + * channel as a clock source. 
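+ * (i == 0 registers a clockevent, i == 1 a clocksource; channels
+ * with neither role are skipped by sh_tmu_channel_setup().)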
+ */ + for (i = 0; i < tmu->num_channels; ++i) { + ret = sh_tmu_channel_setup(&tmu->channels[i], i, + i == 0, i == 1, tmu); + if (ret < 0) + goto err_unmap; + } + } + + platform_set_drvdata(pdev, tmu); return 0; - err3: - clk_unprepare(p->clk); - err2: - clk_put(p->clk); - err1: - iounmap(p->mapbase); - err0: +err_unmap: + kfree(tmu->channels); + sh_tmu_unmap_memory(tmu); +err_clk_unprepare: + clk_unprepare(tmu->clk); +err_clk_put: + clk_put(tmu->clk); return ret; } static int sh_tmu_probe(struct platform_device *pdev) { - struct sh_tmu_priv *p = platform_get_drvdata(pdev); - struct sh_timer_config *cfg = pdev->dev.platform_data; + struct sh_tmu_device *tmu = platform_get_drvdata(pdev); int ret; if (!is_early_platform_device(pdev)) { @@ -508,20 +638,20 @@ static int sh_tmu_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); } - if (p) { + if (tmu) { dev_info(&pdev->dev, "kept as earlytimer\n"); goto out; } - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (p == NULL) { + tmu = kzalloc(sizeof(*tmu), GFP_KERNEL); + if (tmu == NULL) { dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } - ret = sh_tmu_setup(p, pdev); + ret = sh_tmu_setup(tmu, pdev); if (ret) { - kfree(p); + kfree(tmu); pm_runtime_idle(&pdev->dev); return ret; } @@ -529,7 +659,7 @@ static int sh_tmu_probe(struct platform_device *pdev) return 0; out: - if (cfg->clockevent_rating || cfg->clocksource_rating) + if (tmu->has_clockevent || tmu->has_clocksource) pm_runtime_irq_safe(&pdev->dev); else pm_runtime_idle(&pdev->dev); @@ -542,12 +672,21 @@ static int sh_tmu_remove(struct platform_device *pdev) return -EBUSY; /* cannot unregister clockevent and clocksource */ } +static const struct platform_device_id sh_tmu_id_table[] = { + { "sh_tmu", SH_TMU_LEGACY }, + { "sh-tmu", SH_TMU }, + { "sh-tmu-sh3", SH_TMU_SH3 }, + { } +}; +MODULE_DEVICE_TABLE(platform, sh_tmu_id_table); + static struct platform_driver sh_tmu_device_driver = { .probe = sh_tmu_probe, .remove = sh_tmu_remove, .driver = { .name = "sh_tmu", - } + }, + .id_table = sh_tmu_id_table, }; static int __init sh_tmu_init(void) diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 00fdd117028..a8d7ea14f18 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -100,7 +100,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) || tcd->clkevt.mode == CLOCK_EVT_MODE_ONESHOT) { __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); - clk_disable_unprepare(tcd->clk); + clk_disable(tcd->clk); } switch (m) { @@ -109,7 +109,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) * of oneshot, we get lower overhead and improved accuracy. 
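 * (The clk_prepare_enable() -> clk_enable() change in this function
 * is deliberate: this callback can run in atomic context, where the
 * potentially sleeping clk_prepare()/clk_unprepare() must not be
 * called, so the clock is now prepared exactly once in
 * setup_clkevents() below and only enabled/disabled here.)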
*/ case CLOCK_EVT_MODE_PERIODIC: - clk_prepare_enable(tcd->clk); + clk_enable(tcd->clk); /* slow clock, count up to RC, then irq and restart */ __raw_writel(timer_clock @@ -126,7 +126,7 @@ static void tc_mode(enum clock_event_mode m, struct clock_event_device *d) break; case CLOCK_EVT_MODE_ONESHOT: - clk_prepare_enable(tcd->clk); + clk_enable(tcd->clk); /* slow clock, count up to RC, then irq and stop */ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP @@ -194,7 +194,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ret = clk_prepare_enable(t2_clk); if (ret) return ret; - clk_disable_unprepare(t2_clk); + clk_disable(t2_clk); clkevt.regs = tc->regs; clkevt.clk = t2_clk; diff --git a/drivers/clocksource/timer-marco.c b/drivers/clocksource/timer-marco.c index b52e1c078b9..dbd30398222 100644 --- a/drivers/clocksource/timer-marco.c +++ b/drivers/clocksource/timer-marco.c @@ -199,7 +199,7 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) action->dev_id = ce; BUG_ON(setup_irq(ce->irq, action)); - irq_set_affinity(action->irq, cpumask_of(cpu)); + irq_force_affinity(action->irq, cpumask_of(cpu)); clockevents_register_device(ce); return 0; @@ -252,15 +252,13 @@ static void __init sirfsoc_clockevent_init(void) } /* initialize the kernel jiffy timer source */ -static void __init sirfsoc_marco_timer_init(void) +static void __init sirfsoc_marco_timer_init(struct device_node *np) { unsigned long rate; u32 timer_div; struct clk *clk; - /* timer's input clock is io clock */ - clk = clk_get_sys("io", NULL); - + clk = of_clk_get(np, 0); BUG_ON(IS_ERR(clk)); rate = clk_get_rate(clk); @@ -303,6 +301,6 @@ static void __init sirfsoc_of_timer_init(struct device_node *np) if (!sirfsoc_timer1_irq.irq) panic("No irq passed for timer1 via DT\n"); - sirfsoc_marco_timer_init(); + sirfsoc_marco_timer_init(np); } CLOCKSOURCE_OF_DECLARE(sirfsoc_marco_timer, "sirf,marco-tick", sirfsoc_of_timer_init ); diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index 1a6b2d6356d..a722aac7ac0 100644 --- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c @@ -61,7 +61,8 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *ce = dev_id; - WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) & BIT(0))); + WARN_ON(!(readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_STATUS) & + BIT(0))); /* clear timer0 interrupt */ writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); @@ -77,9 +78,11 @@ static cycle_t sirfsoc_timer_read(struct clocksource *cs) u64 cycles; /* latch the 64-bit timer counter */ - writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); + writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, + sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); cycles = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_HI); - cycles = (cycles << 32) | readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); + cycles = (cycles << 32) | + readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); return cycles; } @@ -89,11 +92,13 @@ static int sirfsoc_timer_set_next_event(unsigned long delta, { unsigned long now, next; - writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); + writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, + sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); next = now + delta; writel_relaxed(next, sirfsoc_timer_base + SIRFSOC_TIMER_MATCH_0); - 
writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); + writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, + sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); now = readl_relaxed(sirfsoc_timer_base + SIRFSOC_TIMER_LATCHED_LO); return next - now > delta ? -ETIME : 0; @@ -108,10 +113,12 @@ static void sirfsoc_timer_set_mode(enum clock_event_mode mode, WARN_ON(1); break; case CLOCK_EVT_MODE_ONESHOT: - writel_relaxed(val | BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); + writel_relaxed(val | BIT(0), + sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); break; case CLOCK_EVT_MODE_SHUTDOWN: - writel_relaxed(val & ~BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); + writel_relaxed(val & ~BIT(0), + sirfsoc_timer_base + SIRFSOC_TIMER_INT_EN); break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_RESUME: @@ -123,10 +130,13 @@ static void sirfsoc_clocksource_suspend(struct clocksource *cs) { int i; - writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); + writel_relaxed(SIRFSOC_TIMER_LATCH_BIT, + sirfsoc_timer_base + SIRFSOC_TIMER_LATCH); for (i = 0; i < SIRFSOC_TIMER_REG_CNT; i++) - sirfsoc_timer_reg_val[i] = readl_relaxed(sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); + sirfsoc_timer_reg_val[i] = + readl_relaxed(sirfsoc_timer_base + + sirfsoc_timer_reg_list[i]); } static void sirfsoc_clocksource_resume(struct clocksource *cs) @@ -134,10 +144,13 @@ static void sirfsoc_clocksource_resume(struct clocksource *cs) int i; for (i = 0; i < SIRFSOC_TIMER_REG_CNT - 2; i++) - writel_relaxed(sirfsoc_timer_reg_val[i], sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); + writel_relaxed(sirfsoc_timer_reg_val[i], + sirfsoc_timer_base + sirfsoc_timer_reg_list[i]); - writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); - writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); + writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 2], + sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); + writel_relaxed(sirfsoc_timer_reg_val[SIRFSOC_TIMER_REG_CNT - 1], + sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); } static struct clock_event_device sirfsoc_clockevent = { @@ -185,11 +198,8 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np) unsigned long rate; struct clk *clk; - /* timer's input clock is io clock */ - clk = clk_get_sys("io", NULL); - + clk = of_clk_get(np, 0); BUG_ON(IS_ERR(clk)); - rate = clk_get_rate(clk); BUG_ON(rate < PRIMA2_CLOCK_FREQ); @@ -202,7 +212,7 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np) sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0); writel_relaxed(rate / PRIMA2_CLOCK_FREQ / 2 - 1, - sirfsoc_timer_base + SIRFSOC_TIMER_DIV); + sirfsoc_timer_base + SIRFSOC_TIMER_DIV); writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_LO); writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI); writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS); @@ -216,4 +226,5 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np) sirfsoc_clockevent_init(); } -CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, "sirf,prima2-tick", sirfsoc_prima2_timer_init); +CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer, + "sirf,prima2-tick", sirfsoc_prima2_timer_init); diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/versatile.c new file mode 100644 index 00000000000..e4c50ad2f9d --- /dev/null +++ b/drivers/clocksource/versatile.c @@ -0,0 +1,40 @@ +/* + * This program is free 
software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/clocksource.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/sched_clock.h> + +#define SYS_24MHZ 0x05c + +static void __iomem *versatile_sys_24mhz; + +static u32 notrace versatile_sys_24mhz_read(void) +{ + return readl(versatile_sys_24mhz); +} + +static void __init versatile_sched_clock_init(struct device_node *node) +{ + void __iomem *base = of_iomap(node, 0); + + if (!base) + return; + + versatile_sys_24mhz = base + SYS_24MHZ; + + setup_sched_clock(versatile_sys_24mhz_read, 32, 24000000); +} +CLOCKSOURCE_OF_DECLARE(versatile, "arm,vexpress-sysreg", + versatile_sched_clock_init); diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c index ca81809d159..7ce442148c3 100644 --- a/drivers/clocksource/zevio-timer.c +++ b/drivers/clocksource/zevio-timer.c @@ -212,4 +212,9 @@ error_free: return ret; } -CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add); +static void __init zevio_timer_init(struct device_node *node) +{ + BUG_ON(zevio_timer_add(node)); +} + +CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index 148d707a1d4..ccdd4c7e748 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg, return; /* Can only change if privileged. 
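 * The check now uses __netlink_ns_capable() so that the capability
 * test applies to the netlink socket that sent the request rather
 * than to the current process servicing it.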
*/ - if (!capable(CAP_NET_ADMIN)) { + if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) { err = EPERM; goto out; } diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 580503513f0..d2c7b4b8ffd 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -30,7 +30,7 @@ config ARM_EXYNOS_CPUFREQ config ARM_EXYNOS4210_CPUFREQ bool "SAMSUNG EXYNOS4210" - depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM + depends on CPU_EXYNOS4210 default y select ARM_EXYNOS_CPUFREQ help @@ -41,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ config ARM_EXYNOS4X12_CPUFREQ bool "SAMSUNG EXYNOS4x12" - depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM + depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 default y select ARM_EXYNOS_CPUFREQ help @@ -52,7 +52,7 @@ config ARM_EXYNOS4X12_CPUFREQ config ARM_EXYNOS5250_CPUFREQ bool "SAMSUNG EXYNOS5250" - depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM + depends on SOC_EXYNOS5250 default y select ARM_EXYNOS_CPUFREQ help diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c index 1bf6bbac3e0..09b9129c7bd 100644 --- a/drivers/cpufreq/cpufreq-cpu0.c +++ b/drivers/cpufreq/cpufreq-cpu0.c @@ -130,7 +130,7 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) return -ENOENT; } - cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0"); + cpu_reg = regulator_get_optional(cpu_dev, "cpu0"); if (IS_ERR(cpu_reg)) { /* * If cpu0 regulator supply node is present, but regulator is @@ -145,23 +145,23 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) PTR_ERR(cpu_reg)); } - cpu_clk = devm_clk_get(cpu_dev, NULL); + cpu_clk = clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); pr_err("failed to get cpu0 clock: %d\n", ret); - goto out_put_node; + goto out_put_reg; } ret = of_init_opp_table(cpu_dev); if (ret) { pr_err("failed to init OPP table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); if (ret) { pr_err("failed to init cpufreq table: %d\n", ret); - goto out_put_node; + goto out_put_clk; } of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance); @@ -216,6 +216,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) out_free_table: dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_put_clk: + if (!IS_ERR(cpu_clk)) + clk_put(cpu_clk); +out_put_reg: + if (!IS_ERR(cpu_reg)) + regulator_put(cpu_reg); out_put_node: of_node_put(np); return ret; diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index ba43991ba98..e1c6433b16e 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c @@ -366,6 +366,11 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, break; case CPUFREQ_GOV_LIMITS: + mutex_lock(&dbs_data->mutex); + if (!cpu_cdbs->cur_policy) { + mutex_unlock(&dbs_data->mutex); + break; + } mutex_lock(&cpu_cdbs->timer_mutex); if (policy->max < cpu_cdbs->cur_policy->cur) __cpufreq_driver_target(cpu_cdbs->cur_policy, @@ -375,6 +380,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, policy->min, CPUFREQ_RELATION_L); dbs_check_cpu(dbs_data, cpu); mutex_unlock(&cpu_cdbs->timer_mutex); + mutex_unlock(&dbs_data->mutex); break; } return 0; diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index f99cfe24e7b..348c8bafe43 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -17,8 +17,7 @@ #include <linux/regulator/consumer.h> #include <linux/cpufreq.h> 
#include <linux/platform_device.h> - -#include <plat/cpu.h> +#include <linux/of.h> #include "exynos-cpufreq.h" @@ -163,14 +162,22 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) if (!exynos_info) return -ENOMEM; - if (soc_is_exynos4210()) + if (of_machine_is_compatible("samsung,exynos4210")) { + exynos_info->type = EXYNOS_SOC_4210; ret = exynos4210_cpufreq_init(exynos_info); - else if (soc_is_exynos4212() || soc_is_exynos4412()) + } else if (of_machine_is_compatible("samsung,exynos4212")) { + exynos_info->type = EXYNOS_SOC_4212; + ret = exynos4x12_cpufreq_init(exynos_info); + } else if (of_machine_is_compatible("samsung,exynos4412")) { + exynos_info->type = EXYNOS_SOC_4412; ret = exynos4x12_cpufreq_init(exynos_info); - else if (soc_is_exynos5250()) + } else if (of_machine_is_compatible("samsung,exynos5250")) { + exynos_info->type = EXYNOS_SOC_5250; ret = exynos5250_cpufreq_init(exynos_info); - else - return 0; + } else { + pr_err("%s: Unknown SoC type\n", __func__); + return -ENODEV; + } if (ret) goto err_vdd_arm; diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h index 3ddade8a512..51af42e1b7f 100644 --- a/drivers/cpufreq/exynos-cpufreq.h +++ b/drivers/cpufreq/exynos-cpufreq.h @@ -17,6 +17,13 @@ enum cpufreq_level_index { L20, }; +enum exynos_soc_type { + EXYNOS_SOC_4210, + EXYNOS_SOC_4212, + EXYNOS_SOC_4412, + EXYNOS_SOC_5250, +}; + #define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \ { \ .freq = (f) * 1000, \ @@ -34,6 +41,7 @@ struct apll_freq { }; struct exynos_dvfs_info { + enum exynos_soc_type type; unsigned long mpll_freq_khz; unsigned int pll_safe_idx; struct clk *cpu_clk; @@ -41,6 +49,7 @@ struct exynos_dvfs_info { struct cpufreq_frequency_table *freq_table; void (*set_freq)(unsigned int, unsigned int); bool (*need_apll_change)(unsigned int, unsigned int); + void __iomem *cmu_regs; }; #ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ @@ -68,24 +77,21 @@ static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) } #endif -#include <plat/cpu.h> -#include <mach/map.h> - -#define EXYNOS4_CLKSRC_CPU (S5P_VA_CMU + 0x14200) -#define EXYNOS4_CLKMUX_STATCPU (S5P_VA_CMU + 0x14400) +#define EXYNOS4_CLKSRC_CPU 0x14200 +#define EXYNOS4_CLKMUX_STATCPU 0x14400 -#define EXYNOS4_CLKDIV_CPU (S5P_VA_CMU + 0x14500) -#define EXYNOS4_CLKDIV_CPU1 (S5P_VA_CMU + 0x14504) -#define EXYNOS4_CLKDIV_STATCPU (S5P_VA_CMU + 0x14600) -#define EXYNOS4_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x14604) +#define EXYNOS4_CLKDIV_CPU 0x14500 +#define EXYNOS4_CLKDIV_CPU1 0x14504 +#define EXYNOS4_CLKDIV_STATCPU 0x14600 +#define EXYNOS4_CLKDIV_STATCPU1 0x14604 #define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT (16) #define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT) -#define EXYNOS5_APLL_LOCK (S5P_VA_CMU + 0x00000) -#define EXYNOS5_APLL_CON0 (S5P_VA_CMU + 0x00100) -#define EXYNOS5_CLKMUX_STATCPU (S5P_VA_CMU + 0x00400) -#define EXYNOS5_CLKDIV_CPU0 (S5P_VA_CMU + 0x00500) -#define EXYNOS5_CLKDIV_CPU1 (S5P_VA_CMU + 0x00504) -#define EXYNOS5_CLKDIV_STATCPU0 (S5P_VA_CMU + 0x00600) -#define EXYNOS5_CLKDIV_STATCPU1 (S5P_VA_CMU + 0x00604) +#define EXYNOS5_APLL_LOCK 0x00000 +#define EXYNOS5_APLL_CON0 0x00100 +#define EXYNOS5_CLKMUX_STATCPU 0x00400 +#define EXYNOS5_CLKDIV_CPU0 0x00500 +#define EXYNOS5_CLKDIV_CPU1 0x00504 +#define EXYNOS5_CLKDIV_STATCPU0 0x00600 +#define EXYNOS5_CLKDIV_STATCPU1 0x00604 diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c index 6384e5b9a34..61a54310a1b 100644 --- 
a/drivers/cpufreq/exynos4210-cpufreq.c +++ b/drivers/cpufreq/exynos4210-cpufreq.c @@ -16,6 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -23,6 +25,7 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos4210_volt_table[] = { 1250000, 1150000, 1050000, 975000, 950000, @@ -60,20 +63,20 @@ static void exynos4210_set_clkdiv(unsigned int div_index) tmp = apll_freq_4210[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); do { - tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU); } while (tmp & 0x1111111); /* Change Divider - CPU1 */ tmp = apll_freq_4210[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); do { - tmp = __raw_readl(EXYNOS4_CLKDIV_STATCPU1); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); } while (tmp & 0x11); } @@ -85,7 +88,7 @@ static void exynos4210_set_apll(unsigned int index) clk_set_parent(moutcore, mout_mpll); do { - tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); tmp &= 0x7; } while (tmp != 0x2); @@ -96,7 +99,7 @@ static void exynos4210_set_apll(unsigned int index) clk_set_parent(moutcore, mout_apll); do { - tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); } @@ -115,8 +118,30 @@ static void exynos4210_set_frequency(unsigned int old_index, int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. 
+ */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -143,6 +168,8 @@ int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) info->freq_table = exynos4210_freq_table; info->set_freq = exynos4210_set_frequency; + cpufreq = info; + return 0; err_mout_apll: diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c index 466c76ad335..351a2074cfe 100644 --- a/drivers/cpufreq/exynos4x12-cpufreq.c +++ b/drivers/cpufreq/exynos4x12-cpufreq.c @@ -16,6 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -23,6 +25,7 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos4x12_volt_table[] = { 1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500, @@ -100,28 +103,26 @@ static struct apll_freq apll_freq_4412[] = { static void exynos4x12_set_clkdiv(unsigned int div_index) { unsigned int tmp; - unsigned int stat_cpu1; /* Change Divider - CPU0 */ tmp = apll_freq_4x12[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); - while (__raw_readl(EXYNOS4_CLKDIV_STATCPU) & 0x11111111) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU) + & 0x11111111) cpu_relax(); /* Change Divider - CPU1 */ tmp = apll_freq_4x12[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS4_CLKDIV_CPU1); - if (soc_is_exynos4212()) - stat_cpu1 = 0x11; - else - stat_cpu1 = 0x111; + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); - while (__raw_readl(EXYNOS4_CLKDIV_STATCPU1) & stat_cpu1) + do { cpu_relax(); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); + } while (tmp != 0x0); } static void exynos4x12_set_apll(unsigned int index) @@ -133,7 +134,7 @@ static void exynos4x12_set_apll(unsigned int index) do { cpu_relax(); - tmp = (__raw_readl(EXYNOS4_CLKMUX_STATCPU) + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); tmp &= 0x7; } while (tmp != 0x2); @@ -145,7 +146,7 @@ static void exynos4x12_set_apll(unsigned int index) do { cpu_relax(); - tmp = __raw_readl(EXYNOS4_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); } @@ -164,8 +165,30 @@ static void exynos4x12_set_frequency(unsigned int old_index, int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. 
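+ * Losing soc_is_exynos4212() also explains the set_clkdiv() change
+ * above: rather than masking 0x11 or 0x111 depending on the SoC,
+ * the poll now waits for the whole EXYNOS4_CLKDIV_STATCPU1 register
+ * to read back as zero, which is correct for both 4212 and 4412.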
+ */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -184,7 +207,7 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) if (IS_ERR(mout_apll)) goto err_mout_apll; - if (soc_is_exynos4212()) + if (info->type == EXYNOS_SOC_4212) apll_freq_4x12 = apll_freq_4212; else apll_freq_4x12 = apll_freq_4412; @@ -197,6 +220,8 @@ int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info) info->freq_table = exynos4x12_freq_table; info->set_freq = exynos4x12_set_frequency; + cpufreq = info; + return 0; err_mout_apll: diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c index 363a0b3fe1b..c91ce69dc63 100644 --- a/drivers/cpufreq/exynos5250-cpufreq.c +++ b/drivers/cpufreq/exynos5250-cpufreq.c @@ -16,8 +16,8 @@ #include <linux/io.h> #include <linux/slab.h> #include <linux/cpufreq.h> - -#include <mach/map.h> +#include <linux/of.h> +#include <linux/of_address.h> #include "exynos-cpufreq.h" @@ -25,6 +25,7 @@ static struct clk *cpu_clk; static struct clk *moutcore; static struct clk *mout_mpll; static struct clk *mout_apll; +static struct exynos_dvfs_info *cpufreq; static unsigned int exynos5250_volt_table[] = { 1300000, 1250000, 1225000, 1200000, 1150000, @@ -87,17 +88,18 @@ static void set_clkdiv(unsigned int div_index) tmp = apll_freq_5250[div_index].clk_div_cpu0; - __raw_writel(tmp, EXYNOS5_CLKDIV_CPU0); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0); - while (__raw_readl(EXYNOS5_CLKDIV_STATCPU0) & 0x11111111) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0) + & 0x11111111) cpu_relax(); /* Change Divider - CPU1 */ tmp = apll_freq_5250[div_index].clk_div_cpu1; - __raw_writel(tmp, EXYNOS5_CLKDIV_CPU1); + __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1); - while (__raw_readl(EXYNOS5_CLKDIV_STATCPU1) & 0x11) + while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11) cpu_relax(); } @@ -111,7 +113,8 @@ static void set_apll(unsigned int index) do { cpu_relax(); - tmp = (__raw_readl(EXYNOS5_CLKMUX_STATCPU) >> 16); + tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU) + >> 16); tmp &= 0x7; } while (tmp != 0x2); @@ -122,7 +125,7 @@ static void set_apll(unsigned int index) do { cpu_relax(); - tmp = __raw_readl(EXYNOS5_CLKMUX_STATCPU); + tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU); tmp &= (0x7 << 16); } while (tmp != (0x1 << 16)); } @@ -141,8 +144,30 @@ static void exynos5250_set_frequency(unsigned int old_index, int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) { + struct device_node *np; unsigned long rate; + /* + * HACK: This is a temporary workaround to get access to clock + * controller registers directly and remove static mappings and + * dependencies on platform headers. It is necessary to enable + * Exynos multi-platform support and will be removed together with + * this whole driver as soon as Exynos gets migrated to use + * cpufreq-cpu0 driver. 
+ */ + np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock"); + if (!np) { + pr_err("%s: failed to find clock controller DT node\n", + __func__); + return -ENODEV; + } + + info->cmu_regs = of_iomap(np, 0); + if (!info->cmu_regs) { + pr_err("%s: failed to map CMU registers\n", __func__); + return -EFAULT; + } + cpu_clk = clk_get(NULL, "armclk"); if (IS_ERR(cpu_clk)) return PTR_ERR(cpu_clk); @@ -169,6 +194,8 @@ int exynos5250_cpufreq_init(struct exynos_dvfs_info *info) info->freq_table = exynos5250_freq_table; info->set_freq = exynos5250_set_frequency; + cpufreq = info; + return 0; err_mout_apll: diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 099967302bf..eab8ccfe6be 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -37,6 +37,7 @@ #define BYT_RATIOS 0x66a #define BYT_VIDS 0x66b #define BYT_TURBO_RATIOS 0x66c +#define BYT_TURBO_VIDS 0x66d #define FRAC_BITS 6 @@ -70,8 +71,9 @@ struct pstate_data { }; struct vid_data { - int32_t min; - int32_t max; + int min; + int max; + int turbo; int32_t ratio; }; @@ -359,14 +361,14 @@ static int byt_get_min_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 8) & 0xFF; + return (value >> 8) & 0x3F; } static int byt_get_max_pstate(void) { u64 value; rdmsrl(BYT_RATIOS, value); - return (value >> 16) & 0xFF; + return (value >> 16) & 0x3F; } static int byt_get_turbo_pstate(void) @@ -393,6 +395,9 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate) vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); vid = fp_toint(vid_fp); + if (pstate > cpudata->pstate.max_pstate) + vid = cpudata->vid.turbo; + val |= vid; wrmsrl(MSR_IA32_PERF_CTL, val); @@ -402,13 +407,17 @@ static void byt_get_vid(struct cpudata *cpudata) { u64 value; + rdmsrl(BYT_VIDS, value); - cpudata->vid.min = int_tofp((value >> 8) & 0x7f); - cpudata->vid.max = int_tofp((value >> 16) & 0x7f); + cpudata->vid.min = int_tofp((value >> 8) & 0x3f); + cpudata->vid.max = int_tofp((value >> 16) & 0x3f); cpudata->vid.ratio = div_fp( cpudata->vid.max - cpudata->vid.min, int_tofp(cpudata->pstate.max_pstate - cpudata->pstate.min_pstate)); + + rdmsrl(BYT_TURBO_VIDS, value); + cpudata->vid.turbo = value & 0x7f; } @@ -545,12 +554,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) if (pstate_funcs.get_vid) pstate_funcs.get_vid(cpu); - - /* - * goto max pstate so we don't slow up boot if we are built-in if we are - * a module we will take care of it during normal operation - */ - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); + intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); } static inline void intel_pstate_calc_busy(struct cpudata *cpu, @@ -695,11 +699,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu = all_cpu_data[cpunum]; intel_pstate_get_cpu_pstates(cpu); - if (!cpu->pstate.current_pstate) { - all_cpu_data[cpunum] = NULL; - kfree(cpu); - return -ENODATA; - } cpu->cpu = cpunum; @@ -710,7 +709,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); intel_pstate_sample(cpu); - intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); add_timer_on(&cpu->timer, cpunum); diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index d00e5d1abd2..5c4369b5d83 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index, * Sets a new clock ratio. 
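 * Now returns 0 on success or a negative errno; raising the
 * frequency-transition notifications is the caller's job, as in the
 * cpufreq_freq_transition_begin()/_end() pair added around the
 * longhaul_setstate() call in longhaul_exit().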
*/ -static void longhaul_setstate(struct cpufreq_policy *policy, +static int longhaul_setstate(struct cpufreq_policy *policy, unsigned int table_index) { unsigned int mults_index; @@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy, /* Safety precautions */ mult = mults[mults_index & 0x1f]; if (mult == -1) - return; + return -EINVAL; + speed = calc_speed(mult); if ((speed > highest_speed) || (speed < lowest_speed)) - return; + return -EINVAL; + /* Voltage transition before frequency transition? */ if (can_scale_voltage && longhaul_index < table_index) dir = 1; @@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy, freqs.old = calc_speed(longhaul_get_cpu_mult()); freqs.new = speed; - cpufreq_freq_transition_begin(policy, &freqs); - pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", fsb, mult/10, mult%10, print_speed(speed/1000)); retry_loop: @@ -385,12 +385,14 @@ retry_loop: goto retry_loop; } } - /* Report true CPU frequency */ - cpufreq_freq_transition_end(policy, &freqs, 0); - if (!bm_timeout) + if (!bm_timeout) { printk(KERN_INFO PFX "Warning: Timeout while waiting for " "idle PCI bus.\n"); + return -EBUSY; + } + + return 0; } /* @@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy, unsigned int i; unsigned int dir = 0; u8 vid, current_vid; + int retval = 0; if (!can_scale_voltage) - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); else { /* On test system voltage transitions exceeding single * step up or down were turning motherboard off. Both @@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy, while (i != table_index) { vid = (longhaul_table[i].driver_data >> 8) & 0x1f; if (vid != current_vid) { - longhaul_setstate(policy, i); + retval = longhaul_setstate(policy, i); current_vid = vid; msleep(200); } @@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy, else i--; } - longhaul_setstate(policy, table_index); + retval = longhaul_setstate(policy, table_index); } + longhaul_index = table_index; - return 0; + return retval; } @@ -968,7 +972,15 @@ static void __exit longhaul_exit(void) for (i = 0; i < numscales; i++) { if (mults[i] == maxmult) { + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = longhaul_table[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); longhaul_setstate(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); break; } } diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index f0bc31f5db2..d4add862194 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c @@ -62,7 +62,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy, set_cpus_allowed_ptr(current, &cpus_allowed); /* setting the cpu frequency */ - clk_set_rate(policy->clk, freq); + clk_set_rate(policy->clk, freq * 1000); return 0; } @@ -92,7 +92,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) i++) loongson2_clockmod_table[i].frequency = (rate * i) / 8; - ret = clk_set_rate(cpuclk, rate); + ret = clk_set_rate(cpuclk, rate * 1000); if (ret) { clk_put(cpuclk); return ret; diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c index 49f120e1bc7..78904e6ca4a 100644 --- a/drivers/cpufreq/powernow-k6.c +++ b/drivers/cpufreq/powernow-k6.c @@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i) static int powernow_k6_target(struct 
cpufreq_policy *policy, unsigned int best_i) { - struct cpufreq_freqs freqs; if (clock_ratio[best_i].driver_data > max_multiplier) { printk(KERN_ERR PFX "invalid target frequency\n"); return -EINVAL; } - freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); - freqs.new = busfreq * clock_ratio[best_i].driver_data; - - cpufreq_freq_transition_begin(policy, &freqs); - powernow_k6_set_cpu_multiplier(best_i); - cpufreq_freq_transition_end(policy, &freqs, 0); - return 0; } @@ -227,9 +219,20 @@ have_busfreq: static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) { unsigned int i; - for (i = 0; i < 8; i++) { - if (i == max_multiplier) + + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + if (clock_ratio[i].driver_data == max_multiplier) { + struct cpufreq_freqs freqs; + + freqs.old = policy->cur; + freqs.new = clock_ratio[i].frequency; + freqs.flags = 0; + + cpufreq_freq_transition_begin(policy, &freqs); powernow_k6_target(policy, i); + cpufreq_freq_transition_end(policy, &freqs, 0); + break; + } } return 0; } diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c index f911645c3f6..e61e224475a 100644 --- a/drivers/cpufreq/powernow-k7.c +++ b/drivers/cpufreq/powernow-k7.c @@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) freqs.new = powernow_table[index].frequency; - cpufreq_freq_transition_begin(policy, &freqs); - /* Now do the magic poking into the MSRs. */ if (have_a0 == 1) /* A0 errata 5 */ @@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index) if (have_a0 == 1) local_irq_enable(); - cpufreq_freq_transition_end(policy, &freqs, 0); - return 0; } diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c index a1ca3dd04a8..0af618abeba 100644 --- a/drivers/cpufreq/ppc-corenet-cpufreq.c +++ b/drivers/cpufreq/ppc-corenet-cpufreq.c @@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) struct cpufreq_frequency_table *table; struct cpu_data *data; unsigned int cpu = policy->cpu; + u64 transition_latency_hz; np = of_get_cpu_node(cpu, NULL); if (!np) @@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) for_each_cpu(i, per_cpu(cpu_mask, cpu)) per_cpu(cpu_data, i) = data; + transition_latency_hz = 12ULL * NSEC_PER_SEC; policy->cpuinfo.transition_latency = - (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq(); + do_div(transition_latency_hz, fsl_get_sys_freq()); + of_node_put(np); return 0; diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index be1b2b5c975..227ebf7c1ee 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -141,6 +141,7 @@ static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg) static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg) { + cfg->mpll = _clk_mpll; (cfg->info->set_fvco)(cfg); } diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 97ccc31dbdd..ae1d78ea7df 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -1,6 +1,11 @@ # # ARM CPU Idle drivers # +config ARM_ARMADA_370_XP_CPUIDLE + bool "CPU Idle Driver for Armada 370/XP family processors" + depends on ARCH_MVEBU + help + Select this to enable cpuidle on Armada 370/XP processors. 
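One subtlety in the ppc-corenet hunk above: do_div(n, base) divides the 64-bit n in place, leaving the quotient in n, and returns the 32-bit remainder. Assigning do_div()'s return value therefore yields the remainder, not the quotient, which makes expressions like the transition-latency assignment above worth double-checking. A minimal sketch of the semantics (values hypothetical):

#include <linux/types.h>
#include <asm/div64.h>

/* do_div(): the quotient lands in the variable, the remainder is returned. */
static u32 latency_quotient(u64 total_ns, u32 sys_freq)
{
        u32 rem = do_div(total_ns, sys_freq);   /* total_ns /= sys_freq */

        (void)rem;              /* the remainder, unused here */
        return (u32)total_ns;   /* the quotient */
}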
config ARM_BIG_LITTLE_CPUIDLE bool "Support for ARM big.LITTLE processors" @@ -44,3 +49,9 @@ config ARM_AT91_CPUIDLE depends on ARCH_AT91 help Select this to enable cpuidle for AT91 processors + +config ARM_EXYNOS_CPUIDLE + bool "CPU Idle Driver for the Exynos processors" + depends on ARCH_EXYNOS + help + Select this to enable cpuidle for Exynos processors diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index f71ae1b373c..cd3ab59f846 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -7,12 +7,14 @@ obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o ################################################################################## # ARM SoC drivers +obj-$(CONFIG_ARM_ARMADA_370_XP_CPUIDLE) += cpuidle-armada-370-xp.o obj-$(CONFIG_ARM_BIG_LITTLE_CPUIDLE) += cpuidle-big_little.o obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o +obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o ############################################################################### # POWERPC drivers diff --git a/drivers/cpuidle/cpuidle-armada-370-xp.c b/drivers/cpuidle/cpuidle-armada-370-xp.c new file mode 100644 index 00000000000..28587d0f394 --- /dev/null +++ b/drivers/cpuidle/cpuidle-armada-370-xp.c @@ -0,0 +1,93 @@ +/* + * Marvell Armada 370 and Armada XP SoC cpuidle driver + * + * Copyright (C) 2014 Marvell + * + * Nadav Haklai <nadavh@marvell.com> + * Gregory CLEMENT <gregory.clement@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied.
+ * + * Maintainer: Gregory CLEMENT <gregory.clement@free-electrons.com> + */ + +#include <linux/cpu_pm.h> +#include <linux/cpuidle.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/suspend.h> +#include <linux/platform_device.h> +#include <asm/cpuidle.h> + +#define ARMADA_370_XP_MAX_STATES 3 +#define ARMADA_370_XP_FLAG_DEEP_IDLE 0x10000 + +static int (*armada_370_xp_cpu_suspend)(int); + +static int armada_370_xp_enter_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + int ret; + bool deepidle = false; + cpu_pm_enter(); + + if (drv->states[index].flags & ARMADA_370_XP_FLAG_DEEP_IDLE) + deepidle = true; + + ret = armada_370_xp_cpu_suspend(deepidle); + if (ret) + return ret; + + cpu_pm_exit(); + + return index; +} + +static struct cpuidle_driver armada_370_xp_idle_driver = { + .name = "armada_370_xp_idle", + .states[0] = ARM_CPUIDLE_WFI_STATE, + .states[1] = { + .enter = armada_370_xp_enter_idle, + .exit_latency = 10, + .power_usage = 50, + .target_residency = 100, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "MV CPU IDLE", + .desc = "CPU power down", + }, + .states[2] = { + .enter = armada_370_xp_enter_idle, + .exit_latency = 100, + .power_usage = 5, + .target_residency = 1000, + .flags = CPUIDLE_FLAG_TIME_VALID | + ARMADA_370_XP_FLAG_DEEP_IDLE, + .name = "MV CPU DEEP IDLE", + .desc = "CPU and L2 Fabric power down", + }, + .state_count = ARMADA_370_XP_MAX_STATES, +}; + +static int armada_370_xp_cpuidle_probe(struct platform_device *pdev) +{ + + armada_370_xp_cpu_suspend = (void *)(pdev->dev.platform_data); + return cpuidle_register(&armada_370_xp_idle_driver, NULL); +} + +static struct platform_driver armada_370_xp_cpuidle_plat_driver = { + .driver = { + .name = "cpuidle-armada-370-xp", + .owner = THIS_MODULE, + }, + .probe = armada_370_xp_cpuidle_probe, +}; + +module_platform_driver(armada_370_xp_cpuidle_plat_driver); + +MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>"); +MODULE_DESCRIPTION("Armada 370/XP cpu idle driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle-exynos.c b/drivers/cpuidle/cpuidle-exynos.c new file mode 100644 index 00000000000..7c015126382 --- /dev/null +++ b/drivers/cpuidle/cpuidle-exynos.c @@ -0,0 +1,99 @@ +/* linux/arch/arm/mach-exynos/cpuidle.c + * + * Copyright (c) 2011 Samsung Electronics Co., Ltd. + * http://www.samsung.com + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
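Both new drivers receive the SoC-specific low-power entry function through platform_data and cast it back to a function pointer in probe. On the arch side, the wiring could look like this minimal sketch (the actual mach-mvebu code may differ; the device name matches the driver above, everything else is assumed):

#include <linux/platform_device.h>

int armada_370_xp_cpu_suspend(int deep_idle);   /* SoC PM code, elsewhere */

static struct platform_device armada_370_xp_cpuidle_dev = {
        .name = "cpuidle-armada-370-xp",        /* matches the driver above */
        .id   = -1,
};

static int __init register_cpuidle_device(void)
{
        /* The probe above casts dev.platform_data straight back to
         * int (*)(int), so hand the function pointer over as-is. */
        armada_370_xp_cpuidle_dev.dev.platform_data =
                (void *)armada_370_xp_cpu_suspend;

        return platform_device_register(&armada_370_xp_cpuidle_dev);
}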
+*/ + +#include <linux/cpuidle.h> +#include <linux/cpu_pm.h> +#include <linux/export.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <asm/proc-fns.h> +#include <asm/suspend.h> +#include <asm/cpuidle.h> + +static void (*exynos_enter_aftr)(void); + +static int idle_finisher(unsigned long flags) +{ + exynos_enter_aftr(); + cpu_do_idle(); + + return 1; +} + +static int exynos_enter_core0_aftr(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + cpu_pm_enter(); + cpu_suspend(0, idle_finisher); + cpu_pm_exit(); + + return index; +} + +static int exynos_enter_lowpower(struct cpuidle_device *dev, + struct cpuidle_driver *drv, + int index) +{ + int new_index = index; + + /* AFTR can only be entered when cores other than CPU0 are offline */ + if (num_online_cpus() > 1 || dev->cpu != 0) + new_index = drv->safe_state_index; + + if (new_index == 0) + return arm_cpuidle_simple_enter(dev, drv, new_index); + else + return exynos_enter_core0_aftr(dev, drv, new_index); +} + +static struct cpuidle_driver exynos_idle_driver = { + .name = "exynos_idle", + .owner = THIS_MODULE, + .states = { + [0] = ARM_CPUIDLE_WFI_STATE, + [1] = { + .enter = exynos_enter_lowpower, + .exit_latency = 300, + .target_residency = 100000, + .flags = CPUIDLE_FLAG_TIME_VALID, + .name = "C1", + .desc = "ARM power down", + }, + }, + .state_count = 2, + .safe_state_index = 0, +}; + +static int exynos_cpuidle_probe(struct platform_device *pdev) +{ + int ret; + + exynos_enter_aftr = (void *)(pdev->dev.platform_data); + + ret = cpuidle_register(&exynos_idle_driver, NULL); + if (ret) { + dev_err(&pdev->dev, "failed to register cpuidle driver\n"); + return ret; + } + + return 0; +} + +static struct platform_driver exynos_cpuidle_driver = { + .probe = exynos_cpuidle_probe, + .driver = { + .name = "exynos_cpuidle", + .owner = THIS_MODULE, + }, +}; + +module_platform_driver(exynos_cpuidle_driver); diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c index 9f25f529602..0eabd81e1a9 100644 --- a/drivers/crypto/caam/error.c +++ b/drivers/crypto/caam/error.c @@ -16,9 +16,13 @@ char *tmp; \ \ tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \ - sprintf(tmp, format, param); \ - strcat(str, tmp); \ - kfree(tmp); \ + if (likely(tmp)) { \ + sprintf(tmp, format, param); \ + strcat(str, tmp); \ + kfree(tmp); \ + } else { \ + strcat(str, "kmalloc failure in SPRINTFCAT"); \ + } \ } static void report_jump_idx(u32 status, char *outstr) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a886713937f..d5d30ed863c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref) dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_BIDIRECTIONAL); } + cnt = unmap->map_cnt; mempool_free(unmap, __get_unmap_pool(cnt)->pool); } @@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) memset(unmap, 0, sizeof(*unmap)); kref_init(&unmap->kref); unmap->dev = dev; + unmap->map_cnt = nr; return unmap; } diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cfdbb92aae1..7a740769c2f 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Disable BLOCK interrupts as well */ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, - IRQF_SHARED, "dw_dmac", dw); - if (err) - return err; - /* Create a pool 
of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, sizeof(struct dw_desc), 4, 0); @@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); + err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, + "dw_dmac", dw); + if (err) + return err; + INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; @@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip) dw_dma_off(dw); dma_async_device_unregister(&dw->dma); + free_irq(chip->irq, dw); tasklet_kill(&dw->tasklet); list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 926360c2db6..d08c4dedef3 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -57,14 +57,48 @@ #define EDMA_MAX_SLOTS MAX_NR_SG #define EDMA_DESCRIPTORS 16 +struct edma_pset { + u32 len; + dma_addr_t addr; + struct edmacc_param param; +}; + struct edma_desc { struct virt_dma_desc vdesc; struct list_head node; + enum dma_transfer_direction direction; int cyclic; int absync; int pset_nr; + struct edma_chan *echan; int processed; - struct edmacc_param pset[0]; + + /* + * The following 4 elements are used for residue accounting. + * + * - processed_stat: the number of SG elements we have traversed + * so far to cover accounting. This is updated directly to processed + * during edma_callback and is always <= processed, because processed + * refers to the number of pending transfers (programmed to the EDMA + * controller), whereas processed_stat tracks the number of transfers + * accounted for so far. + * + * - residue: The number of bytes we have left to transfer for this desc + * + * - residue_stat: The residue in bytes of data we have covered + * so far for accounting. This is updated directly to residue + * during callbacks to keep it current. + * + * - sg_len: Tracks the length of the current intermediate transfer; + * this is required to update the residue during intermediate transfer + * completion callback.
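Concretely, the intermediate-completion path of edma_callback() later in this patch keeps these counters current as follows (a self-contained sketch; the struct simply mirrors the fields documented above):

#include <linux/types.h>

struct desc_stats {             /* mirrors the edma_desc fields above */
        int processed, processed_stat;
        u32 sg_len, residue, residue_stat;
};

/* One intermediate transfer of sg_len bytes just completed. */
static void account_intermediate(struct desc_stats *d)
{
        d->residue -= d->sg_len;        /* bytes actually moved */
        d->residue_stat = d->residue;   /* snapshot for tx_status */
        d->processed_stat = d->processed;
}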
+ */ + int processed_stat; + u32 sg_len; + u32 residue; + u32 residue_stat; + + struct edma_pset pset[0]; }; struct edma_cc; @@ -136,12 +170,14 @@ static void edma_execute(struct edma_chan *echan) /* Find out how many left */ left = edesc->pset_nr - edesc->processed; nslots = min(MAX_NR_SG, left); + edesc->sg_len = 0; /* Write descriptor PaRAM set(s) */ for (i = 0; i < nslots; i++) { j = i + edesc->processed; - edma_write_slot(echan->slot[i], &edesc->pset[j]); - dev_dbg(echan->vchan.chan.device->dev, + edma_write_slot(echan->slot[i], &edesc->pset[j].param); + edesc->sg_len += edesc->pset[j].len; + dev_vdbg(echan->vchan.chan.device->dev, "\n pset[%d]:\n" " chnum\t%d\n" " slot\t%d\n" @@ -154,14 +190,14 @@ static void edma_execute(struct edma_chan *echan) " cidx\t%08x\n" " lkrld\t%08x\n", j, echan->ch_num, echan->slot[i], - edesc->pset[j].opt, - edesc->pset[j].src, - edesc->pset[j].dst, - edesc->pset[j].a_b_cnt, - edesc->pset[j].ccnt, - edesc->pset[j].src_dst_bidx, - edesc->pset[j].src_dst_cidx, - edesc->pset[j].link_bcntrld); + edesc->pset[j].param.opt, + edesc->pset[j].param.src, + edesc->pset[j].param.dst, + edesc->pset[j].param.a_b_cnt, + edesc->pset[j].param.ccnt, + edesc->pset[j].param.src_dst_bidx, + edesc->pset[j].param.src_dst_cidx, + edesc->pset[j].param.link_bcntrld); /* Link to the previous slot if not the last set */ if (i != (nslots - 1)) edma_link(echan->slot[i], echan->slot[i+1]); @@ -183,7 +219,8 @@ static void edma_execute(struct edma_chan *echan) } if (edesc->processed <= MAX_NR_SG) { - dev_dbg(dev, "first transfer starting %d\n", echan->ch_num); + dev_dbg(dev, "first transfer starting on channel %d\n", + echan->ch_num); edma_start(echan->ch_num); } else { dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", @@ -197,7 +234,7 @@ static void edma_execute(struct edma_chan *echan) * MAX_NR_SG */ if (echan->missed) { - dev_dbg(dev, "missed event in execute detected\n"); + dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); edma_clean_channel(echan->ch_num); edma_stop(echan->ch_num); edma_start(echan->ch_num); @@ -242,6 +279,26 @@ static int edma_slave_config(struct edma_chan *echan, return 0; } +static int edma_dma_pause(struct edma_chan *echan) +{ + /* Pause/Resume only allowed with cyclic mode */ + if (!echan->edesc->cyclic) + return -EINVAL; + + edma_pause(echan->ch_num); + return 0; +} + +static int edma_dma_resume(struct edma_chan *echan) +{ + /* Pause/Resume only allowed with cyclic mode */ + if (!echan->edesc->cyclic) + return -EINVAL; + + edma_resume(echan->ch_num); + return 0; +} + static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { @@ -257,6 +314,14 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, config = (struct dma_slave_config *)arg; ret = edma_slave_config(echan, config); break; + case DMA_PAUSE: + ret = edma_dma_pause(echan); + break; + + case DMA_RESUME: + ret = edma_dma_resume(echan); + break; + default: ret = -ENOSYS; } @@ -275,18 +340,23 @@ static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * @dma_length: Total length of the DMA transfer * @direction: Direction of the transfer */ -static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, +static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, enum dma_slave_buswidth dev_width, unsigned int dma_length, enum dma_transfer_direction direction) { struct edma_chan *echan = to_edma_chan(chan); struct device *dev = 
chan->device->dev; + struct edmacc_param *param = &epset->param; int acnt, bcnt, ccnt, cidx; int src_bidx, dst_bidx, src_cidx, dst_cidx; int absync; acnt = dev_width; + + /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ + if (!burst) + burst = 1; /* * If the maxburst is equal to the fifo width, use * A-synced transfers. This allows for large contiguous @@ -337,41 +407,50 @@ static int edma_config_pset(struct dma_chan *chan, struct edmacc_param *pset, cidx = acnt * bcnt; } + epset->len = dma_length; + if (direction == DMA_MEM_TO_DEV) { src_bidx = acnt; src_cidx = cidx; dst_bidx = 0; dst_cidx = 0; + epset->addr = src_addr; } else if (direction == DMA_DEV_TO_MEM) { src_bidx = 0; src_cidx = 0; dst_bidx = acnt; dst_cidx = cidx; + epset->addr = dst_addr; + } else if (direction == DMA_MEM_TO_MEM) { + src_bidx = acnt; + src_cidx = cidx; + dst_bidx = acnt; + dst_cidx = cidx; } else { dev_err(dev, "%s: direction not implemented yet\n", __func__); return -EINVAL; } - pset->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); + param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); /* Configure A or AB synchronized transfers */ if (absync) - pset->opt |= SYNCDIM; + param->opt |= SYNCDIM; - pset->src = src_addr; - pset->dst = dst_addr; + param->src = src_addr; + param->dst = dst_addr; - pset->src_dst_bidx = (dst_bidx << 16) | src_bidx; - pset->src_dst_cidx = (dst_cidx << 16) | src_cidx; + param->src_dst_bidx = (dst_bidx << 16) | src_bidx; + param->src_dst_cidx = (dst_cidx << 16) | src_cidx; - pset->a_b_cnt = bcnt << 16 | acnt; - pset->ccnt = ccnt; + param->a_b_cnt = bcnt << 16 | acnt; + param->ccnt = ccnt; /* * Only time when (bcntrld) auto reload is required is for * A-sync case, and in this case, a requirement of reload value * of SZ_64K-1 only is assured. 'link' is initially set to NULL * and then later will be populated by edma_execute. 
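The acnt/bcnt/ccnt decomposition above moves acnt * bcnt * ccnt bytes per PaRAM set: with burst == 1 the whole buffer can run as one A-synced stream of elements, otherwise each frame is acnt * burst bytes and AB sync is used. A deliberately simplified sketch of the split (the real edma_config_pset() additionally folds in the SZ_64K - 1 counter limits):

/* Simplified count split; counter-limit handling omitted. */
static void edma_count_split(unsigned int len, unsigned int acnt,
                             unsigned int burst, unsigned int *bcnt,
                             unsigned int *ccnt, int *absync)
{
        if (burst == 1) {
                *absync = 0;            /* A-sync: event per element */
                *bcnt = len / acnt;
                *ccnt = 1;
        } else {
                *absync = 1;            /* AB-sync: event per frame */
                *bcnt = burst;
                *ccnt = len / (acnt * burst);
        }
}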
*/ - pset->link_bcntrld = 0xffffffff; + param->link_bcntrld = 0xffffffff; return absync; } @@ -401,23 +480,26 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { - dev_err(dev, "%s: bad direction?\n", __func__); + dev_err(dev, "%s: bad direction: %d\n", __func__, direction); return NULL; } if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { - dev_err(dev, "Undefined slave buswidth\n"); + dev_err(dev, "%s: Undefined slave buswidth\n", __func__); return NULL; } edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), GFP_ATOMIC); if (!edesc) { - dev_dbg(dev, "Failed to allocate a descriptor\n"); + dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); return NULL; } edesc->pset_nr = sg_len; + edesc->residue = 0; + edesc->direction = direction; + edesc->echan = echan; /* Allocate a PaRAM slot, if needed */ nslots = min_t(unsigned, MAX_NR_SG, sg_len); @@ -429,7 +511,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( EDMA_SLOT_ANY); if (echan->slot[i] < 0) { kfree(edesc); - dev_err(dev, "Failed to allocate slot\n"); + dev_err(dev, "%s: Failed to allocate slot\n", + __func__); return NULL; } } @@ -452,16 +535,56 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( } edesc->absync = ret; + edesc->residue += sg_dma_len(sg); /* If this is the last in a current SG set of transactions, enable interrupts so that next set is processed */ if (!((i+1) % MAX_NR_SG)) - edesc->pset[i].opt |= TCINTEN; + edesc->pset[i].param.opt |= TCINTEN; /* If this is the last set, enable completion interrupt flag */ if (i == sg_len - 1) - edesc->pset[i].opt |= TCINTEN; + edesc->pset[i].param.opt |= TCINTEN; } + edesc->residue_stat = edesc->residue; + + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); +} + +struct dma_async_tx_descriptor *edma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + int ret; + struct edma_desc *edesc; + struct device *dev = chan->device->dev; + struct edma_chan *echan = to_edma_chan(chan); + + if (unlikely(!echan || !len)) + return NULL; + + edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC); + if (!edesc) { + dev_dbg(dev, "Failed to allocate a descriptor\n"); + return NULL; + } + + edesc->pset_nr = 1; + + ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, + DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM); + if (ret < 0) + return NULL; + + edesc->absync = ret; + + /* + * Enable intermediate transfer chaining to re-trigger channel + * on completion of every TR, and enable transfer-completion + * interrupt on completion of the whole transfer. 
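Note how the prep routines above allocate the descriptor and its trailing pset[] array in a single kzalloc(); that is the flexible-array idiom behind the "struct edma_pset pset[0];" member. A standalone sketch of the same pattern (struct names hypothetical):

#include <linux/slab.h>
#include <linux/types.h>

struct demo_desc {
        int nr;
        struct demo_pset { u32 len; } pset[];   /* [] is the C99 form of [0] */
};

static struct demo_desc *demo_alloc(int n)
{
        /* Header plus n trailing elements in one allocation. */
        struct demo_desc *d = kzalloc(sizeof(*d) + n * sizeof(d->pset[0]),
                                      GFP_ATOMIC);

        if (d)
                d->nr = n;
        return d;
}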
+ */ + edesc->pset[0].param.opt |= ITCCHEN; + edesc->pset[0].param.opt |= TCINTEN; return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } @@ -493,12 +616,12 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( dev_width = echan->cfg.dst_addr_width; burst = echan->cfg.dst_maxburst; } else { - dev_err(dev, "%s: bad direction?\n", __func__); + dev_err(dev, "%s: bad direction: %d\n", __func__, direction); return NULL; } if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { - dev_err(dev, "Undefined slave buswidth\n"); + dev_err(dev, "%s: Undefined slave buswidth\n", __func__); return NULL; } @@ -523,16 +646,18 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), GFP_ATOMIC); if (!edesc) { - dev_dbg(dev, "Failed to allocate a descriptor\n"); + dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); return NULL; } edesc->cyclic = 1; edesc->pset_nr = nslots; + edesc->residue = edesc->residue_stat = buf_len; + edesc->direction = direction; + edesc->echan = echan; - dev_dbg(dev, "%s: nslots=%d\n", __func__, nslots); - dev_dbg(dev, "%s: period_len=%d\n", __func__, period_len); - dev_dbg(dev, "%s: buf_len=%d\n", __func__, buf_len); + dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n", + __func__, echan->ch_num, nslots, period_len, buf_len); for (i = 0; i < nslots; i++) { /* Allocate a PaRAM slot, if needed */ @@ -542,7 +667,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( EDMA_SLOT_ANY); if (echan->slot[i] < 0) { kfree(edesc); - dev_err(dev, "Failed to allocate slot\n"); + dev_err(dev, "%s: Failed to allocate slot\n", + __func__); return NULL; } } @@ -566,8 +692,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( else src_addr += period_len; - dev_dbg(dev, "%s: Configure period %d of buf:\n", __func__, i); - dev_dbg(dev, + dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i); + dev_vdbg(dev, "\n pset[%d]:\n" " chnum\t%d\n" " slot\t%d\n" @@ -580,14 +706,14 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( " cidx\t%08x\n" " lkrld\t%08x\n", i, echan->ch_num, echan->slot[i], - edesc->pset[i].opt, - edesc->pset[i].src, - edesc->pset[i].dst, - edesc->pset[i].a_b_cnt, - edesc->pset[i].ccnt, - edesc->pset[i].src_dst_bidx, - edesc->pset[i].src_dst_cidx, - edesc->pset[i].link_bcntrld); + edesc->pset[i].param.opt, + edesc->pset[i].param.src, + edesc->pset[i].param.dst, + edesc->pset[i].param.a_b_cnt, + edesc->pset[i].param.ccnt, + edesc->pset[i].param.src_dst_bidx, + edesc->pset[i].param.src_dst_cidx, + edesc->pset[i].param.link_bcntrld); edesc->absync = ret; @@ -595,7 +721,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( * Enable interrupts for every period because callback * has to be called for every period. 
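With DMA_MEMCPY set in edma_probe() later in this patch, generic dmaengine clients can reach the new edma_prep_dma_memcpy() through the standard API. A consumer-side sketch with error handling trimmed (buffers are assumed to be DMA-mapped already):

#include <linux/dmaengine.h>

static int dma_memcpy_example(dma_addr_t dst, dma_addr_t src, size_t len)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct dma_async_tx_descriptor *tx;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
                return -ENODEV;

        /* len must respect chan->device->copy_align, per the comment
         * added in edma_dma_init() below. */
        tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                                  DMA_PREP_INTERRUPT);
        if (!tx) {
                dma_release_channel(chan);
                return -EIO;
        }

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        /* ... wait for completion before releasing the channel ... */
        dma_release_channel(chan);
        return 0;
}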
*/ - edesc->pset[i].opt |= TCINTEN; + edesc->pset[i].param.opt |= TCINTEN; } return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); @@ -606,7 +732,6 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) { struct edma_chan *echan = data; struct device *dev = echan->vchan.chan.device->dev; struct edma_desc *edesc; - unsigned long flags; struct edmacc_param p; edesc = echan->edesc; @@ -617,27 +742,34 @@ switch (ch_status) { case EDMA_DMA_COMPLETE: - spin_lock_irqsave(&echan->vchan.lock, flags); + spin_lock(&echan->vchan.lock); if (edesc) { if (edesc->cyclic) { vchan_cyclic_callback(&edesc->vdesc); } else if (edesc->processed == edesc->pset_nr) { dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); + edesc->residue = 0; edma_stop(echan->ch_num); vchan_cookie_complete(&edesc->vdesc); edma_execute(echan); } else { dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); + + /* Update statistics for tx_status */ + edesc->residue -= edesc->sg_len; + edesc->residue_stat = edesc->residue; + edesc->processed_stat = edesc->processed; + edma_execute(echan); } } - spin_unlock_irqrestore(&echan->vchan.lock, flags); + spin_unlock(&echan->vchan.lock); break; case EDMA_DMA_CC_ERROR: - spin_lock_irqsave(&echan->vchan.lock, flags); + spin_lock(&echan->vchan.lock); edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); @@ -668,7 +800,7 @@ static void edma_callback(unsigned ch_num, u16 ch_status, void *data) edma_trigger_channel(echan->ch_num); } - spin_unlock_irqrestore(&echan->vchan.lock, flags); + spin_unlock(&echan->vchan.lock); break; default: @@ -704,7 +836,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan) echan->alloced = true; echan->slot[0] = echan->ch_num; - dev_dbg(dev, "allocated channel for %u:%u\n", + dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); return 0; @@ -756,23 +888,52 @@ static void edma_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&echan->vchan.lock, flags); } -static size_t edma_desc_size(struct edma_desc *edesc) +static u32 edma_residue(struct edma_desc *edesc) { + bool dst = edesc->direction == DMA_DEV_TO_MEM; + struct edma_pset *pset = edesc->pset; + dma_addr_t done, pos; int i; - size_t size; - - if (edesc->absync) - for (size = i = 0; i < edesc->pset_nr; i++) - size += (edesc->pset[i].a_b_cnt & 0xffff) * - (edesc->pset[i].a_b_cnt >> 16) * - edesc->pset[i].ccnt; - else - size = (edesc->pset[0].a_b_cnt & 0xffff) * - (edesc->pset[0].a_b_cnt >> 16) + - (edesc->pset[0].a_b_cnt & 0xffff) * - (SZ_64K - 1) * edesc->pset[0].ccnt; - - return size; + + /* + * We always read the dst/src position from the first PaRAM + * pset. That's the one which is active now. + */ + pos = edma_get_position(edesc->echan->slot[0], dst); + + /* + * Cyclic is simple. Just subtract pset[0].addr from pos. + * + * We never update edesc->residue in the cyclic case, so we + * can report the remaining room to the end of the circular + * buffer. + */ + if (edesc->cyclic) { + done = pos - pset->addr; + edesc->residue_stat = edesc->residue - done; + return edesc->residue_stat; + } + + /* + * For SG operation we catch up with the last processed + * status. + */ + pset += edesc->processed_stat; + + for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { + /* + * If we are inside this pset address range, we know + * this is the active one. Get the current delta and + * stop walking the psets.
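From a client's point of view, the payoff of edma_residue() is byte-accurate reporting through the standard status call. A short usage sketch:

#include <linux/dmaengine.h>

/* How a dmaengine client observes the residue computed above. */
static u32 bytes_remaining(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);

        return status == DMA_COMPLETE ? 0 : state.residue;
}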
+ */ + if (pos >= pset->addr && pos < pset->addr + pset->len) + return edesc->residue_stat - (pos - pset->addr); + + /* Otherwise mark it done and update residue_stat. */ + edesc->processed_stat++; + edesc->residue_stat -= pset->len; + } + return edesc->residue_stat; } /* Check request completion status */ @@ -790,13 +951,10 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, return ret; spin_lock_irqsave(&echan->vchan.lock, flags); - vdesc = vchan_find_desc(&echan->vchan, cookie); - if (vdesc) { - txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); - } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { - struct edma_desc *edesc = echan->edesc; - txstate->residue = edma_desc_size(edesc); - } + if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) + txstate->residue = edma_residue(echan->edesc); + else if ((vdesc = vchan_find_desc(&echan->vchan, cookie))) + txstate->residue = to_edma_desc(&vdesc->tx)->residue; spin_unlock_irqrestore(&echan->vchan.lock, flags); return ret; @@ -822,18 +980,43 @@ static void __init edma_chan_init(struct edma_cc *ecc, } } +#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +static int edma_dma_device_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + caps->src_addr_widths = EDMA_DMA_BUSWIDTHS; + caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS; + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = true; + caps->cmd_terminate = true; + caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + + return 0; +} + static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, struct device *dev) { dma->device_prep_slave_sg = edma_prep_slave_sg; dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; + dma->device_prep_dma_memcpy = edma_prep_dma_memcpy; dma->device_alloc_chan_resources = edma_alloc_chan_resources; dma->device_free_chan_resources = edma_free_chan_resources; dma->device_issue_pending = edma_issue_pending; dma->device_tx_status = edma_tx_status; dma->device_control = edma_control; + dma->device_slave_caps = edma_dma_device_slave_caps; dma->dev = dev; + /* + * Code using DMA memcpy must make sure that the transfer + * length is aligned to the dma->copy_align boundary.
+ */ + dma->copy_align = DMA_SLAVE_BUSWIDTH_4_BYTES; + INIT_LIST_HEAD(&dma->channels); } @@ -861,6 +1044,8 @@ static int edma_probe(struct platform_device *pdev) dma_cap_zero(ecc->dma_slave.cap_mask); dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); + dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask); + dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask); edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 766b68ed505..394cbc5c93e 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan, static void mv_chan_activate(struct mv_xor_chan *chan) { - u32 activation; - dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); - activation = readl_relaxed(XOR_ACTIVATION(chan)); - activation |= 0x1; - writel_relaxed(activation, XOR_ACTIVATION(chan)); + + /* writel ensures all descriptors are flushed before activation */ + writel(BIT(0), XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ab26d46bbe1..5ebdfbc1051 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -113,11 +113,9 @@ struct sa11x0_dma_phy { struct sa11x0_dma_desc *txd_load; unsigned sg_done; struct sa11x0_dma_desc *txd_done; -#ifdef CONFIG_PM_SLEEP u32 dbs[2]; u32 dbt[2]; u32 dcsr; -#endif }; struct sa11x0_dma_dev { @@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP static int sa11x0_dma_suspend(struct device *dev) { struct sa11x0_dma_dev *d = dev_get_drvdata(dev); @@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops sa11x0_dma_pm_ops = { .suspend_noirq = sa11x0_dma_suspend, diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 33edd676634..2c694b5297c 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -1018,7 +1018,7 @@ static void edac_ce_error(struct mem_ctl_info *mci, } edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); - if (mci->scrub_mode & SCRUB_SW_SRC) { + if (mci->scrub_mode == SCRUB_SW_SRC) { /* * Some memory controllers (called MCs below) can remap * memory so that it is still available at a different diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 8d0450b9b9a..64b68320249 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c @@ -275,7 +275,6 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, { struct pci_dev *dev; void __iomem *window; - int err; *ovrfl_pdev = NULL; *ovrfl_window = NULL; @@ -293,13 +292,8 @@ static int i82875p_setup_overfl_dev(struct pci_dev *pdev, if (dev == NULL) return 1; - err = pci_bus_add_device(dev); - if (err) { - i82875p_printk(KERN_ERR, - "%s(): pci_bus_add_device() Failed\n", - __func__); - } pci_bus_assign_resources(dev->bus); + pci_bus_add_device(dev); } *ovrfl_pdev = dev; diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 51b9caa0b02..5f43620d580 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -6,7 +6,6 @@ static struct amd_decoder_ops *fam_ops; static u8 xec_mask = 0xf; -static u8 nb_err_cpumask = 0xf; static bool report_gart_errors; static void (*nb_bus_decoder)(int node_id, struct mce *m); @@ -852,7 +851,6 @@ static int __init mce_amd_init(void) break; case 0x14: - nb_err_cpumask = 0x3; fam_ops->mc0_mce = cat_mc0_mce; fam_ops->mc1_mce = cat_mc1_mce; fam_ops->mc2_mce = 
k8_mc2_mce; diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index c98764aeeec..f477308b6e9 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -237,8 +237,8 @@ static inline bool is_next_generation(int new_generation, int old_generation) #define LOCAL_BUS 0xffc0 -/* arbitrarily chosen maximum range for physical DMA: 128 TB */ -#define FW_MAX_PHYSICAL_RANGE (128ULL << 40) +/* OHCI-1394's default upper bound for physical DMA: 4 GB */ +#define FW_MAX_PHYSICAL_RANGE (1ULL << 32) void fw_core_handle_request(struct fw_card *card, struct fw_packet *request); void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8db66321956..586f2f7f699 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -3716,7 +3716,7 @@ static int pci_probe(struct pci_dev *dev, version >> 16, version & 0xff, ohci->card.index, ohci->n_ir, ohci->n_it, ohci->quirks, reg_read(ohci, OHCI1394_PhyUpperBound) ? - ", >4 GB phys DMA" : ""); + ", physUB" : ""); return 0; diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c index 3ee852c9925..071c2c969ee 100644 --- a/drivers/firmware/iscsi_ibft.c +++ b/drivers/firmware/iscsi_ibft.c @@ -756,6 +756,7 @@ static const struct { */ { ACPI_SIG_IBFT }, { "iBFT" }, + { "BIFT" }, /* Broadcom iSCSI Offload */ }; static void __init acpi_find_ibft_region(void) diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index a86c49a605c..4a1b5113e52 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -56,6 +56,7 @@ config GPIO_ACPI depends on ACPI config GPIOLIB_IRQCHIP + select IRQ_DOMAIN bool config DEBUG_GPIO @@ -243,6 +244,15 @@ config GPIO_OCTEON Say yes here to support the on-chip GPIO lines on the OCTEON family of SOCs. +config GPIO_OMAP + bool "TI OMAP GPIO support" if COMPILE_TEST && !ARCH_OMAP2PLUS + default y if ARCH_OMAP + depends on ARM + select GENERIC_IRQ_CHIP + select GPIOLIB_IRQCHIP + help + Say yes here to enable GPIO support for TI OMAP SoCs. + config GPIO_PL061 bool "PrimeCell PL061 GPIO support" depends on ARM_AMBA @@ -259,7 +269,7 @@ config GPIO_PXA config GPIO_RCAR tristate "Renesas R-Car GPIO" - depends on ARM + depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST) help Say yes here to support GPIO on Renesas R-Car SoCs. @@ -510,6 +520,7 @@ config GPIO_PCA953X config GPIO_PCA953X_IRQ bool "Interrupt controller support for PCA953x" depends on GPIO_PCA953X=y + select GPIOLIB_IRQCHIP help Say yes here to enable the pca953x to be used as an interrupt controller. It requires the driver to be built in the kernel. @@ -579,6 +590,7 @@ config GPIO_STP_XWAY config GPIO_TC3589X bool "TC3589X GPIOs" depends on MFD_TC3589X + select GPIOLIB_IRQCHIP help This enables support for the GPIOs found on the TC3589X I/O Expander. @@ -699,13 +711,13 @@ config GPIO_AMD8111 config GPIO_INTEL_MID bool "Intel Mid GPIO support" depends on PCI && X86 - select IRQ_DOMAIN + select GPIOLIB_IRQCHIP help Say Y here to support Intel Mid GPIO. config GPIO_PCH tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO" - depends on PCI && X86 + depends on PCI && (X86_32 || COMPILE_TEST) select GENERIC_IRQ_CHIP help This driver is for PCH(Platform controller Hub) GPIO of Intel Topcliff @@ -739,7 +751,7 @@ config GPIO_SODAVILLE config GPIO_TIMBERDALE bool "Support for timberdale GPIO IP" - depends on MFD_TIMBERDALE && HAS_IOMEM + depends on MFD_TIMBERDALE ---help--- Add support for the GPIO IP in the timberdale FPGA. 
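Looking back at the mv_xor activation change above: writel(), unlike writel_relaxed(), includes a write barrier, so every previously posted descriptor store is visible to the engine before the activation bit lands. The general doorbell pattern, with hypothetical register offsets:

#include <linux/bitops.h>
#include <linux/io.h>

/* Relaxed writes for payload, one ordered write for the doorbell. */
static void post_and_kick(void __iomem *regs, u32 desc_addr)
{
        writel_relaxed(desc_addr, regs + 0x0);  /* descriptor pointer */
        writel(BIT(0), regs + 0x4);             /* barrier + start engine */
}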
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 6309aff1d80..d10f6a9d875 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -58,7 +58,7 @@ obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o obj-$(CONFIG_GPIO_OCTEON) += gpio-octeon.o -obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o +obj-$(CONFIG_GPIO_OMAP) += gpio-omap.o obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o obj-$(CONFIG_GPIO_PCH) += gpio-pch.o diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c index 307464fd015..65978cf85f7 100644 --- a/drivers/gpio/devres.c +++ b/drivers/gpio/devres.c @@ -52,6 +52,22 @@ struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, EXPORT_SYMBOL(devm_gpiod_get); /** + * devm_gpiod_get_optional - Resource-managed gpiod_get_optional() + * @dev: GPIO consumer + * @con_id: function within the GPIO consumer + * + * Managed gpiod_get_optional(). GPIO descriptors returned from this function + * are automatically disposed on driver detach. See gpiod_get_optional() for + * detailed information about behavior and return values. + */ +struct gpio_desc *__must_check devm_gpiod_get_optional(struct device *dev, + const char *con_id) +{ + return devm_gpiod_get_index_optional(dev, con_id, 0); +} +EXPORT_SYMBOL(devm_gpiod_get_optional); + +/** * devm_gpiod_get_index - Resource-managed gpiod_get_index() * @dev: GPIO consumer * @con_id: function within the GPIO consumer @@ -87,6 +103,33 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, EXPORT_SYMBOL(devm_gpiod_get_index); /** + * devm_gpiod_get_index_optional - Resource-managed gpiod_get_index_optional() + * @dev: GPIO consumer + * @con_id: function within the GPIO consumer + * @index: index of the GPIO to obtain in the consumer + * + * Managed gpiod_get_index_optional(). GPIO descriptors returned from this + * function are automatically disposed on driver detach. See + * gpiod_get_index_optional() for detailed information about behavior and + * return values. 
+ */ +struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index) +{ + struct gpio_desc *desc; + + desc = devm_gpiod_get_index(dev, con_id, index); + if (IS_ERR(desc)) { + if (PTR_ERR(desc) == -ENOENT) + return NULL; + } + + return desc; +} +EXPORT_SYMBOL(devm_gpiod_get_index_optional); + +/** * devm_gpiod_put - Resource-managed gpiod_put() * @desc: GPIO descriptor to dispose of * diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c index 613265944e2..f1ade8fa321 100644 --- a/drivers/gpio/gpio-adp5520.c +++ b/drivers/gpio/gpio-adp5520.c @@ -106,10 +106,8 @@ static int adp5520_gpio_probe(struct platform_device *pdev) } dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); - if (dev == NULL) { - dev_err(&pdev->dev, "failed to alloc memory\n"); + if (dev == NULL) return -ENOMEM; - } dev->master = pdev->dev.parent; diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c index d974020b78b..ef19bc33f2b 100644 --- a/drivers/gpio/gpio-adp5588.c +++ b/drivers/gpio/gpio-adp5588.c @@ -379,10 +379,8 @@ static int adp5588_gpio_probe(struct i2c_client *client, } dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (dev == NULL) { - dev_err(&client->dev, "failed to alloc memory\n"); + if (dev == NULL) return -ENOMEM; - } dev->client = client; diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c index ecb3ca2d1d1..6557147d933 100644 --- a/drivers/gpio/gpio-bt8xx.c +++ b/drivers/gpio/gpio-bt8xx.c @@ -178,7 +178,7 @@ static int bt8xxgpio_probe(struct pci_dev *dev, struct bt8xxgpio *bg; int err; - bg = kzalloc(sizeof(*bg), GFP_KERNEL); + bg = devm_kzalloc(&dev->dev, sizeof(struct bt8xxgpio), GFP_KERNEL); if (!bg) return -ENOMEM; @@ -188,9 +188,9 @@ static int bt8xxgpio_probe(struct pci_dev *dev, err = pci_enable_device(dev); if (err) { printk(KERN_ERR "bt8xxgpio: Can't enable device.\n"); - goto err_freebg; + return err; } - if (!request_mem_region(pci_resource_start(dev, 0), + if (!devm_request_mem_region(&dev->dev, pci_resource_start(dev, 0), pci_resource_len(dev, 0), "bt8xxgpio")) { printk(KERN_WARNING "bt8xxgpio: Can't request iomem (0x%llx).\n", @@ -201,11 +201,11 @@ static int bt8xxgpio_probe(struct pci_dev *dev, pci_set_master(dev); pci_set_drvdata(dev, bg); - bg->mmio = ioremap(pci_resource_start(dev, 0), 0x1000); + bg->mmio = devm_ioremap(&dev->dev, pci_resource_start(dev, 0), 0x1000); if (!bg->mmio) { printk(KERN_ERR "bt8xxgpio: ioremap() failed\n"); err = -EIO; - goto err_release_mem; + goto err_disable; } /* Disable interrupts */ @@ -220,18 +220,13 @@ static int bt8xxgpio_probe(struct pci_dev *dev, err = gpiochip_add(&bg->gpio); if (err) { printk(KERN_ERR "bt8xxgpio: Failed to register GPIOs\n"); - goto err_release_mem; + goto err_disable; } return 0; -err_release_mem: - release_mem_region(pci_resource_start(dev, 0), - pci_resource_len(dev, 0)); err_disable: pci_disable_device(dev); -err_freebg: - kfree(bg); return err; } @@ -250,8 +245,6 @@ static void bt8xxgpio_remove(struct pci_dev *pdev) release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); pci_disable_device(pdev); - - kfree(bg); } #ifdef CONFIG_PM diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 339f9dac591..9f0682534e2 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -230,10 +230,8 @@ static int davinci_gpio_probe(struct platform_device *pdev) chips = devm_kzalloc(dev, ngpio * sizeof(struct davinci_gpio_controller), GFP_KERNEL); - if (!chips) { - 
dev_err(dev, "Memory allocation failed\n"); + if (!chips) return -ENOMEM; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index ed5711f77e2..cd3b8143527 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -198,6 +198,8 @@ static int dwapb_irq_set_type(struct irq_data *d, u32 type) break; } + irq_setup_alt_chip(d, type); + writel(level, gpio->regs + GPIO_INTTYPE_LEVEL); writel(polarity, gpio->regs + GPIO_INT_POLARITY); spin_unlock_irqrestore(&bgc->lock, flags); @@ -213,7 +215,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, struct irq_chip_generic *irq_gc; unsigned int hwirq, ngpio = gc->ngpio; struct irq_chip_type *ct; - int err, irq; + int err, irq, i; irq = irq_of_parse_and_map(node, 0); if (!irq) { @@ -227,7 +229,7 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, if (!gpio->domain) return; - err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 1, + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, "gpio-dwapb", handle_level_irq, IRQ_NOREQUEST, 0, IRQ_GC_INIT_NESTED_LOCK); @@ -248,20 +250,24 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, irq_gc->reg_base = gpio->regs; irq_gc->private = gpio; - ct = irq_gc->chip_types; - ct->chip.irq_ack = irq_gc_ack_set_bit; - ct->chip.irq_mask = irq_gc_mask_set_bit; - ct->chip.irq_unmask = irq_gc_mask_clr_bit; - ct->chip.irq_set_type = dwapb_irq_set_type; - ct->chip.irq_enable = dwapb_irq_enable; - ct->chip.irq_disable = dwapb_irq_disable; - ct->chip.irq_request_resources = dwapb_irq_reqres; - ct->chip.irq_release_resources = dwapb_irq_relres; - ct->regs.ack = GPIO_PORTA_EOI; - ct->regs.mask = GPIO_INTMASK; - - irq_setup_generic_chip(irq_gc, IRQ_MSK(port->bgc.gc.ngpio), - IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0); + for (i = 0; i < 2; i++) { + ct = &irq_gc->chip_types[i]; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_set_type = dwapb_irq_set_type; + ct->chip.irq_enable = dwapb_irq_enable; + ct->chip.irq_disable = dwapb_irq_disable; + ct->chip.irq_request_resources = dwapb_irq_reqres; + ct->chip.irq_release_resources = dwapb_irq_relres; + ct->regs.ack = GPIO_PORTA_EOI; + ct->regs.mask = GPIO_INTMASK; + ct->type = IRQ_TYPE_LEVEL_MASK; + } + + irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + irq_gc->chip_types[1].handler = handle_edge_irq; irq_set_chained_handler(irq, dwapb_irq_handler); irq_set_handler_data(irq, gpio); diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index 8765bd6f48e..cde36054c38 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -212,7 +212,7 @@ static void __em_gio_set(struct gpio_chip *chip, unsigned int reg, { /* upper 16 bits contains mask and lower 16 actual value */ em_gio_write(gpio_to_priv(chip), reg, - (1 << (shift + 16)) | (value << shift)); + (BIT(shift + 16)) | (value << shift)); } static void em_gio_set(struct gpio_chip *chip, unsigned offset, int value) @@ -284,7 +284,6 @@ static int em_gio_probe(struct platform_device *pdev) p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); if (!p) { - dev_err(&pdev->dev, "failed to allocate driver data\n"); ret = -ENOMEM; goto err0; } diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 80829f3c654..dcc2bb4074e 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -344,37 +344,24 @@ static int 
ep93xx_gpio_probe(struct platform_device *pdev) { struct ep93xx_gpio *ep93xx_gpio; struct resource *res; - void __iomem *mmio; int i; - int ret; + struct device *dev = &pdev->dev; - ep93xx_gpio = kzalloc(sizeof(*ep93xx_gpio), GFP_KERNEL); + ep93xx_gpio = devm_kzalloc(dev, sizeof(struct ep93xx_gpio), GFP_KERNEL); if (!ep93xx_gpio) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENXIO; - goto exit_free; - } - - if (!request_mem_region(res->start, resource_size(res), pdev->name)) { - ret = -EBUSY; - goto exit_free; - } - - mmio = ioremap(res->start, resource_size(res)); - if (!mmio) { - ret = -ENXIO; - goto exit_release; - } - ep93xx_gpio->mmio_base = mmio; + ep93xx_gpio->mmio_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ep93xx_gpio->mmio_base)) + return PTR_ERR(ep93xx_gpio->mmio_base); for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i]; struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i]; - if (ep93xx_gpio_add_bank(bgc, &pdev->dev, mmio, bank)) + if (ep93xx_gpio_add_bank(bgc, &pdev->dev, + ep93xx_gpio->mmio_base, bank)) dev_warn(&pdev->dev, "Unable to add gpio bank %s\n", bank->label); } @@ -382,13 +369,6 @@ static int ep93xx_gpio_probe(struct platform_device *pdev) ep93xx_gpio_init_irq(); return 0; - -exit_release: - release_mem_region(res->start, resource_size(res)); -exit_free: - kfree(ep93xx_gpio); - dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, ret); - return ret; } static struct platform_driver ep93xx_gpio_driver = { diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c index 7b95a4a8318..1237a73c3c9 100644 --- a/drivers/gpio/gpio-ge.c +++ b/drivers/gpio/gpio-ge.c @@ -18,15 +18,9 @@ */ #include <linux/kernel.h> -#include <linux/compiler.h> -#include <linux/init.h> #include <linux/io.h> -#include <linux/of.h> #include <linux/of_device.h> -#include <linux/of_platform.h> #include <linux/of_gpio.h> -#include <linux/gpio.h> -#include <linux/slab.h> #include <linux/module.h> #define GEF_GPIO_DIRECT 0x00 @@ -39,28 +33,26 @@ #define GEF_GPIO_OVERRUN 0x1C #define GEF_GPIO_MODE 0x20 -static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value) +static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { + struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); unsigned int data; - data = ioread32be(reg); - /* value: 0=low; 1=high */ - if (value & 0x1) - data = data | (0x1 << offset); + data = ioread32be(mmchip->regs + GEF_GPIO_OUT); + if (value) + data = data | BIT(offset); else - data = data & ~(0x1 << offset); - - iowrite32be(data, reg); + data = data & ~BIT(offset); + iowrite32be(data, mmchip->regs + GEF_GPIO_OUT); } - static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset) { unsigned int data; struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT); - data = data | (0x1 << offset); + data = data | BIT(offset); iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT); return 0; @@ -71,11 +63,11 @@ static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) { unsigned int data; struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); - /* Set direction before switching to input */ - _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value); + /* Set value before switching to output */ + gef_gpio_set(chip, offset, value); data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT); - data = data & ~(0x1 << offset); + data = data & ~BIT(offset);
iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT); return 0; @@ -83,116 +75,56 @@ static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value) static int gef_gpio_get(struct gpio_chip *chip, unsigned offset) { - unsigned int data; - int state = 0; struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); - data = ioread32be(mmchip->regs + GEF_GPIO_IN); - state = (int)((data >> offset) & 0x1); - - return state; + return !!(ioread32be(mmchip->regs + GEF_GPIO_IN) & BIT(offset)); } -static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -{ - struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip); - - _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value); -} +static const struct of_device_id gef_gpio_ids[] = { + { + .compatible = "gef,sbc610-gpio", + .data = (void *)19, + }, { + .compatible = "gef,sbc310-gpio", + .data = (void *)6, + }, { + .compatible = "ge,imp3a-gpio", + .data = (void *)16, + }, + { } +}; +MODULE_DEVICE_TABLE(of, gef_gpio_ids); -static int __init gef_gpio_init(void) +static int __init gef_gpio_probe(struct platform_device *pdev) { - struct device_node *np; - int retval; - struct of_mm_gpio_chip *gef_gpio_chip; - - for_each_compatible_node(np, NULL, "gef,sbc610-gpio") { - - pr_debug("%s: Initialising GEF GPIO\n", np->full_name); - - /* Allocate chip structure */ - gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL); - if (!gef_gpio_chip) { - pr_err("%s: Unable to allocate structure\n", - np->full_name); - continue; - } - - /* Setup pointers to chip functions */ - gef_gpio_chip->gc.of_gpio_n_cells = 2; - gef_gpio_chip->gc.ngpio = 19; - gef_gpio_chip->gc.direction_input = gef_gpio_dir_in; - gef_gpio_chip->gc.direction_output = gef_gpio_dir_out; - gef_gpio_chip->gc.get = gef_gpio_get; - gef_gpio_chip->gc.set = gef_gpio_set; - - /* This function adds a memory mapped GPIO chip */ - retval = of_mm_gpiochip_add(np, gef_gpio_chip); - if (retval) { - kfree(gef_gpio_chip); - pr_err("%s: Unable to add GPIO\n", np->full_name); - } - } - - for_each_compatible_node(np, NULL, "gef,sbc310-gpio") { - - pr_debug("%s: Initialising GEF GPIO\n", np->full_name); - - /* Allocate chip structure */ - gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL); - if (!gef_gpio_chip) { - pr_err("%s: Unable to allocate structure\n", - np->full_name); - continue; - } - - /* Setup pointers to chip functions */ - gef_gpio_chip->gc.of_gpio_n_cells = 2; - gef_gpio_chip->gc.ngpio = 6; - gef_gpio_chip->gc.direction_input = gef_gpio_dir_in; - gef_gpio_chip->gc.direction_output = gef_gpio_dir_out; - gef_gpio_chip->gc.get = gef_gpio_get; - gef_gpio_chip->gc.set = gef_gpio_set; - - /* This function adds a memory mapped GPIO chip */ - retval = of_mm_gpiochip_add(np, gef_gpio_chip); - if (retval) { - kfree(gef_gpio_chip); - pr_err("%s: Unable to add GPIO\n", np->full_name); - } - } - - for_each_compatible_node(np, NULL, "ge,imp3a-gpio") { - - pr_debug("%s: Initialising GE GPIO\n", np->full_name); - - /* Allocate chip structure */ - gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL); - if (!gef_gpio_chip) { - pr_err("%s: Unable to allocate structure\n", - np->full_name); - continue; - } - - /* Setup pointers to chip functions */ - gef_gpio_chip->gc.of_gpio_n_cells = 2; - gef_gpio_chip->gc.ngpio = 16; - gef_gpio_chip->gc.direction_input = gef_gpio_dir_in; - gef_gpio_chip->gc.direction_output = gef_gpio_dir_out; - gef_gpio_chip->gc.get = gef_gpio_get; - gef_gpio_chip->gc.set = gef_gpio_set; - - /* This function adds a memory mapped GPIO chip */ - retval = 
of_mm_gpiochip_add(np, gef_gpio_chip); - if (retval) { - kfree(gef_gpio_chip); - pr_err("%s: Unable to add GPIO\n", np->full_name); - } - } + const struct of_device_id *of_id = + of_match_device(gef_gpio_ids, &pdev->dev); + struct of_mm_gpio_chip *mmchip; + + mmchip = devm_kzalloc(&pdev->dev, sizeof(*mmchip), GFP_KERNEL); + if (!mmchip) + return -ENOMEM; + + /* Setup pointers to chip functions */ + mmchip->gc.ngpio = (u16)(uintptr_t)of_id->data; + mmchip->gc.of_gpio_n_cells = 2; + mmchip->gc.direction_input = gef_gpio_dir_in; + mmchip->gc.direction_output = gef_gpio_dir_out; + mmchip->gc.get = gef_gpio_get; + mmchip->gc.set = gef_gpio_set; + + /* This function adds a memory mapped GPIO chip */ + return of_mm_gpiochip_add(pdev->dev.of_node, mmchip); +} - return 0; +static struct platform_driver gef_gpio_driver = { + .driver = { + .name = "gef-gpio", + .owner = THIS_MODULE, + .of_match_table = gef_gpio_ids, + }, }; -arch_initcall(gef_gpio_init); +module_platform_driver_probe(gef_gpio_driver, gef_gpio_probe); MODULE_DESCRIPTION("GE I/O FPGA GPIO driver"); MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
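The conversion above folds three nearly identical for_each_compatible_node() loops into a single probe path: the only per-board difference, the number of GPIO lines, moves into the .data field of the of_device_id table and is cast back out in probe. A minimal sketch of that pattern, with hypothetical foo_* names standing in for a real driver:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_ids[] = {
	{ .compatible = "vendor,board-a", .data = (void *)19 },
	{ .compatible = "vendor,board-b", .data = (void *)6 },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_ids);

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(foo_ids, &pdev->dev);

	if (!of_id)
		return -ENODEV;
	/* Round-trip through uintptr_t avoids an int-to-pointer-size warning. */
	dev_info(&pdev->dev, "board provides %u lines\n",
		 (unsigned int)(uintptr_t)of_id->data);
	return 0;
}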
found\n"); - ret = -ENODEV; - goto out_free_mod; - } - - mod->regs = ioremap(res->start, resource_size(res)); - if (!mod->regs) { - dev_err(dev, "MODULbus registers not ioremap\n"); - ret = -ENOMEM; - goto out_free_mod; - } + mod->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(mod->regs)) + return PTR_ERR(mod->regs); ttl_setup_device(mod); @@ -198,17 +185,10 @@ static int ttl_probe(struct platform_device *pdev) ret = gpiochip_add(gpio); if (ret) { dev_err(dev, "unable to add GPIO chip\n"); - goto out_iounmap_regs; + return ret; } return 0; - -out_iounmap_regs: - iounmap(mod->regs); -out_free_mod: - kfree(mod); -out_return: - return ret; } static int ttl_remove(struct platform_device *pdev) @@ -223,8 +203,6 @@ static int ttl_remove(struct platform_device *pdev) return ret; } - iounmap(mod->regs); - kfree(mod); return 0; } diff --git a/drivers/gpio/gpio-kempld.c b/drivers/gpio/gpio-kempld.c index c6d88173f5a..1e5e51987d3 100644 --- a/drivers/gpio/gpio-kempld.c +++ b/drivers/gpio/gpio-kempld.c @@ -24,7 +24,7 @@ #include <linux/mfd/kempld.h> #define KEMPLD_GPIO_MAX_NUM 16 -#define KEMPLD_GPIO_MASK(x) (1 << ((x) % 8)) +#define KEMPLD_GPIO_MASK(x) (BIT((x) % 8)) #define KEMPLD_GPIO_DIR_NUM(x) (0x40 + (x) / 8) #define KEMPLD_GPIO_LVL_NUM(x) (0x42 + (x) / 8) #define KEMPLD_GPIO_EVT_LVL_EDGE 0x46 @@ -216,4 +216,4 @@ module_platform_driver(kempld_gpio_driver); MODULE_DESCRIPTION("KEM PLD GPIO Driver"); MODULE_AUTHOR("Michael Brunner <michael.brunner@kontron.com>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:gpio-kempld"); +MODULE_ALIAS("platform:kempld-gpio"); diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c index 9a82a9074a2..2bea89b7250 100644 --- a/drivers/gpio/gpio-lynxpoint.c +++ b/drivers/gpio/gpio-lynxpoint.c @@ -375,10 +375,8 @@ static int lp_gpio_probe(struct platform_device *pdev) int ret = -ENODEV; lg = devm_kzalloc(dev, sizeof(struct lp_gpio), GFP_KERNEL); - if (!lg) { - dev_err(dev, "can't allocate lp_gpio chip data\n"); + if (!lg) return -ENOMEM; - } lg->pdev = pdev; platform_set_drvdata(pdev, lg); diff --git a/drivers/gpio/gpio-max730x.c b/drivers/gpio/gpio-max730x.c index 8672755f95c..0814584fcdc 100644 --- a/drivers/gpio/gpio-max730x.c +++ b/drivers/gpio/gpio-max730x.c @@ -237,10 +237,9 @@ int __max730x_remove(struct device *dev) ts->write(dev, 0x04, 0x00); ret = gpiochip_remove(&ts->chip); - if (!ret) { + if (!ret) mutex_destroy(&ts->lock); - kfree(ts); - } else + else dev_err(dev, "Failed to remove GPIO controller: %d\n", ret); return ret; diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index 99a68310e7c..fe7c0e211f9 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c @@ -714,7 +714,7 @@ fail: #ifdef CONFIG_OF #ifdef CONFIG_SPI_MASTER -static struct of_device_id mcp23s08_spi_of_match[] = { +static const struct of_device_id mcp23s08_spi_of_match[] = { { .compatible = "microchip,mcp23s08", .data = (void *) MCP_TYPE_S08, @@ -738,7 +738,7 @@ MODULE_DEVICE_TABLE(of, mcp23s08_spi_of_match); #endif #if IS_ENABLED(CONFIG_I2C) -static struct of_device_id mcp23s08_i2c_of_match[] = { +static const struct of_device_id mcp23s08_i2c_of_match[] = { { .compatible = "microchip,mcp23008", .data = (void *) MCP_TYPE_008, @@ -867,7 +867,7 @@ static int mcp23s08_probe(struct spi_device *spi) { struct mcp23s08_platform_data *pdata; unsigned addr; - unsigned chips = 0; + int chips = 0; struct mcp23s08_driver_data *data; int status, type; unsigned base = -1, @@ -895,8 +895,13 @@ static int mcp23s08_probe(struct 
spi_device *spi) return -ENODEV; } - for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) + for (addr = 0; addr < ARRAY_SIZE(pdata->chip); addr++) { pullups[addr] = 0; + if (spi_present_mask & (1 << addr)) + chips++; + } + if (!chips) + return -ENODEV; } else { type = spi_get_device_id(spi)->driver_data; pdata = dev_get_platdata(&spi->dev); @@ -919,12 +924,12 @@ static int mcp23s08_probe(struct spi_device *spi) pullups[addr] = pdata->chip[addr].pullups; } - if (!chips) - return -ENODEV; - base = pdata->base; } + if (!chips) + return -ENODEV; + data = kzalloc(sizeof(*data) + chips * sizeof(struct mcp23s08), GFP_KERNEL); if (!data) @@ -935,6 +940,10 @@ static int mcp23s08_probe(struct spi_device *spi) if (!(spi_present_mask & (1 << addr))) continue; chips--; + if (chips < 0) { + dev_err(&spi->dev, "FATAL: invalid negative chip id\n"); + goto fail; + } data->mcp[addr] = &data->chip[chips]; status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 0x40 | (addr << 1), type, base, diff --git a/drivers/gpio/gpio-moxart.c b/drivers/gpio/gpio-moxart.c index ccd45704e5f..4661e181be0 100644 --- a/drivers/gpio/gpio-moxart.c +++ b/drivers/gpio/gpio-moxart.c @@ -113,10 +113,8 @@ static int moxart_gpio_probe(struct platform_device *pdev) int ret; mgc = devm_kzalloc(dev, sizeof(*mgc), GFP_KERNEL); - if (!mgc) { - dev_err(dev, "can't allocate GPIO chip container\n"); + if (!mgc) return -ENOMEM; - } mgc->gpio = moxart_template_chip; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index d4250942239..418e3865036 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -535,7 +535,7 @@ static void mvebu_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #define mvebu_gpio_dbg_show NULL #endif -static struct of_device_id mvebu_gpio_of_match[] = { +static const struct of_device_id mvebu_gpio_of_match[] = { { .compatible = "marvell,orion-gpio", .data = (void *) MVEBU_GPIO_SOC_VARIANT_ORION, @@ -574,10 +574,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev) soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL); - if (!mvchip) { - dev_err(&pdev->dev, "Cannot allocate memory\n"); + if (!mvchip) return -ENOMEM; - } if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) { dev_err(&pdev->dev, "Missing ngpios OF property\n"); @@ -738,9 +736,4 @@ static struct platform_driver mvebu_gpio_driver = { }, .probe = mvebu_gpio_probe, }; - -static int __init mvebu_gpio_init(void) -{ - return platform_driver_register(&mvebu_gpio_driver); -} -postcore_initcall(mvebu_gpio_init); +module_platform_driver(mvebu_gpio_driver); diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 19b886c21b1..00f29aa1fb9 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -24,9 +24,9 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/gpio.h> +#include <linux/bitops.h> #include <linux/platform_data/gpio-omap.h> #define OFF_MODE 1 @@ -52,7 +52,6 @@ struct gpio_bank { struct list_head node; void __iomem *base; u16 irq; - struct irq_domain *domain; u32 non_wakeup_gpios; u32 enabled_non_wakeup_gpios; struct gpio_regs context; @@ -84,22 +83,21 @@ struct gpio_bank { }; #define GPIO_INDEX(bank, gpio) (gpio % bank->width) -#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) +#define GPIO_BIT(bank, gpio) 
(BIT(GPIO_INDEX(bank, gpio))) #define GPIO_MOD_CTRL_BIT BIT(0) #define BANK_USED(bank) (bank->mod_usage || bank->irq_usage) -#define LINE_USED(line, offset) (line & (1 << offset)) +#define LINE_USED(line, offset) (line & (BIT(offset))) static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) { return bank->chip.base + gpio_irq; } -static int omap_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +static inline struct gpio_bank *_irq_data_get_bank(struct irq_data *d) { - struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); - - return irq_find_mapping(bank->domain, offset); + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + return container_of(chip, struct gpio_bank, chip); } static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) @@ -110,9 +108,9 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) reg += bank->regs->direction; l = readl_relaxed(reg); if (is_input) - l |= 1 << gpio; + l |= BIT(gpio); else - l &= ~(1 << gpio); + l &= ~(BIT(gpio)); writel_relaxed(l, reg); bank->context.oe = l; } @@ -155,14 +153,14 @@ static int _get_gpio_datain(struct gpio_bank *bank, int offset) { void __iomem *reg = bank->base + bank->regs->datain; - return (readl_relaxed(reg) & (1 << offset)) != 0; + return (readl_relaxed(reg) & (BIT(offset))) != 0; } static int _get_gpio_dataout(struct gpio_bank *bank, int offset) { void __iomem *reg = bank->base + bank->regs->dataout; - return (readl_relaxed(reg) & (1 << offset)) != 0; + return (readl_relaxed(reg) & (BIT(offset))) != 0; } static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) @@ -180,7 +178,7 @@ static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set) static inline void _gpio_dbck_enable(struct gpio_bank *bank) { if (bank->dbck_enable_mask && !bank->dbck_enabled) { - clk_enable(bank->dbck); + clk_prepare_enable(bank->dbck); bank->dbck_enabled = true; writel_relaxed(bank->dbck_enable_mask, @@ -198,7 +196,7 @@ static inline void _gpio_dbck_disable(struct gpio_bank *bank) */ writel_relaxed(0, bank->base + bank->regs->debounce_en); - clk_disable(bank->dbck); + clk_disable_unprepare(bank->dbck); bank->dbck_enabled = false; } } @@ -231,7 +229,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, l = GPIO_BIT(bank, gpio); - clk_enable(bank->dbck); + clk_prepare_enable(bank->dbck); reg = bank->base + bank->regs->debounce; writel_relaxed(debounce, reg); @@ -245,7 +243,7 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, bank->dbck_enable_mask = val; writel_relaxed(val, reg); - clk_disable(bank->dbck); + clk_disable_unprepare(bank->dbck); /* * Enable debounce clock per module. 
* This call is mandatory because in omap_gpio_request() when @@ -290,7 +288,7 @@ static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio) bank->context.debounce = 0; writel_relaxed(bank->context.debounce, bank->base + bank->regs->debounce); - clk_disable(bank->dbck); + clk_disable_unprepare(bank->dbck); bank->dbck_enabled = false; } } @@ -299,7 +297,7 @@ static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio, unsigned trigger) { void __iomem *base = bank->base; - u32 gpio_bit = 1 << gpio; + u32 gpio_bit = BIT(gpio); _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit, trigger & IRQ_TYPE_LEVEL_LOW); @@ -368,9 +366,9 @@ static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) l = readl_relaxed(reg); if ((l >> gpio) & 1) - l &= ~(1 << gpio); + l &= ~(BIT(gpio)); else - l |= 1 << gpio; + l |= BIT(gpio); writel_relaxed(l, reg); } @@ -392,11 +390,11 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, l = readl_relaxed(reg); if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) - bank->toggle_mask |= 1 << gpio; + bank->toggle_mask |= BIT(gpio); if (trigger & IRQ_TYPE_EDGE_RISING) - l |= 1 << gpio; + l |= BIT(gpio); else if (trigger & IRQ_TYPE_EDGE_FALLING) - l &= ~(1 << gpio); + l &= ~(BIT(gpio)); else return -EINVAL; @@ -413,10 +411,10 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, if (trigger & IRQ_TYPE_EDGE_RISING) l |= 2 << (gpio << 1); if (trigger & IRQ_TYPE_EDGE_FALLING) - l |= 1 << (gpio << 1); + l |= BIT(gpio << 1); /* Enable wake-up during idle for dynamic tick */ - _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger); + _gpio_rmw(base, bank->regs->wkup_en, BIT(gpio), trigger); bank->context.wake_en = readl_relaxed(bank->base + bank->regs->wkup_en); writel_relaxed(l, reg); @@ -430,7 +428,7 @@ static void _enable_gpio_module(struct gpio_bank *bank, unsigned offset) void __iomem *reg = bank->base + bank->regs->pinctrl; /* Claim the pin for MPU */ - writel_relaxed(readl_relaxed(reg) | (1 << offset), reg); + writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg); } if (bank->regs->ctrl && !BANK_USED(bank)) { @@ -453,7 +451,7 @@ static void _disable_gpio_module(struct gpio_bank *bank, unsigned offset) !LINE_USED(bank->mod_usage, offset) && !LINE_USED(bank->irq_usage, offset)) { /* Disable wake-up during idle for dynamic tick */ - _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0); + _gpio_rmw(base, bank->regs->wkup_en, BIT(offset), 0); bank->context.wake_en = readl_relaxed(bank->base + bank->regs->wkup_en); } @@ -479,7 +477,7 @@ static int gpio_is_input(struct gpio_bank *bank, int mask) static int gpio_irq_type(struct irq_data *d, unsigned type) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned gpio = 0; int retval; unsigned long flags; @@ -509,20 +507,12 @@ static int gpio_irq_type(struct irq_data *d, unsigned type) if (!LINE_USED(bank->mod_usage, offset)) { _enable_gpio_module(bank, offset); _set_gpio_direction(bank, offset, 1); - } else if (!gpio_is_input(bank, 1 << offset)) { + } else if (!gpio_is_input(bank, BIT(offset))) { spin_unlock_irqrestore(&bank->lock, flags); return -EINVAL; } - retval = gpio_lock_as_irq(&bank->chip, offset); - if (retval) { - dev_err(bank->dev, "unable to lock offset %d for IRQ\n", - offset); - spin_unlock_irqrestore(&bank->lock, flags); - return retval; - } - - bank->irq_usage |= 1 << GPIO_INDEX(bank, gpio); + bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio)); spin_unlock_irqrestore(&bank->lock, 
flags); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) @@ -559,7 +549,7 @@ static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank) { void __iomem *reg = bank->base; u32 l; - u32 mask = (1 << bank->width) - 1; + u32 mask = (BIT(bank->width)) - 1; reg += bank->regs->irqenable; l = readl_relaxed(reg); @@ -664,7 +654,7 @@ static void _reset_gpio(struct gpio_bank *bank, int gpio) /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */ static int gpio_wake_enable(struct irq_data *d, unsigned int enable) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); return _set_gpio_wakeup(bank, gpio, enable); @@ -691,7 +681,7 @@ static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); _enable_gpio_module(bank, offset); } - bank->mod_usage |= 1 << offset; + bank->mod_usage |= BIT(offset); spin_unlock_irqrestore(&bank->lock, flags); return 0; @@ -703,7 +693,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) unsigned long flags; spin_lock_irqsave(&bank->lock, flags); - bank->mod_usage &= ~(1 << offset); + bank->mod_usage &= ~(BIT(offset)); _disable_gpio_module(bank, offset); _reset_gpio(bank, bank->chip.base + offset); spin_unlock_irqrestore(&bank->lock, flags); @@ -732,11 +722,12 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) unsigned int bit; struct gpio_bank *bank; int unmasked = 0; - struct irq_chip *chip = irq_desc_get_chip(desc); + struct irq_chip *irqchip = irq_desc_get_chip(desc); + struct gpio_chip *chip = irq_get_handler_data(irq); - chained_irq_enter(chip, desc); + chained_irq_enter(irqchip, desc); - bank = irq_get_handler_data(irq); + bank = container_of(chip, struct gpio_bank, chip); isr_reg = bank->base + bank->regs->irqstatus; pm_runtime_get_sync(bank->dev); @@ -764,7 +755,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) configured, we could unmask GPIO bank interrupt immediately */ if (!level_mask && !unmasked) { unmasked = 1; - chained_irq_exit(chip, desc); + chained_irq_exit(irqchip, desc); } if (!isr) @@ -772,7 +763,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) while (isr) { bit = __ffs(isr); - isr &= ~(1 << bit); + isr &= ~(BIT(bit)); /* * Some chips can't respond to both rising and falling @@ -781,10 +772,11 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) * to respond to the IRQ for the opposite direction. * This will be indicated in the bank toggle_mask. 
*/ - if (bank->toggle_mask & (1 << bit)) + if (bank->toggle_mask & (BIT(bit))) _toggle_gpio_edge_triggering(bank, bit); - generic_handle_irq(irq_find_mapping(bank->domain, bit)); + generic_handle_irq(irq_find_mapping(bank->chip.irqdomain, + bit)); } } /* if bank has any level sensitive GPIO pin interrupt @@ -793,20 +785,20 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) interrupt */ exit: if (!unmasked) - chained_irq_exit(chip, desc); + chained_irq_exit(irqchip, desc); pm_runtime_put(bank->dev); } static void gpio_irq_shutdown(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); unsigned long flags; unsigned offset = GPIO_INDEX(bank, gpio); spin_lock_irqsave(&bank->lock, flags); gpio_unlock_as_irq(&bank->chip, offset); - bank->irq_usage &= ~(1 << offset); + bank->irq_usage &= ~(BIT(offset)); _disable_gpio_module(bank, offset); _reset_gpio(bank, gpio); spin_unlock_irqrestore(&bank->lock, flags); @@ -821,7 +813,7 @@ static void gpio_irq_shutdown(struct irq_data *d) static void gpio_ack_irq(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); _clear_gpio_irqstatus(bank, gpio); @@ -829,7 +821,7 @@ static void gpio_ack_irq(struct irq_data *d) static void gpio_mask_irq(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); unsigned long flags; @@ -841,7 +833,7 @@ static void gpio_mask_irq(struct irq_data *d) static void gpio_unmask_irq(struct irq_data *d) { - struct gpio_bank *bank = irq_data_get_irq_chip_data(d); + struct gpio_bank *bank = _irq_data_get_bank(d); unsigned int gpio = irq_to_gpio(bank, d->hwirq); unsigned int irq_mask = GPIO_BIT(bank, gpio); u32 trigger = irqd_get_trigger_type(d); @@ -936,6 +928,21 @@ static inline void mpuio_init(struct gpio_bank *bank) /*---------------------------------------------------------------------*/ +static int gpio_get_direction(struct gpio_chip *chip, unsigned offset) +{ + struct gpio_bank *bank; + unsigned long flags; + void __iomem *reg; + int dir; + + bank = container_of(chip, struct gpio_bank, chip); + reg = bank->base + bank->regs->direction; + spin_lock_irqsave(&bank->lock, flags); + dir = !!(readl_relaxed(reg) & BIT(offset)); + spin_unlock_irqrestore(&bank->lock, flags); + return dir; +} + static int gpio_input(struct gpio_chip *chip, unsigned offset) { struct gpio_bank *bank; @@ -954,7 +961,7 @@ static int gpio_get(struct gpio_chip *chip, unsigned offset) u32 mask; bank = container_of(chip, struct gpio_bank, chip); - mask = (1 << offset); + mask = (BIT(offset)); if (gpio_is_input(bank, mask)) return _get_gpio_datain(bank, offset); @@ -1081,10 +1088,12 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, IRQ_NOREQUEST | IRQ_NOPROBE, 0); } -static void omap_gpio_chip_init(struct gpio_bank *bank) +static int omap_gpio_chip_init(struct gpio_bank *bank) { int j; static int gpio; + int irq_base = 0; + int ret; /* * REVISIT eventually switch from OMAP-specific gpio structs @@ -1092,12 +1101,12 @@ static void omap_gpio_chip_init(struct gpio_bank *bank) */ bank->chip.request = omap_gpio_request; bank->chip.free = omap_gpio_free; + bank->chip.get_direction = gpio_get_direction; bank->chip.direction_input = gpio_input; bank->chip.get = 
gpio_get; bank->chip.direction_output = gpio_output; bank->chip.set_debounce = gpio_debounce; bank->chip.set = gpio_set; - bank->chip.to_irq = omap_gpio_to_irq; if (bank->is_mpuio) { bank->chip.label = "mpuio"; if (bank->regs->wkup_en) @@ -1110,22 +1119,48 @@ static void omap_gpio_chip_init(struct gpio_bank *bank) } bank->chip.ngpio = bank->width; - gpiochip_add(&bank->chip); + ret = gpiochip_add(&bank->chip); + if (ret) { + dev_err(bank->dev, "Could not register gpio chip %d\n", ret); + return ret; + } + +#ifdef CONFIG_ARCH_OMAP1 + /* + * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop + * irq_alloc_descs() since a base IRQ offset will no longer be needed. + */ + irq_base = irq_alloc_descs(-1, 0, bank->width, 0); + if (irq_base < 0) { + dev_err(bank->dev, "Couldn't allocate IRQ numbers\n"); + return -ENODEV; + } +#endif + + ret = gpiochip_irqchip_add(&bank->chip, &gpio_irq_chip, + irq_base, gpio_irq_handler, + IRQ_TYPE_NONE); + + if (ret) { + dev_err(bank->dev, "Couldn't add irqchip to gpiochip %d\n", ret); + ret = gpiochip_remove(&bank->chip); + return -ENODEV; + } + + gpiochip_set_chained_irqchip(&bank->chip, &gpio_irq_chip, + bank->irq, gpio_irq_handler); for (j = 0; j < bank->width; j++) { - int irq = irq_create_mapping(bank->domain, j); + int irq = irq_find_mapping(bank->chip.irqdomain, j); irq_set_lockdep_class(irq, &gpio_lock_class); - irq_set_chip_data(irq, bank); if (bank->is_mpuio) { omap_mpuio_alloc_gc(bank, irq, bank->width); - } else { - irq_set_chip_and_handler(irq, &gpio_irq_chip, - handle_simple_irq); - set_irq_flags(irq, IRQF_VALID); + irq_set_chip_and_handler(irq, NULL, NULL); + set_irq_flags(irq, 0); } } - irq_set_chained_handler(bank->irq, gpio_irq_handler); - irq_set_handler_data(bank->irq, bank); + + return 0; } static const struct of_device_id omap_gpio_match[]; @@ -1138,9 +1173,7 @@ static int omap_gpio_probe(struct platform_device *pdev) const struct omap_gpio_platform_data *pdata; struct resource *res; struct gpio_bank *bank; -#ifdef CONFIG_ARCH_OMAP1 - int irq_base; -#endif + int ret; match = of_match_device(of_match_ptr(omap_gpio_match), dev); @@ -1162,6 +1195,7 @@ static int omap_gpio_probe(struct platform_device *pdev) bank->irq = res->start; bank->dev = dev; + bank->chip.dev = dev; bank->dbck_flag = pdata->dbck_flag; bank->stride = pdata->bank_stride; bank->width = pdata->bank_width; @@ -1182,29 +1216,6 @@ static int omap_gpio_probe(struct platform_device *pdev) pdata->get_context_loss_count; } -#ifdef CONFIG_ARCH_OMAP1 - /* - * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop - * irq_alloc_descs() and irq_domain_add_legacy() and just use a - * linear IRQ domain mapping for all OMAP platforms. 
- */ - irq_base = irq_alloc_descs(-1, 0, bank->width, 0); - if (irq_base < 0) { - dev_err(dev, "Couldn't allocate IRQ numbers\n"); - return -ENODEV; - } - - bank->domain = irq_domain_add_legacy(node, bank->width, irq_base, - 0, &irq_domain_simple_ops, NULL); -#else - bank->domain = irq_domain_add_linear(node, bank->width, - &irq_domain_simple_ops, NULL); -#endif - if (!bank->domain) { - dev_err(dev, "Couldn't register an IRQ domain\n"); - return -ENODEV; - } - if (bank->regs->set_dataout && bank->regs->clr_dataout) bank->set_dataout = _set_gpio_dataout_reg; else @@ -1216,7 +1227,7 @@ static int omap_gpio_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); bank->base = devm_ioremap_resource(dev, res); if (IS_ERR(bank->base)) { - irq_domain_remove(bank->domain); + irq_domain_remove(bank->chip.irqdomain); return PTR_ERR(bank->base); } @@ -1230,7 +1241,11 @@ static int omap_gpio_probe(struct platform_device *pdev) mpuio_init(bank); omap_gpio_mod_init(bank); - omap_gpio_chip_init(bank); + + ret = omap_gpio_chip_init(bank); + if (ret) + return ret; + omap_gpio_show_rev(bank); pm_runtime_put(bank->dev); diff --git a/drivers/gpio/gpio-palmas.c b/drivers/gpio/gpio-palmas.c index da9d33252e5..86bdbe36206 100644 --- a/drivers/gpio/gpio-palmas.c +++ b/drivers/gpio/gpio-palmas.c @@ -148,7 +148,7 @@ static const struct palmas_device_data tps80036_dev_data = { .ngpio = 16, }; -static struct of_device_id of_palmas_gpio_match[] = { +static const struct of_device_id of_palmas_gpio_match[] = { { .compatible = "ti,palmas-gpio", .data = &palmas_dev_data,}, { .compatible = "ti,tps65913-gpio", .data = &palmas_dev_data,}, { .compatible = "ti,tps65914-gpio", .data = &palmas_dev_data,}, @@ -173,10 +173,8 @@ static int palmas_gpio_probe(struct platform_device *pdev) palmas_gpio = devm_kzalloc(&pdev->dev, sizeof(*palmas_gpio), GFP_KERNEL); - if (!palmas_gpio) { - dev_err(&pdev->dev, "Could not allocate palmas_gpio\n"); + if (!palmas_gpio) return -ENOMEM; - } palmas_gpio->palmas = palmas; palmas_gpio->gpio_chip.owner = THIS_MODULE; diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index d550d8e5870..e721a37c347 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -15,8 +15,6 @@ #include <linux/init.h> #include <linux/gpio.h> #include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> #include <linux/i2c.h> #include <linux/platform_data/pca953x.h> #include <linux/slab.h> @@ -91,7 +89,6 @@ struct pca953x_chip { u8 irq_stat[MAX_BANK]; u8 irq_trig_raise[MAX_BANK]; u8 irq_trig_fall[MAX_BANK]; - struct irq_domain *domain; #endif struct i2c_client *client; @@ -100,6 +97,11 @@ struct pca953x_chip { int chip_type; }; +static inline struct pca953x_chip *to_pca(struct gpio_chip *gc) +{ + return container_of(gc, struct pca953x_chip, gpio_chip); +} + static int pca953x_read_single(struct pca953x_chip *chip, int reg, u32 *val, int off) { @@ -202,12 +204,10 @@ static int pca953x_read_regs(struct pca953x_chip *chip, int reg, u8 *val) static int pca953x_gpio_direction_input(struct gpio_chip *gc, unsigned off) { - struct pca953x_chip *chip; + struct pca953x_chip *chip = to_pca(gc); u8 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); reg_val = chip->reg_direction[off / BANK_SZ] | (1u << (off % BANK_SZ)); @@ -233,12 +233,10 @@ exit: static int pca953x_gpio_direction_output(struct gpio_chip *gc, unsigned off, int val) { - struct pca953x_chip *chip; + struct 
pca953x_chip *chip = to_pca(gc); u8 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); /* set output level */ if (val) @@ -285,12 +283,10 @@ exit: static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) { - struct pca953x_chip *chip; + struct pca953x_chip *chip = to_pca(gc); u32 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); switch (chip->chip_type) { case PCA953X_TYPE: @@ -315,12 +311,10 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off) static void pca953x_gpio_set_value(struct gpio_chip *gc, unsigned off, int val) { - struct pca953x_chip *chip; + struct pca953x_chip *chip = to_pca(gc); u8 reg_val; int ret, offset = 0; - chip = container_of(gc, struct pca953x_chip, gpio_chip); - mutex_lock(&chip->i2c_lock); if (val) reg_val = chip->reg_output[off / BANK_SZ] @@ -367,38 +361,34 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios) } #ifdef CONFIG_GPIO_PCA953X_IRQ -static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off) -{ - struct pca953x_chip *chip; - - chip = container_of(gc, struct pca953x_chip, gpio_chip); - return irq_create_mapping(chip->domain, off); -} - static void pca953x_irq_mask(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); chip->irq_mask[d->hwirq / BANK_SZ] &= ~(1 << (d->hwirq % BANK_SZ)); } static void pca953x_irq_unmask(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); chip->irq_mask[d->hwirq / BANK_SZ] |= 1 << (d->hwirq % BANK_SZ); } static void pca953x_irq_bus_lock(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); mutex_lock(&chip->irq_lock); } static void pca953x_irq_bus_sync_unlock(struct irq_data *d) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); u8 new_irqs; int level, i; @@ -420,7 +410,8 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d) static int pca953x_irq_set_type(struct irq_data *d, unsigned int type) { - struct pca953x_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct pca953x_chip *chip = to_pca(gc); int bank_nb = d->hwirq / BANK_SZ; u8 mask = 1 << (d->hwirq % BANK_SZ); @@ -503,44 +494,25 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) struct pca953x_chip *chip = devid; u8 pending[MAX_BANK]; u8 level; + unsigned nhandled = 0; int i; if (!pca953x_irq_pending(chip, pending)) - return IRQ_HANDLED; + return IRQ_NONE; for (i = 0; i < NBANK(chip); i++) { while (pending[i]) { level = __ffs(pending[i]); - handle_nested_irq(irq_find_mapping(chip->domain, + handle_nested_irq(irq_find_mapping(chip->gpio_chip.irqdomain, level + (BANK_SZ * i))); pending[i] &= ~(1 << level); + nhandled++; } } - return IRQ_HANDLED; + return (nhandled > 0) ? IRQ_HANDLED : IRQ_NONE; } -static int pca953x_gpio_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - irq_clear_status_flags(irq, IRQ_NOREQUEST); - irq_set_chip_data(irq, d->host_data); - irq_set_chip(irq, &pca953x_irq_chip); - irq_set_nested_thread(irq, true); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif - - return 0; -} - -static const struct irq_domain_ops pca953x_irq_simple_ops = { - .map = pca953x_gpio_irq_map, - .xlate = irq_domain_xlate_twocell, -}; - static int pca953x_irq_setup(struct pca953x_chip *chip, const struct i2c_device_id *id, int irq_base) @@ -572,19 +544,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, chip->irq_stat[i] &= chip->reg_direction[i]; mutex_init(&chip->irq_lock); - chip->domain = irq_domain_add_simple(client->dev.of_node, - chip->gpio_chip.ngpio, - irq_base, - &pca953x_irq_simple_ops, - chip); - if (!chip->domain) - return -ENODEV; - ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, pca953x_irq_handler, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, + IRQF_TRIGGER_LOW | IRQF_ONESHOT | + IRQF_SHARED, dev_name(&client->dev), chip); if (ret) { dev_err(&client->dev, "failed to request irq %d\n", @@ -592,7 +557,16 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, return ret; } - chip->gpio_chip.to_irq = pca953x_gpio_to_irq; + ret = gpiochip_irqchip_add(&chip->gpio_chip, + &pca953x_irq_chip, + irq_base, + handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&client->dev, + "could not connect irqchip to gpiochip\n"); + return ret; + } } return 0; @@ -756,11 +730,11 @@ static int pca953x_probe(struct i2c_client *client, if (ret) return ret; - ret = pca953x_irq_setup(chip, id, irq_base); + ret = gpiochip_add(&chip->gpio_chip); if (ret) return ret; - ret = gpiochip_add(&chip->gpio_chip); + ret = pca953x_irq_setup(chip, id, irq_base); if (ret) return ret;
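The handler rewrite above is what makes the IRQF_SHARED flag, added in the same file, safe: on a shared line the core relies on each handler returning IRQ_NONE when none of its own status bits were pending, so counting demuxed events and reporting honestly keeps the spurious-interrupt detector working. A sketch of the convention, with hypothetical bar_* helpers standing in for the device's status-register accessors:

#include <linux/interrupt.h>
#include <linux/types.h>

u32 bar_pending(void *devid);		/* hypothetical: read pending bits */
void bar_ack(void *devid, u32 bits);	/* hypothetical: acknowledge them */

static irqreturn_t bar_irq_handler(int irq, void *devid)
{
	unsigned int nhandled = 0;
	u32 pending;

	while ((pending = bar_pending(devid))) {
		bar_ack(devid, pending);
		nhandled++;
	}
	/* IRQ_NONE lets the core try the other handlers sharing this line. */
	return nhandled ? IRQ_HANDLED : IRQ_NONE;
}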
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index 82735822bc9..27b46751ea7 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c @@ -262,7 +262,7 @@ static int pcf857x_irq_domain_init(struct pcf857x *gpio, /* enable real irq */ status = devm_request_threaded_irq(&client->dev, client->irq, NULL, pcf857x_irq, IRQF_ONESHOT | - IRQF_TRIGGER_FALLING, + IRQF_TRIGGER_FALLING | IRQF_SHARED, dev_name(&client->dev), gpio); if (status) @@ -319,7 +319,7 @@ static int pcf857x_probe(struct i2c_client *client, status = pcf857x_irq_domain_init(gpio, client); if (status < 0) { dev_err(&client->dev, "irq_domain init failed\n"); - goto fail; + goto fail_irq_domain; } } @@ -414,12 +414,13 @@ static int pcf857x_probe(struct i2c_client *client, return 0; fail: - dev_dbg(&client->dev, "probe error %d for '%s'\n", - status, client->name); - if (client->irq) pcf857x_irq_domain_cleanup(gpio); +fail_irq_domain: + dev_dbg(&client->dev, "probe error %d for '%s'\n", + status, client->name); + return status; } diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index 83a15639747..d6eac9b17db 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -20,6 +20,7 @@ #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/slab.h> #define PCH_EDGE_FALLING 0 #define PCH_EDGE_RISING BIT(0) diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index b0f475243ce..84b49cfb81a 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -17,7 +17,6 @@ #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include 
<linux/bitops.h> -#include <linux/workqueue.h> #include <linux/gpio.h> #include <linux/device.h> #include <linux/amba/bus.h> @@ -88,7 +87,7 @@ static int pl061_direction_input(struct gpio_chip *gc, unsigned offset) spin_lock_irqsave(&chip->lock, flags); gpiodir = readb(chip->base + GPIODIR); - gpiodir &= ~(1 << offset); + gpiodir &= ~(BIT(offset)); writeb(gpiodir, chip->base + GPIODIR); spin_unlock_irqrestore(&chip->lock, flags); @@ -106,16 +105,16 @@ static int pl061_direction_output(struct gpio_chip *gc, unsigned offset, return -EINVAL; spin_lock_irqsave(&chip->lock, flags); - writeb(!!value << offset, chip->base + (1 << (offset + 2))); + writeb(!!value << offset, chip->base + (BIT(offset + 2))); gpiodir = readb(chip->base + GPIODIR); - gpiodir |= 1 << offset; + gpiodir |= BIT(offset); writeb(gpiodir, chip->base + GPIODIR); /* * gpio value is set again, because pl061 doesn't allow to set value of * a gpio pin before configuring it in OUT mode. */ - writeb(!!value << offset, chip->base + (1 << (offset + 2))); + writeb(!!value << offset, chip->base + (BIT(offset + 2))); spin_unlock_irqrestore(&chip->lock, flags); return 0; @@ -125,14 +124,14 @@ static int pl061_get_value(struct gpio_chip *gc, unsigned offset) { struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - return !!readb(chip->base + (1 << (offset + 2))); + return !!readb(chip->base + (BIT(offset + 2))); } static void pl061_set_value(struct gpio_chip *gc, unsigned offset, int value) { struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - writeb(!!value << offset, chip->base + (1 << (offset + 2))); + writeb(!!value << offset, chip->base + (BIT(offset + 2))); } static int pl061_irq_type(struct irq_data *d, unsigned trigger) @@ -207,7 +206,7 @@ static void pl061_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR); + u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR); u8 gpioie; spin_lock(&chip->lock); @@ -220,7 +219,7 @@ static void pl061_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc); - u8 mask = 1 << (irqd_to_hwirq(d) % PL061_GPIO_NR); + u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR); u8 gpioie; spin_lock(&chip->lock); @@ -302,9 +301,9 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) for (i = 0; i < PL061_GPIO_NR; i++) { if (pdata) { - if (pdata->directions & (1 << i)) + if (pdata->directions & (BIT(i))) pl061_direction_output(&chip->gc, i, - pdata->values & (1 << i)); + pdata->values & (BIT(i))); else pl061_direction_input(&chip->gc, i); } @@ -331,7 +330,7 @@ static int pl061_suspend(struct device *dev) chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE); for (offset = 0; offset < PL061_GPIO_NR; offset++) { - if (chip->csave_regs.gpio_dir & (1 << offset)) + if (chip->csave_regs.gpio_dir & (BIT(offset))) chip->csave_regs.gpio_data |= pl061_get_value(&chip->gc, offset) << offset; } @@ -345,10 +344,10 @@ static int pl061_resume(struct device *dev) int offset; for (offset = 0; offset < PL061_GPIO_NR; offset++) { - if (chip->csave_regs.gpio_dir & (1 << offset)) + if (chip->csave_regs.gpio_dir & (BIT(offset))) pl061_direction_output(&chip->gc, offset, chip->csave_regs.gpio_data & - (1 << offset)); + (BIT(offset))); else pl061_direction_input(&chip->gc, offset); }
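In the pl061 hunks above, chip->base + BIT(offset + 2) is not an array index: the PL061 decodes address bits [9:2] of a GPIODATA access as a per-bit mask, so a byte read or write at that address can only touch the selected line and no read-modify-write is needed. A minimal sketch of the addressing, with an illustrative helper that is not part of this patch:

#include <linux/bitops.h>
#include <linux/io.h>

static void pl061_write_pin(void __iomem *base, unsigned int offset, int value)
{
	/* Address bit (offset + 2) masks the access to GPIODATA bit 'offset',
	 * leaving all other lines untouched. */
	writeb(!!value << offset, base + BIT(offset + 2));
}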
diff --git a/drivers/gpio/gpio-rc5t583.c b/drivers/gpio/gpio-rc5t583.c index 9b423173ab5..562b0c4d9cc 100644 --- a/drivers/gpio/gpio-rc5t583.c +++ b/drivers/gpio/gpio-rc5t583.c @@ -119,10 +119,8 @@ static int rc5t583_gpio_probe(struct platform_device *pdev) rc5t583_gpio = devm_kzalloc(&pdev->dev, sizeof(*rc5t583_gpio), GFP_KERNEL); - if (!rc5t583_gpio) { - dev_warn(&pdev->dev, "Mem allocation for rc5t583_gpio failed"); + if (!rc5t583_gpio) return -ENOMEM; - } rc5t583_gpio->gpio_chip.label = "gpio-rc5t583", rc5t583_gpio->gpio_chip.owner = THIS_MODULE, diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index 03c91482432..0c9f803fc1a 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -26,6 +26,7 @@ #include <linux/pinctrl/consumer.h> #include <linux/platform_data/gpio-rcar.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/spinlock.h> #include <linux/slab.h> @@ -362,7 +363,6 @@ static int gpio_rcar_probe(struct platform_device *pdev) p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL); if (!p) { - dev_err(dev, "failed to allocate driver data\n"); ret = -ENOMEM; goto err0; } @@ -377,6 +377,9 @@ static int gpio_rcar_probe(struct platform_device *pdev) platform_set_drvdata(pdev, p); + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -460,6 +463,8 @@ static int gpio_rcar_probe(struct platform_device *pdev) err1: irq_domain_remove(p->irq_domain); err0: + pm_runtime_put(dev); + pm_runtime_disable(dev); return ret; } @@ -473,6 +478,8 @@ static int gpio_rcar_remove(struct platform_device *pdev) return ret; irq_domain_remove(p->irq_domain); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c index 88577c3272a..9fa7e53331c 100644 --- a/drivers/gpio/gpio-rdc321x.c +++ b/drivers/gpio/gpio-rdc321x.c @@ -141,17 +141,15 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) return -ENODEV; } - rdc321x_gpio_dev = kzalloc(sizeof(struct rdc321x_gpio), GFP_KERNEL); - if (!rdc321x_gpio_dev) { - dev_err(&pdev->dev, "failed to allocate private data\n"); + rdc321x_gpio_dev = devm_kzalloc(&pdev->dev, sizeof(struct rdc321x_gpio), + GFP_KERNEL); + if (!rdc321x_gpio_dev) return -ENOMEM; - } r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg1"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg1 resource\n"); - err = -ENODEV; - goto out_free; + return -ENODEV; } spin_lock_init(&rdc321x_gpio_dev->lock); @@ -162,8 +160,7 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) r = platform_get_resource_byname(pdev, IORESOURCE_IO, "gpio-reg2"); if (!r) { dev_err(&pdev->dev, "failed to get gpio-reg2 resource\n"); - err = -ENODEV; - goto out_free; + return -ENODEV; } rdc321x_gpio_dev->reg2_ctrl_base = r->start; @@ -187,21 +184,17 @@ static int rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->reg1_data_base, &rdc321x_gpio_dev->data_reg[0]); if (err) - goto out_free; + return err; err = pci_read_config_dword(rdc321x_gpio_dev->sb_pdev, rdc321x_gpio_dev->reg2_data_base, &rdc321x_gpio_dev->data_reg[1]); if (err) - goto out_free; + return err; dev_info(&pdev->dev, "registering %d GPIOs\n", rdc321x_gpio_dev->chip.ngpio); return gpiochip_add(&rdc321x_gpio_dev->chip); - -out_free: - kfree(rdc321x_gpio_dev); - return err; } static int rdc321x_gpio_remove(struct platform_device *pdev) @@ -213,8 +206,6 @@ static int rdc321x_gpio_remove(struct platform_device 
*pdev) if (ret) dev_err(&pdev->dev, "failed to unregister chip\n"); - kfree(rdc321x_gpio_dev); - return ret; } diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index 5af65719b95..a9b1cd16c84 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -97,8 +97,6 @@ static int sch_gpio_core_direction_out(struct gpio_chip *gc, u8 curr_dirs; unsigned short offset, bit; - sch_gpio_core_set(gc, gpio_num, val); - spin_lock(&gpio_lock); offset = CGIO + gpio_num / 8; @@ -109,6 +107,17 @@ static int sch_gpio_core_direction_out(struct gpio_chip *gc, outb(curr_dirs & ~(1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); + + /* + * According to the datasheet, writing to the level register has no + * effect when GPIO is programmed as input. + * Actually the level register is read-only when configured as input. + * Thus presetting the output level before switching to output is _NOT_ possible. + * Hence we set the level after configuring the GPIO as output. + * But we cannot prevent a short low pulse if direction is set to high + * and an external pull-up is connected. + */ + sch_gpio_core_set(gc, gpio_num, val); return 0; } @@ -178,8 +187,6 @@ static int sch_gpio_resume_direction_out(struct gpio_chip *gc, u8 curr_dirs; unsigned short offset, bit; - sch_gpio_resume_set(gc, gpio_num, val); - offset = RGIO + gpio_num / 8; bit = gpio_num % 8; @@ -190,6 +197,17 @@ static int sch_gpio_resume_direction_out(struct gpio_chip *gc, outb(curr_dirs & ~(1 << bit), gpio_ba + offset); spin_unlock(&gpio_lock); + + /* + * According to the datasheet, writing to the level register has no + * effect when GPIO is programmed as input. + * Actually the level register is read-only when configured as input. + * Thus presetting the output level before switching to output is _NOT_ possible. + * Hence we set the level after configuring the GPIO as output. + * But we cannot prevent a short low pulse if direction is set to high + * and an external pull-up is connected. + */ + sch_gpio_resume_set(gc, gpio_num, val); return 0; } diff --git a/drivers/gpio/gpio-sch311x.c b/drivers/gpio/gpio-sch311x.c index 0357387b364..f942b80ee40 100644 --- a/drivers/gpio/gpio-sch311x.c +++ b/drivers/gpio/gpio-sch311x.c @@ -327,14 +327,22 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr) if (err) return err; - /* Check device ID. We currently know about: * SCH3112 (0x7c), SCH3114 (0x7d), and SCH3116 (0x7f). */ + /* Check device ID. */ reg = sch311x_sio_inb(sio_config_port, 0x20); - if (!(reg == 0x7c || reg == 0x7d || reg == 0x7f)) { + switch (reg) { + case 0x7c: /* SCH3112 */ + dev_id = 2; + break; + case 0x7d: /* SCH3114 */ + dev_id = 4; + break; + case 0x7f: /* SCH3116 */ + dev_id = 6; + break; + default: err = -ENODEV; goto exit; } - dev_id = reg == 0x7c ? 2 : reg == 0x7d ? 
4 : 6; /* Select logical device A (runtime registers) */ sch311x_sio_outb(sio_config_port, 0x07, 0x0a); diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c index 30bcc539425..353263c85d2 100644 --- a/drivers/gpio/gpio-spear-spics.c +++ b/drivers/gpio/gpio-spear-spics.c @@ -129,10 +129,8 @@ static int spics_gpio_probe(struct platform_device *pdev) int ret; spics = devm_kzalloc(&pdev->dev, sizeof(*spics), GFP_KERNEL); - if (!spics) { - dev_err(&pdev->dev, "memory allocation fail\n"); + if (!spics) return -ENOMEM; - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); spics->base = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c index 13d73fb2b5e..b51ca9f5c14 100644 --- a/drivers/gpio/gpio-sx150x.c +++ b/drivers/gpio/gpio-sx150x.c @@ -22,7 +22,6 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> -#include <linux/workqueue.h> #include <linux/i2c/sx150x.h> #define NO_UPDATE_PENDING -1 diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 1019320984d..51f7cbd9ff7 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -12,8 +12,6 @@ #include <linux/slab.h> #include <linux/gpio.h> #include <linux/of.h> -#include <linux/irq.h> -#include <linux/irqdomain.h> #include <linux/interrupt.h> #include <linux/mfd/tc3589x.h> @@ -31,10 +29,6 @@ struct tc3589x_gpio { struct tc3589x *tc3589x; struct device *dev; struct mutex irq_lock; - struct irq_domain *domain; - - int irq_base; - /* Caches of interrupt control registers for bus_lock */ u8 regs[CACHE_NR_REGS][CACHE_NR_BANKS]; u8 oldregs[CACHE_NR_REGS][CACHE_NR_BANKS]; @@ -95,30 +89,6 @@ static int tc3589x_gpio_direction_input(struct gpio_chip *chip, return tc3589x_set_bits(tc3589x, reg, 1 << pos, 0); } -/** - * tc3589x_gpio_irq_get_irq(): Map a hardware IRQ on a chip to a Linux IRQ - * - * @tc3589x_gpio: tc3589x_gpio_irq controller to operate on. - * @irq: index of the hardware interrupt requested in the chip IRQs - * - * Useful for drivers to request their own IRQs. 
- */ -static int tc3589x_gpio_irq_get_irq(struct tc3589x_gpio *tc3589x_gpio, - int hwirq) -{ - if (!tc3589x_gpio) - return -EINVAL; - - return irq_create_mapping(tc3589x_gpio->domain, hwirq); -} - -static int tc3589x_gpio_to_irq(struct gpio_chip *chip, unsigned offset) -{ - struct tc3589x_gpio *tc3589x_gpio = to_tc3589x_gpio(chip); - - return tc3589x_gpio_irq_get_irq(tc3589x_gpio, offset); -} - static struct gpio_chip template_chip = { .label = "tc3589x", .owner = THIS_MODULE, @@ -126,13 +96,13 @@ static struct gpio_chip template_chip = { .get = tc3589x_gpio_get, .direction_output = tc3589x_gpio_direction_output, .set = tc3589x_gpio_set, - .to_irq = tc3589x_gpio_to_irq, .can_sleep = true, }; static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -159,14 +129,16 @@ static int tc3589x_gpio_irq_set_type(struct irq_data *d, unsigned int type) static void tc3589x_gpio_irq_lock(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); mutex_lock(&tc3589x_gpio->irq_lock); } static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; static const u8 regmap[] = { [REG_IBE] = TC3589x_GPIOIBE0, @@ -194,7 +166,8 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) static void tc3589x_gpio_irq_mask(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -204,7 +177,8 @@ static void tc3589x_gpio_irq_mask(struct irq_data *d) static void tc3589x_gpio_irq_unmask(struct irq_data *d) { - struct tc3589x_gpio *tc3589x_gpio = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct tc3589x_gpio *tc3589x_gpio = container_of(gc, struct tc3589x_gpio, chip); int offset = d->hwirq; int regoffset = offset / 8; int mask = 1 << (offset % 8); @@ -242,7 +216,8 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev) while (stat) { int bit = __ffs(stat); int line = i * 8 + bit; - int irq = tc3589x_gpio_irq_get_irq(tc3589x_gpio, line); + int irq = irq_find_mapping(tc3589x_gpio->chip.irqdomain, + line); handle_nested_irq(irq); stat &= ~(1 << bit); @@ -254,61 +229,6 @@ static irqreturn_t tc3589x_gpio_irq(int irq, void *dev) return IRQ_HANDLED; } -static int tc3589x_gpio_irq_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hwirq) -{ - struct tc3589x *tc3589x_gpio = d->host_data; - - irq_set_chip_data(irq, tc3589x_gpio); - irq_set_chip_and_handler(irq, &tc3589x_gpio_irq_chip, - handle_simple_irq); - irq_set_nested_thread(irq, 1); -#ifdef CONFIG_ARM - set_irq_flags(irq, IRQF_VALID); -#else - irq_set_noprobe(irq); -#endif - - return 0; -} - -static void 
tc3589x_gpio_irq_unmap(struct irq_domain *d, unsigned int irq) -{ -#ifdef CONFIG_ARM - set_irq_flags(irq, 0); -#endif - irq_set_chip_and_handler(irq, NULL, NULL); - irq_set_chip_data(irq, NULL); -} - -static struct irq_domain_ops tc3589x_irq_ops = { - .map = tc3589x_gpio_irq_map, - .unmap = tc3589x_gpio_irq_unmap, - .xlate = irq_domain_xlate_twocell, -}; - -static int tc3589x_gpio_irq_init(struct tc3589x_gpio *tc3589x_gpio, - struct device_node *np) -{ - int base = tc3589x_gpio->irq_base; - - /* - * If this results in a linear domain, irq_create_mapping() will - * take care of allocating IRQ descriptors at runtime. When a base - * is provided, the IRQ descriptors will be allocated when the - * domain is instantiated. - */ - tc3589x_gpio->domain = irq_domain_add_simple(np, - tc3589x_gpio->chip.ngpio, base, &tc3589x_irq_ops, - tc3589x_gpio); - if (!tc3589x_gpio->domain) { - dev_err(tc3589x_gpio->dev, "Failed to create irqdomain\n"); - return -ENOSYS; - } - - return 0; -} - static int tc3589x_gpio_probe(struct platform_device *pdev) { struct tc3589x *tc3589x = dev_get_drvdata(pdev->dev.parent); @@ -329,7 +249,8 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) if (irq < 0) return irq; - tc3589x_gpio = kzalloc(sizeof(struct tc3589x_gpio), GFP_KERNEL); + tc3589x_gpio = devm_kzalloc(&pdev->dev, sizeof(struct tc3589x_gpio), + GFP_KERNEL); if (!tc3589x_gpio) return -ENOMEM; @@ -347,30 +268,36 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) tc3589x_gpio->chip.of_node = np; #endif - tc3589x_gpio->irq_base = tc3589x->irq_base ? - tc3589x->irq_base + TC3589x_INT_GPIO(0) : 0; - /* Bring the GPIO module out of reset */ ret = tc3589x_set_bits(tc3589x, TC3589x_RSTCTRL, TC3589x_RSTCTRL_GPIRST, 0); if (ret < 0) - goto out_free; - - ret = tc3589x_gpio_irq_init(tc3589x_gpio, np); - if (ret) - goto out_free; + return ret; - ret = request_threaded_irq(irq, NULL, tc3589x_gpio_irq, IRQF_ONESHOT, - "tc3589x-gpio", tc3589x_gpio); + ret = devm_request_threaded_irq(&pdev->dev, + irq, NULL, tc3589x_gpio_irq, + IRQF_ONESHOT, "tc3589x-gpio", + tc3589x_gpio); if (ret) { dev_err(&pdev->dev, "unable to get irq: %d\n", ret); - goto out_free; + return ret; } ret = gpiochip_add(&tc3589x_gpio->chip); if (ret) { dev_err(&pdev->dev, "unable to add gpiochip: %d\n", ret); - goto out_freeirq; + return ret; + } + + ret = gpiochip_irqchip_add(&tc3589x_gpio->chip, + &tc3589x_gpio_irq_chip, + 0, + handle_simple_irq, + IRQ_TYPE_NONE); + if (ret) { + dev_err(&pdev->dev, + "could not connect irqchip to gpiochip\n"); + return ret; } if (pdata && pdata->setup) @@ -379,12 +306,6 @@ static int tc3589x_gpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tc3589x_gpio); return 0; - -out_freeirq: - free_irq(irq, tc3589x_gpio); -out_free: - kfree(tc3589x_gpio); - return ret; } static int tc3589x_gpio_remove(struct platform_device *pdev) @@ -392,7 +313,6 @@ static int tc3589x_gpio_remove(struct platform_device *pdev) struct tc3589x_gpio *tc3589x_gpio = platform_get_drvdata(pdev); struct tc3589x *tc3589x = tc3589x_gpio->tc3589x; struct tc3589x_gpio_platform_data *pdata = tc3589x->pdata->gpio; - int irq = platform_get_irq(pdev, 0); int ret; if (pdata && pdata->remove) @@ -405,10 +325,6 @@ static int tc3589x_gpio_remove(struct platform_device *pdev) return ret; } - free_irq(irq, tc3589x_gpio); - - kfree(tc3589x_gpio); - return 0; }
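The tc3589x conversion above, like the pca953x one earlier, drops the driver's private irq_domain and irq_base in favor of gpiochip_irqchip_add(), which installs the struct gpio_chip itself as the irq chip data; callbacks then recover driver state with container_of(). A generic sketch of that retrieval, using a hypothetical baz_gpio driver struct:

#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/kernel.h>

struct baz_gpio {
	struct gpio_chip chip;
	u32 irq_mask_cache;
};

static inline struct baz_gpio *to_baz(struct gpio_chip *gc)
{
	return container_of(gc, struct baz_gpio, chip);
}

static void baz_irq_mask(struct irq_data *d)
{
	/* gpiochip_irqchip_add() registered the gpio_chip as chip data. */
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
	struct baz_gpio *bg = to_baz(gc);

	bg->irq_mask_cache &= ~BIT(d->hwirq);
}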
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index 2b49f878b56..4e8fb8261a8 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -408,7 +408,7 @@ static struct tegra_gpio_soc_config tegra30_gpio_config = { .upper_offset = 0x80, }; -static struct of_device_id tegra_gpio_of_match[] = { +static const struct of_device_id tegra_gpio_of_match[] = { { .compatible = "nvidia,tegra30-gpio", .data = &tegra30_gpio_config }, { .compatible = "nvidia,tegra20-gpio", .data = &tegra20_gpio_config }, { }, @@ -458,10 +458,8 @@ static int tegra_gpio_probe(struct platform_device *pdev) tegra_gpio_banks = devm_kzalloc(&pdev->dev, tegra_gpio_bank_count * sizeof(*tegra_gpio_banks), GFP_KERNEL); - if (!tegra_gpio_banks) { - dev_err(&pdev->dev, "Couldn't allocate bank structure\n"); + if (!tegra_gpio_banks) return -ENODEV; - } irq_domain = irq_domain_add_linear(pdev->dev.of_node, tegra_gpio_chip.ngpio, diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c index f9a8fbde108..efc7c129016 100644 --- a/drivers/gpio/gpio-timberdale.c +++ b/drivers/gpio/gpio-timberdale.c @@ -224,6 +224,7 @@ static struct irq_chip timbgpio_irqchip = { static int timbgpio_probe(struct platform_device *pdev) { int err, i; + struct device *dev = &pdev->dev; struct gpio_chip *gc; struct timbgpio *tgpio; struct resource *iomem; @@ -231,35 +232,35 @@ static int timbgpio_probe(struct platform_device *pdev) int irq = platform_get_irq(pdev, 0); if (!pdata || pdata->nr_pins > 32) { - err = -EINVAL; - goto err_mem; + dev_err(dev, "Invalid platform data\n"); + return -EINVAL; } iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) { - err = -EINVAL; - goto err_mem; + dev_err(dev, "Unable to get resource\n"); + return -EINVAL; } - tgpio = kzalloc(sizeof(*tgpio), GFP_KERNEL); + tgpio = devm_kzalloc(dev, sizeof(struct timbgpio), GFP_KERNEL); if (!tgpio) { - err = -EINVAL; - goto err_mem; + dev_err(dev, "Memory alloc failed\n"); + return -EINVAL; } tgpio->irq_base = pdata->irq_base; spin_lock_init(&tgpio->lock); - if (!request_mem_region(iomem->start, resource_size(iomem), - DRIVER_NAME)) { - err = -EBUSY; - goto err_request; + if (!devm_request_mem_region(dev, iomem->start, resource_size(iomem), + DRIVER_NAME)) { + dev_err(dev, "Region already claimed\n"); + return -EBUSY; } - tgpio->membase = ioremap(iomem->start, resource_size(iomem)); + tgpio->membase = devm_ioremap(dev, iomem->start, resource_size(iomem)); if (!tgpio->membase) { - err = -ENOMEM; - goto err_ioremap; + dev_err(dev, "Cannot ioremap\n"); + return -ENOMEM; } gc = &tgpio->gpio; @@ -279,7 +280,7 @@ static int timbgpio_probe(struct platform_device *pdev) err = gpiochip_add(gc); if (err) - goto err_chipadd; + return err; platform_set_drvdata(pdev, tgpio); @@ -302,17 +303,6 @@ static int timbgpio_probe(struct platform_device *pdev) irq_set_chained_handler(irq, timbgpio_irq); return 0; - -err_chipadd: - iounmap(tgpio->membase); -err_ioremap: - release_mem_region(iomem->start, resource_size(iomem)); -err_request: - kfree(tgpio); -err_mem: - printk(KERN_ERR DRIVER_NAME": Failed to register GPIOs: %d\n", err); - - return err; } static int timbgpio_remove(struct platform_device *pdev) @@ -320,7 +310,6 @@ static int timbgpio_remove(struct platform_device *pdev) int err; struct timbgpio_platform_data *pdata = dev_get_platdata(&pdev->dev); struct timbgpio *tgpio = platform_get_drvdata(pdev); - struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); int irq = platform_get_irq(pdev, 0); if (irq >= 0 && tgpio->irq_base > 0) { @@ -338,10 +327,6 @@ static int timbgpio_remove(struct platform_device *pdev) if (err) printk(KERN_ERR DRIVER_NAME": failed to remove gpio_chip\n"); - 
iounmap(tgpio->membase); - release_mem_region(iomem->start, resource_size(iomem)); - kfree(tgpio); - return 0; } diff --git a/drivers/gpio/gpio-tps6586x.c b/drivers/gpio/gpio-tps6586x.c index 8994dfa1349..a69fbea4125 100644 --- a/drivers/gpio/gpio-tps6586x.c +++ b/drivers/gpio/gpio-tps6586x.c @@ -97,10 +97,8 @@ static int tps6586x_gpio_probe(struct platform_device *pdev) pdata = dev_get_platdata(pdev->dev.parent); tps6586x_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps6586x_gpio), GFP_KERNEL); - if (!tps6586x_gpio) { - dev_err(&pdev->dev, "Could not allocate tps6586x_gpio\n"); + if (!tps6586x_gpio) return -ENOMEM; - } tps6586x_gpio->parent = pdev->dev.parent; diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c index b6e818e6800..e2f8cda235e 100644 --- a/drivers/gpio/gpio-tps65910.c +++ b/drivers/gpio/gpio-tps65910.c @@ -123,10 +123,8 @@ static int tps65910_gpio_probe(struct platform_device *pdev) tps65910_gpio = devm_kzalloc(&pdev->dev, sizeof(*tps65910_gpio), GFP_KERNEL); - if (!tps65910_gpio) { - dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n"); + if (!tps65910_gpio) return -ENOMEM; - } tps65910_gpio->tps65910 = tps65910; diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index 792a05ad464..12481867daf 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -289,7 +289,7 @@ static int xgpio_of_probe(struct device_node *np) return 0; } -static struct of_device_id xgpio_of_match[] = { +static const struct of_device_id xgpio_of_match[] = { { .compatible = "xlnx,xps-gpio-1.00.a", }, { /* end of list */ }, }; diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c index 9bf5034b6cd..54e54e4cc6c 100644 --- a/drivers/gpio/gpio-zevio.c +++ b/drivers/gpio/gpio-zevio.c @@ -81,9 +81,15 @@ static inline void zevio_gpio_port_set(struct zevio_gpio *c, unsigned pin, static int zevio_gpio_get(struct gpio_chip *chip, unsigned pin) { struct zevio_gpio *controller = to_zevio_gpio(chip); + u32 val, dir; - /* Only reading allowed, so no spinlock needed */ - u32 val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT); + spin_lock(&controller->lock); + dir = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_DIRECTION); + if (dir & BIT(ZEVIO_GPIO_BIT(pin))) + val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_INPUT); + else + val = zevio_gpio_port_get(controller, pin, ZEVIO_GPIO_OUTPUT); + spin_unlock(&controller->lock); return (val >> ZEVIO_GPIO_BIT(pin)) & 0x1; } @@ -172,10 +178,8 @@ static int zevio_gpio_probe(struct platform_device *pdev) int status, i; controller = devm_kzalloc(&pdev->dev, sizeof(*controller), GFP_KERNEL); - if (!controller) { - dev_err(&pdev->dev, "not enough free memory\n"); + if (!controller) return -ENOMEM; - } /* Copy our reference */ controller->chip.gc = zevio_gpio_chip; @@ -198,7 +202,7 @@ static int zevio_gpio_probe(struct platform_device *pdev) return 0; } -static struct of_device_id zevio_gpio_of_match[] = { +static const struct of_device_id zevio_gpio_of_match[] = { { .compatible = "lsi,zevio-gpio", }, { }, }; @@ -209,7 +213,7 @@ static struct platform_driver zevio_gpio_driver = { .driver = { .name = "gpio-zevio", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(zevio_gpio_of_match), + .of_match_table = zevio_gpio_of_match, }, .probe = zevio_gpio_probe, }; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 401add28933..4a987917c18 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -449,9 +449,10 @@ acpi_gpio_adr_space_handler(u32 function, 
acpi_physical_address address, mutex_unlock(&achip->conn_lock); if (function == ACPI_WRITE) - gpiod_set_raw_value(desc, !!((1 << i) & *value)); + gpiod_set_raw_value_cansleep(desc, + !!((1 << i) & *value)); else - *value |= (u64)gpiod_get_raw_value(desc) << i; + *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i; } out: diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 2024d45e550..af7e25c9a9a 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -48,7 +48,7 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) if (ret < 0) return false; - gg_data->out_gpio = gpio_to_desc(ret + gc->base); + gg_data->out_gpio = gpiochip_get_desc(gc, ret); return true; } @@ -96,6 +96,20 @@ struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, } EXPORT_SYMBOL(of_get_named_gpiod_flags); +int of_get_named_gpio_flags(struct device_node *np, const char *list_name, + int index, enum of_gpio_flags *flags) +{ + struct gpio_desc *desc; + + desc = of_get_named_gpiod_flags(np, list_name, index, flags); + + if (IS_ERR(desc)) + return PTR_ERR(desc); + else + return desc_to_gpio(desc); +} +EXPORT_SYMBOL(of_get_named_gpio_flags); + /** * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags * @gc: pointer to the gpio_chip structure diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f48817d9748..d9c9cb4665d 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -1363,6 +1363,11 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, int parent_irq, irq_flow_handler_t parent_handler) { + if (gpiochip->can_sleep) { + chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n"); + return; + } + irq_set_chained_handler(parent_irq, parent_handler); /* * The parent irqchip is already using the chip_data for this @@ -1372,6 +1377,12 @@ void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, } EXPORT_SYMBOL_GPL(gpiochip_set_chained_irqchip); +/* + * This lock class tells lockdep that GPIO irqs are in a different + * category than their parents, so it won't report false recursion. + */ +static struct lock_class_key gpiochip_irq_lock_class; + /** * gpiochip_irq_map() - maps an IRQ into a GPIO irqchip * @d: the irqdomain used by this irqchip @@ -1388,22 +1399,35 @@ static int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, struct gpio_chip *chip = d->host_data; irq_set_chip_data(irq, chip); + irq_set_lockdep_class(irq, &gpiochip_irq_lock_class); irq_set_chip_and_handler(irq, chip->irqchip, chip->irq_handler); + /* Chips that can sleep need nested thread handlers */ + if (chip->can_sleep) + irq_set_nested_thread(irq, 1); #ifdef CONFIG_ARM set_irq_flags(irq, IRQF_VALID); #else irq_set_noprobe(irq); #endif - irq_set_irq_type(irq, chip->irq_default_type); + /* + * No set-up of the hardware will happen if IRQ_TYPE_NONE + * is passed as default type. 
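
For context, a minimal driver-side sketch of the registration this behavior targets, modeled on the tc3589x conversion earlier in this diff (not part of the patch; the foo_* names are placeholders):

	static struct irq_chip foo_gpio_irq_chip = {
		.name		= "foo-gpio",
		.irq_mask	= foo_gpio_irq_mask,	/* placeholder callbacks */
		.irq_unmask	= foo_gpio_irq_unmask,
		.irq_set_type	= foo_gpio_irq_set_type,
	};

	ret = gpiochip_irqchip_add(&foo->chip, &foo_gpio_irq_chip,
				   0,			/* allocate irq descriptors at runtime */
				   handle_simple_irq,
				   IRQ_TYPE_NONE);	/* leave trigger set-up to consumers */

With IRQ_TYPE_NONE the core only maps the interrupt; the trigger is first programmed when a consumer requests the line with an explicit type.
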
+ */ + if (chip->irq_default_type != IRQ_TYPE_NONE) + irq_set_irq_type(irq, chip->irq_default_type); return 0; } static void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq) { + struct gpio_chip *chip = d->host_data; + #ifdef CONFIG_ARM set_irq_flags(irq, 0); #endif + if (chip->can_sleep) + irq_set_nested_thread(irq, 0); irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); } @@ -1471,7 +1495,8 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * @first_irq: if not dynamically assigned, the base (first) IRQ to * allocate gpiochip irqs from * @handler: the irq handler to use (often a predefined irq core function) - * @type: the default type for IRQs on this irqchip + * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE + * to have the core avoid setting up any default type in the hardware. * * This function closely associates a certain irqchip with a certain * gpiochip, providing an irq domain to translate the local IRQs to @@ -2571,22 +2596,27 @@ void gpiod_add_lookup_table(struct gpiod_lookup_table *table) mutex_unlock(&gpio_lookup_lock); } -#ifdef CONFIG_OF static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, unsigned int idx, enum gpio_lookup_flags *flags) { + static const char *suffixes[] = { "gpios", "gpio" }; char prop_name[32]; /* 32 is max size of property name */ enum of_gpio_flags of_flags; struct gpio_desc *desc; + unsigned int i; - if (con_id) - snprintf(prop_name, 32, "%s-gpios", con_id); - else - snprintf(prop_name, 32, "gpios"); + for (i = 0; i < ARRAY_SIZE(suffixes); i++) { + if (con_id) + snprintf(prop_name, 32, "%s-%s", con_id, suffixes[i]); + else + snprintf(prop_name, 32, "%s", suffixes[i]); - desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, - &of_flags); + desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, + &of_flags); + if (!IS_ERR(desc)) + break; + } if (IS_ERR(desc)) return desc; @@ -2596,14 +2626,6 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, return desc; } -#else -static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, - unsigned int idx, - enum gpio_lookup_flags *flags) -{ - return ERR_PTR(-ENODEV); -} -#endif static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, unsigned int idx, @@ -2701,7 +2723,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, } /** - * gpio_get - obtain a GPIO for a given GPIO function + * gpiod_get - obtain a GPIO for a given GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer * @@ -2716,6 +2738,22 @@ struct gpio_desc *__must_check gpiod_get(struct device *dev, const char *con_id) EXPORT_SYMBOL_GPL(gpiod_get); /** + * gpiod_get_optional - obtain an optional GPIO for a given GPIO function + * @dev: GPIO consumer, can be NULL for system-global GPIOs + * @con_id: function within the GPIO consumer + * + * This is equivalent to gpiod_get(), except that when no GPIO was assigned to + * the requested function it will return NULL. This is convenient for drivers + * that need to handle optional GPIOs. 
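
A minimal consumer sketch of the NULL-versus-ERR_PTR contract (not part of the patch; the "reset" function name and the pdev device are hypothetical):

	struct gpio_desc *reset;

	reset = gpiod_get_optional(&pdev->dev, "reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);	/* a real error, e.g. -EPROBE_DEFER */
	if (reset)			/* NULL just means "no reset line wired up" */
		gpiod_direction_output(reset, 0);
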
+ */ +struct gpio_desc *__must_check gpiod_get_optional(struct device *dev, + const char *con_id) +{ + return gpiod_get_index_optional(dev, con_id, 0); +} +EXPORT_SYMBOL_GPL(gpiod_get_optional); + +/** * gpiod_get_index - obtain a GPIO from a multi-index GPIO function * @dev: GPIO consumer, can be NULL for system-global GPIOs * @con_id: function within the GPIO consumer @@ -2778,6 +2816,33 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, EXPORT_SYMBOL_GPL(gpiod_get_index); /** + * gpiod_get_index_optional - obtain an optional GPIO from a multi-index GPIO + * function + * @dev: GPIO consumer, can be NULL for system-global GPIOs + * @con_id: function within the GPIO consumer + * @index: index of the GPIO to obtain in the consumer + * + * This is equivalent to gpiod_get_index(), except that when no GPIO with the + * specified index was assigned to the requested function it will return NULL. + * This is convenient for drivers that need to handle optional GPIOs. + */ +struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev, + const char *con_id, + unsigned int index) +{ + struct gpio_desc *desc; + + desc = gpiod_get_index(dev, con_id, index); + if (IS_ERR(desc)) { + if (PTR_ERR(desc) == -ENOENT) + return NULL; + } + + return desc; +} +EXPORT_SYMBOL_GPL(gpiod_get_index_optional); + +/** * gpiod_put - dispose of a GPIO descriptor * @desc: GPIO descriptor to dispose of * diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index cf092941a9f..1a4103dd38d 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -15,6 +15,8 @@ #include <linux/err.h> #include <linux/device.h> +enum of_gpio_flags; + /** * struct acpi_gpio_info - ACPI GPIO specific information * @gpioint: if %true this GPIO is of type GpioInt otherwise type is GpioIo @@ -46,4 +48,7 @@ acpi_get_gpiod_by_index(struct device *dev, int index, int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); void gpiochip_free_own_desc(struct gpio_desc *desc); +struct gpio_desc *of_get_named_gpiod_flags(struct device_node *np, + const char *list_name, int index, enum of_gpio_flags *flags); + #endif /* GPIOLIB_H */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index e930d4fe29c..1ef5ab9c9d5 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c @@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, plane->crtc = crtc; plane->fb = crtc->primary->fb; + drm_framebuffer_reference(plane->fb); return 0; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index c786cd4f457..2a3ad24276f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, buffer->sgt = sgt; exynos_gem_obj->base.import_attach = attach; - DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr, + DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr, buffer->size); return &exynos_gem_obj->base; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index eb73e3bf2a0..4ac43818756 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 
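
(An editorial aside on the exynos_dsi fix this hunk is in the middle of: devm_ioremap_resource() returns an ERR_PTR() on failure and never NULL, so the old "if (!dsi->reg_base)" test could never fire. The canonical consumer pattern, which the lines below adopt, is:)

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
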
dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (!dsi->reg_base) { + if (IS_ERR(dsi->reg_base)) { dev_err(&pdev->dev, "failed to remap io region\n"); - return -EADDRNOTAVAIL; + return PTR_ERR(dsi->reg_base); } dsi->phy = devm_phy_get(&pdev->dev, "dsim"); diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 7afead9c3f3..852f2dadaeb 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c @@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos) win_data->enabled = true; - DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr); + DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr); if (ctx->vblank_on) schedule_work(&ctx->work); diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 96177eec0a0..eedb023af27 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1833,7 +1833,6 @@ int i915_driver_unload(struct drm_device *dev) flush_workqueue(dev_priv->wq); mutex_lock(&dev->struct_mutex); - i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); WARN_ON(dev_priv->mm.aliasing_ppgtt); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ec82f6bff12..388c028e223 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -242,18 +242,6 @@ struct intel_ddi_plls { #define WATCH_LISTS 0 #define WATCH_GTT 0 -#define I915_GEM_PHYS_CURSOR_0 1 -#define I915_GEM_PHYS_CURSOR_1 2 -#define I915_GEM_PHYS_OVERLAY_REGS 3 -#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) - -struct drm_i915_gem_phys_object { - int id; - struct page **page_list; - drm_dma_handle_t *handle; - struct drm_i915_gem_object *cur_obj; -}; - struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -1187,9 +1175,6 @@ struct i915_gem_mm { /** Bit 6 swizzling required for Y tiling */ uint32_t bit_6_swizzle_y; - /* storage for physical objects */ - struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; - /* accounting, useful for userland debugging */ spinlock_t object_stat_lock; size_t object_memory; @@ -1769,7 +1754,7 @@ struct drm_i915_gem_object { struct drm_file *pin_filp; /** for phy allocated objects */ - struct drm_i915_gem_phys_object *phys_obj; + drm_dma_handle_t *phys_handle; }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) @@ -1954,6 +1939,9 @@ struct drm_i915_cmd_table { #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0x00F0) == 0x0020) +/* ULX machines are also considered ULT. 
*/ +#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ + (dev)->pdev->device == 0x0A1E) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) /* @@ -2201,10 +2189,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma); #define PIN_MAPPABLE 0x1 #define PIN_NONBLOCK 0x2 #define PIN_GLOBAL 0x4 +#define PIN_OFFSET_BIAS 0x8 +#define PIN_OFFSET_MASK (~4095) int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - unsigned flags); + uint64_t flags); int __must_check i915_vma_unbind(struct i915_vma *vma); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); @@ -2331,13 +2321,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_ring_buffer *pipelined); void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); -int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj, - int id, +int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); -void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj); -void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_open(struct drm_device *dev, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); @@ -2462,6 +2447,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, unsigned cache_level, + unsigned long start, + unsigned long end, unsigned flags); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); int i915_gem_evict_everything(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 2871ce75f43..3326770c9ed 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -43,10 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o static __must_check int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, bool readonly); -static int i915_gem_phys_pwrite(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file); static void i915_gem_write_fence(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj); @@ -209,6 +205,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, return 0; } +static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj) +{ + drm_dma_handle_t *phys = obj->phys_handle; + + if (!phys) + return; + + if (obj->madv == I915_MADV_WILLNEED) { + struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + char *vaddr = phys->vaddr; + int i; + + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page = shmem_read_mapping_page(mapping, i); + if (!IS_ERR(page)) { + char *dst = kmap_atomic(page); + memcpy(dst, vaddr, PAGE_SIZE); + drm_clflush_virt_range(dst, PAGE_SIZE); + kunmap_atomic(dst); + + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + } + vaddr += PAGE_SIZE; + } + i915_gem_chipset_flush(obj->base.dev); + } + +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); +#endif + drm_pci_free(obj->base.dev, phys); + obj->phys_handle = NULL; +} + +int +i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, + int align) +{ + drm_dma_handle_t *phys; + struct address_space *mapping; + char *vaddr; + int 
i; + + if (obj->phys_handle) { + if ((unsigned long)obj->phys_handle->vaddr & (align -1)) + return -EBUSY; + + return 0; + } + + if (obj->madv != I915_MADV_WILLNEED) + return -EFAULT; + + if (obj->base.filp == NULL) + return -EINVAL; + + /* create a new object */ + phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); + if (!phys) + return -ENOMEM; + + vaddr = phys->vaddr; +#ifdef CONFIG_X86 + set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE); +#endif + mapping = file_inode(obj->base.filp)->i_mapping; + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; + char *src; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) { +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); +#endif + drm_pci_free(obj->base.dev, phys); + return PTR_ERR(page); + } + + src = kmap_atomic(page); + memcpy(vaddr, src, PAGE_SIZE); + kunmap_atomic(src); + + mark_page_accessed(page); + page_cache_release(page); + + vaddr += PAGE_SIZE; + } + + obj->phys_handle = phys; + return 0; +} + +static int +i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_device *dev = obj->base.dev; + void *vaddr = obj->phys_handle->vaddr + args->offset; + char __user *user_data = to_user_ptr(args->data_ptr); + + if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { + unsigned long unwritten; + + /* The physical object once assigned is fixed for the lifetime + * of the obj, so we can safely drop the lock and continue + * to access vaddr. + */ + mutex_unlock(&dev->struct_mutex); + unwritten = copy_from_user(vaddr, user_data, args->size); + mutex_lock(&dev->struct_mutex); + if (unwritten) + return -EFAULT; + } + + i915_gem_chipset_flush(dev); + return 0; +} + void *i915_gem_object_alloc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -921,8 +1039,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj->phys_obj) { - ret = i915_gem_phys_pwrite(dev, obj, args, file); + if (obj->phys_handle) { + ret = i915_gem_phys_pwrite(obj, args, file); goto out; } @@ -3208,12 +3326,14 @@ static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, unsigned alignment, - unsigned flags) + uint64_t flags) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 size, fence_size, fence_alignment, unfenced_alignment; - size_t gtt_max = + unsigned long start = + flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; + unsigned long end = flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; struct i915_vma *vma; int ret; @@ -3242,11 +3362,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->base.size > gtt_max) { - DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", + if (obj->base.size > end) { + DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", obj->base.size, flags & PIN_MAPPABLE ? 
"mappable" : "total", - gtt_max); + end); return ERR_PTR(-E2BIG); } @@ -3263,12 +3383,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, search_free: ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, - obj->cache_level, 0, gtt_max, + obj->cache_level, + start, end, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); if (ret) { ret = i915_gem_evict_something(dev, vm, size, alignment, - obj->cache_level, flags); + obj->cache_level, + start, end, + flags); if (ret == 0) goto search_free; @@ -3828,11 +3951,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return ret; } +static bool +i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + + if (alignment && + vma->node.start & (alignment - 1)) + return true; + + if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) + return true; + + if (flags & PIN_OFFSET_BIAS && + vma->node.start < (flags & PIN_OFFSET_MASK)) + return true; + + return false; +} + int i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - unsigned flags) + uint64_t flags) { struct i915_vma *vma; int ret; @@ -3845,15 +3987,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; - if ((alignment && - vma->node.start & (alignment - 1)) || - (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) { + if (i915_vma_misplaced(vma, alignment, flags)) { WARN(vma->pin_count, "bo is already pinned with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", i915_gem_obj_offset(obj, vm), alignment, - flags & PIN_MAPPABLE, + !!(flags & PIN_MAPPABLE), obj->map_and_fenceable); ret = i915_vma_unbind(vma); if (ret) @@ -4163,9 +4303,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) trace_i915_gem_object_destroy(obj); - if (obj->phys_obj) - i915_gem_detach_phys_object(dev, obj); - list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { int ret; @@ -4183,6 +4320,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) } } + i915_gem_object_detach_phys(obj); + /* Stolen objects don't hold a ref, but do hold pin count. Fix that up * before progressing. */ if (obj->stolen) @@ -4646,190 +4785,6 @@ i915_gem_load(struct drm_device *dev) register_shrinker(&dev_priv->mm.inactive_shrinker); } -/* - * Create a physically contiguous memory object for this object - * e.g. 
for cursor + overlay regs - */ -static int i915_gem_init_phys_object(struct drm_device *dev, - int id, int size, int align) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_phys_object *phys_obj; - int ret; - - if (dev_priv->mm.phys_objs[id - 1] || !size) - return 0; - - phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL); - if (!phys_obj) - return -ENOMEM; - - phys_obj->id = id; - - phys_obj->handle = drm_pci_alloc(dev, size, align); - if (!phys_obj->handle) { - ret = -ENOMEM; - goto kfree_obj; - } -#ifdef CONFIG_X86 - set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); -#endif - - dev_priv->mm.phys_objs[id - 1] = phys_obj; - - return 0; -kfree_obj: - kfree(phys_obj); - return ret; -} - -static void i915_gem_free_phys_object(struct drm_device *dev, int id) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_phys_object *phys_obj; - - if (!dev_priv->mm.phys_objs[id - 1]) - return; - - phys_obj = dev_priv->mm.phys_objs[id - 1]; - if (phys_obj->cur_obj) { - i915_gem_detach_phys_object(dev, phys_obj->cur_obj); - } - -#ifdef CONFIG_X86 - set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); -#endif - drm_pci_free(dev, phys_obj->handle); - kfree(phys_obj); - dev_priv->mm.phys_objs[id - 1] = NULL; -} - -void i915_gem_free_all_phys_object(struct drm_device *dev) -{ - int i; - - for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) - i915_gem_free_phys_object(dev, i); -} - -void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj) -{ - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - char *vaddr; - int i; - int page_count; - - if (!obj->phys_obj) - return; - vaddr = obj->phys_obj->handle->vaddr; - - page_count = obj->base.size / PAGE_SIZE; - for (i = 0; i < page_count; i++) { - struct page *page = shmem_read_mapping_page(mapping, i); - if (!IS_ERR(page)) { - char *dst = kmap_atomic(page); - memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); - kunmap_atomic(dst); - - drm_clflush_pages(&page, 1); - - set_page_dirty(page); - mark_page_accessed(page); - page_cache_release(page); - } - } - i915_gem_chipset_flush(dev); - - obj->phys_obj->cur_obj = NULL; - obj->phys_obj = NULL; -} - -int -i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj, - int id, - int align) -{ - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; - int page_count; - int i; - - if (id > I915_MAX_PHYS_OBJECT) - return -EINVAL; - - if (obj->phys_obj) { - if (obj->phys_obj->id == id) - return 0; - i915_gem_detach_phys_object(dev, obj); - } - - /* create a new object */ - if (!dev_priv->mm.phys_objs[id - 1]) { - ret = i915_gem_init_phys_object(dev, id, - obj->base.size, align); - if (ret) { - DRM_ERROR("failed to init phys object %d size: %zu\n", - id, obj->base.size); - return ret; - } - } - - /* bind to the object */ - obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; - obj->phys_obj->cur_obj = obj; - - page_count = obj->base.size / PAGE_SIZE; - - for (i = 0; i < page_count; i++) { - struct page *page; - char *dst, *src; - - page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) - return PTR_ERR(page); - - src = kmap_atomic(page); - dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); - memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(src); - - mark_page_accessed(page); - page_cache_release(page); - } - - return 0; 
-} - -static int -i915_gem_phys_pwrite(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) -{ - void *vaddr = obj->phys_obj->handle->vaddr + args->offset; - char __user *user_data = to_user_ptr(args->data_ptr); - - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { - unsigned long unwritten; - - /* The physical object once assigned is fixed for the lifetime - * of the obj, so we can safely drop the lock and continue - * to access vaddr. - */ - mutex_unlock(&dev->struct_mutex); - unwritten = copy_from_user(vaddr, user_data, args->size); - mutex_lock(&dev->struct_mutex); - if (unwritten) - return -EFAULT; - } - - i915_gem_chipset_flush(dev); - return 0; -} - void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 75fca63dc8c..bbf4b12d842 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) int i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, int min_size, unsigned alignment, unsigned cache_level, + unsigned long start, unsigned long end, unsigned flags) { - struct drm_i915_private *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct i915_vma *vma; int ret = 0; @@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, */ INIT_LIST_HEAD(&unwind_list); - if (flags & PIN_MAPPABLE) { - BUG_ON(!i915_is_ggtt(vm)); + if (start != 0 || end != vm->total) { drm_mm_init_scan_with_range(&vm->mm, min_size, - alignment, cache_level, 0, - dev_priv->gtt.mappable_end); + alignment, cache_level, + start, end); } else drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2c9d9cbaf65..20fef6c5026 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -35,6 +35,9 @@ #define __EXEC_OBJECT_HAS_PIN (1<<31) #define __EXEC_OBJECT_HAS_FENCE (1<<30) +#define __EXEC_OBJECT_NEEDS_BIAS (1<<28) + +#define BATCH_OFFSET_BIAS (256*1024) struct eb_vmas { struct list_head vmas; @@ -545,7 +548,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool need_fence; - unsigned flags; + uint64_t flags; int ret; flags = 0; @@ -559,6 +562,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, if (entry->flags & EXEC_OBJECT_NEEDS_GTT) flags |= PIN_GLOBAL; + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) + flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); if (ret) @@ -592,6 +597,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, return 0; } +static bool +eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access) +{ + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + struct drm_i915_gem_object *obj = vma->obj; + bool need_fence, need_mappable; + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = need_fence || need_reloc_mappable(vma); + + WARN_ON((need_mappable || need_fence) && + 
!i915_is_ggtt(vma->vm)); + + if (entry->alignment && + vma->node.start & (entry->alignment - 1)) + return true; + + if (need_mappable && !obj->map_and_fenceable) + return true; + + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && + vma->node.start < BATCH_OFFSET_BIAS) + return true; + + return false; +} + static int i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, struct list_head *vmas, @@ -653,26 +688,10 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, /* Unbind any ill-fitting objects or pin. */ list_for_each_entry(vma, vmas, exec_list) { - struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - bool need_fence, need_mappable; - - obj = vma->obj; - if (!drm_mm_node_allocated(&vma->node)) continue; - need_fence = - has_fenced_gpu_access && - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(vma); - - WARN_ON((need_mappable || need_fence) && - !i915_is_ggtt(vma->vm)); - - if ((entry->alignment && - vma->node.start & (entry->alignment - 1)) || - (need_mappable && !obj->map_and_fenceable)) + if (eb_vma_misplaced(vma, has_fenced_gpu_access)) ret = i915_vma_unbind(vma); else ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); @@ -773,9 +792,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, * relocations were valid. */ for (j = 0; j < exec[i].relocation_count; j++) { - if (copy_to_user(&user_relocs[j].presumed_offset, - &invalid_offset, - sizeof(invalid_offset))) { + if (__copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { ret = -EFAULT; mutex_lock(&dev->struct_mutex); goto err; @@ -999,6 +1018,25 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return 0; } +static struct drm_i915_gem_object * +eb_get_batch(struct eb_vmas *eb) +{ + struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); + + /* + * SNA is doing fancy tricks with compressing batch buffers, which leads + * to negative relocation deltas. Usually that works out ok since the + * relocate address is still positive, except when the batch is placed + * very low in the GTT. Ensure this doesn't happen. + * + * Note that actual hangs have only been observed on gen7, but for + * paranoia do it everywhere. + */ + vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; + + return vma->obj; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, @@ -1153,7 +1191,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; /* take note of the batch buffer before we might reorder the lists */ - batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; + batch_obj = eb_get_batch(eb); /* Move the objects en-masse into the GTT, evicting if necessary. */ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; @@ -1355,18 +1393,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { + struct drm_i915_gem_exec_object __user *user_exec_list = + to_user_ptr(args->buffers_ptr); + /* Copy the new buffer offsets back to the user's exec list. */ - for (i = 0; i < args->buffer_count; i++) - exec_list[i].offset = exec2_list[i].offset; - /* ... 
and back out to userspace */ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + break; + } } } @@ -1412,14 +1453,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec2_list, - sizeof(*exec2_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + struct drm_i915_gem_exec_object2 *user_exec_list = + to_user_ptr(args->buffers_ptr); + int i; + + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user\n", + args->buffer_count); + break; + } } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ab5e93c30aa..5deb22864c5 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -34,25 +34,35 @@ static void gen8_setup_private_ppat(struct drm_i915_private *dev_priv); bool intel_enable_ppgtt(struct drm_device *dev, bool full) { - if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) + if (i915.enable_ppgtt == 0) return false; if (i915.enable_ppgtt == 1 && full) return false; + return true; +} + +static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) +{ + if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) + return 0; + + if (enable_ppgtt == 1) + return 1; + + if (enable_ppgtt == 2 && HAS_PPGTT(dev)) + return 2; + #ifdef CONFIG_INTEL_IOMMU /* Disable ppgtt on SNB if VT-d is on. */ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { DRM_INFO("Disabling PPGTT because VT-d is on\n"); - return false; + return 0; } #endif - /* Full ppgtt disabled by default for now due to issues. */ - if (full) - return false; /* HAS_PPGTT(dev) */ - else - return HAS_ALIASING_PPGTT(dev); + return HAS_ALIASING_PPGTT(dev) ? 1 : 0; } #define GEN6_PPGTT_PD_ENTRIES 512 @@ -1079,7 +1089,9 @@ alloc: if (ret == -ENOSPC && !retried) { ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, GEN6_PD_SIZE, GEN6_PD_ALIGN, - I915_CACHE_NONE, 0); + I915_CACHE_NONE, + 0, dev_priv->gtt.base.total, + 0); if (ret) return ret; @@ -2031,6 +2043,14 @@ int i915_gem_gtt_init(struct drm_device *dev) gtt->base.total >> 20); DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20); DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); + /* + * i915.enable_ppgtt is read-only, so do an early pass to validate the + * user's requested state against the hardware/driver capabilities. We + * do this now so that we can print out any log messages once rather + * than every time we check intel_enable_ppgtt(). 
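
For reference, the net effect of sanitize_enable_ppgtt() above (editor's summary, derived from the function body itself):

	/*   i915.enable_ppgtt=0, or no aliasing-PPGTT hardware  -> 0 (disabled)
	 *   i915.enable_ppgtt=1                                 -> 1 (aliasing)
	 *   i915.enable_ppgtt=2 on full-PPGTT capable hardware  -> 2 (full)
	 *   anything else -> 1, except SNB with VT-d on, which drops to 0
	 */
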
+ */ + i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 7753249b3a9..f98ba4e6e70 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev, spin_lock(&dev_priv->irq_lock); for (i = 1; i < HPD_NUM_PINS; i++) { - WARN_ONCE(hpd[i] & hotplug_trigger && - dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED, - "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", - hotplug_trigger, i, hpd[i]); + if (hpd[i] & hotplug_trigger && + dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { + /* + * On GMCH platforms the interrupt mask bits only + * prevent irq generation, not the setting of the + * hotplug bits itself. So only WARN about unexpected + * interrupts on saner platforms. + */ + WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), + "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", + hotplug_trigger, i, hpd[i]); + + continue; + } if (!(hpd[i] & hotplug_trigger) || dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9f5b18d9d88..c77af69c2d8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -827,6 +827,7 @@ enum punit_power_well { # define MI_FLUSH_ENABLE (1 << 12) # define ASYNC_FLIP_PERF_DISABLE (1 << 14) # define MODE_IDLE (1 << 9) +# define STOP_RING (1 << 8) #define GEN6_GT_MODE 0x20d0 #define GEN7_GT_MODE 0x7008 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index fa486c5fbb0..aff4a113cda 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -560,47 +560,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) dev_priv->vbt.edp_pps = *edp_pps; - dev_priv->vbt.edp_rate = edp_link_params->rate ? 
DP_LINK_BW_2_7 : - DP_LINK_BW_1_62; + switch (edp_link_params->rate) { + case EDP_RATE_1_62: + dev_priv->vbt.edp_rate = DP_LINK_BW_1_62; + break; + case EDP_RATE_2_7: + dev_priv->vbt.edp_rate = DP_LINK_BW_2_7; + break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n", + edp_link_params->rate); + break; + } + switch (edp_link_params->lanes) { - case 0: + case EDP_LANE_1: dev_priv->vbt.edp_lanes = 1; break; - case 1: + case EDP_LANE_2: dev_priv->vbt.edp_lanes = 2; break; - case 3: - default: + case EDP_LANE_4: dev_priv->vbt.edp_lanes = 4; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n", + edp_link_params->lanes); + break; } + switch (edp_link_params->preemphasis) { - case 0: + case EDP_PREEMPHASIS_NONE: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; break; - case 1: + case EDP_PREEMPHASIS_3_5dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; break; - case 2: + case EDP_PREEMPHASIS_6dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; break; - case 3: + case EDP_PREEMPHASIS_9_5dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", + edp_link_params->preemphasis); + break; } + switch (edp_link_params->vswing) { - case 0: + case EDP_VSWING_0_4V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; break; - case 1: + case EDP_VSWING_0_6V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; break; - case 2: + case EDP_VSWING_0_8V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; break; - case 3: + case EDP_VSWING_1_2V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", + edp_link_params->vswing); + break; } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dae976f51d8..5b60e25baa3 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7825,14 +7825,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = i915_gem_obj_ggtt_offset(obj); } else { int align = IS_I830(dev) ? 16 * 1024 : 256; - ret = i915_gem_attach_phys_object(dev, obj, - (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, - align); + ret = i915_gem_object_attach_phys(obj, align); if (ret) { DRM_DEBUG_KMS("failed to attach phys object\n"); goto fail_locked; } - addr = obj->phys_obj->handle->busaddr; + addr = obj->phys_handle->busaddr; } if (IS_GEN2(dev)) @@ -7840,10 +7838,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, finish: if (intel_crtc->cursor_bo) { - if (INTEL_INFO(dev)->cursor_needs_physical) { - if (intel_crtc->cursor_bo != obj) - i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); - } else + if (!INTEL_INFO(dev)->cursor_needs_physical) i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } @@ -9654,11 +9649,22 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(pipe_src_w); PIPE_CONF_CHECK_I(pipe_src_h); - PIPE_CONF_CHECK_I(gmch_pfit.control); - /* pfit ratios are autocomputed by the hw on gen4+ */ - if (INTEL_INFO(dev)->gen < 4) - PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); - PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); + /* + * FIXME: BIOS likes to set up a cloned config with lvds+external + * screen. Since we don't yet re-compute the pipe config when moving + * just the lvds port away to another pipe the sw tracking won't match. 
+ * + * Proper atomic modesets with recomputed global state will fix this. + * Until then just don't check gmch state for inherited modes. + */ + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) { + PIPE_CONF_CHECK_I(gmch_pfit.control); + /* pfit ratios are autocomputed by the hw on gen4+ */ + if (INTEL_INFO(dev)->gen < 4) + PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); + PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits); + } + PIPE_CONF_CHECK_I(pch_pfit.enabled); if (current_config->pch_pfit.enabled) { PIPE_CONF_CHECK_I(pch_pfit.pos); @@ -11384,15 +11390,6 @@ void intel_modeset_init(struct drm_device *dev) } } -static void -intel_connector_break_all_links(struct intel_connector *connector) -{ - connector->base.dpms = DRM_MODE_DPMS_OFF; - connector->base.encoder = NULL; - connector->encoder->connectors_active = false; - connector->encoder->base.crtc = NULL; -} - static void intel_enable_pipe_a(struct drm_device *dev) { struct intel_connector *connector; @@ -11474,8 +11471,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) if (connector->encoder->base.crtc != &crtc->base) continue; - intel_connector_break_all_links(connector); + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; } + /* multiple connectors may have the same encoder: + * handle them and break crtc link separately */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) + if (connector->encoder->base.crtc == &crtc->base) { + connector->encoder->base.crtc = NULL; + connector->encoder->connectors_active = false; + } WARN_ON(crtc->active); crtc->base.enabled = false; @@ -11557,6 +11563,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) drm_get_encoder_name(&encoder->base)); encoder->disable(encoder); } + encoder->base.crtc = NULL; + encoder->connectors_active = false; /* Inconsistent output/port/pipe state happens presumably due to * a bug in one of the get_hw_state functions. 
Or someplace else @@ -11567,8 +11575,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) base.head) { if (connector->encoder != encoder) continue; - - intel_connector_break_all_links(connector); + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; } } /* Enabled encoders without active connectors will be fixed in @@ -11616,6 +11624,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) base.head) { memset(&crtc->config, 0, sizeof(crtc->config)); + crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; + crtc->active = dev_priv->display.get_pipe_config(crtc, &crtc->config); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d2a55884ad5..2a00cb828d2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -105,7 +105,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) case DP_LINK_BW_2_7: break; case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ - if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && + if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || + INTEL_INFO(dev)->gen >= 8) && intel_dp->dpcd[DP_DPCD_REV] >= 0x12) max_link_bw = DP_LINK_BW_5_4; else @@ -120,6 +121,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) return max_link_bw; } +static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + u8 source_max, sink_max; + + source_max = 4; + if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && + (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) + source_max = 2; + + sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + + return min(source_max, sink_max); +} + /* * The units on the numbers in the next two are... bizarre. Examples will * make it clearer; this one parallels an example in the eDP spec. @@ -170,7 +187,7 @@ intel_dp_mode_valid(struct drm_connector *connector, } max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); - max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); + max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); @@ -750,8 +767,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc *intel_crtc = encoder->new_crtc; struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; - int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + int min_lane_count = 1; + int max_lane_count = intel_dp_max_lane_count(intel_dp); /* Conveniently, the link BW constants become indices with a shift...*/ + int min_clock = 0; int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; int bpp, mode_rate; static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; @@ -784,19 +803,38 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. 
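
A worked example of the link budget this loop evaluates (editor's illustration with standard 1080p numbers, not taken from the patch; intel_dp_link_required() and intel_dp_max_data_rate() are defined earlier in this file and reduce to roughly clock*bpp/10 and clock*lanes*8/10):

	mode_rate  = intel_dp_link_required(148500 /* kHz */, 24 /* bpp */)
	           = 148500 * 24 / 10    = 356400
	link_avail = intel_dp_max_data_rate(270000 /* DP_LINK_BW_2_7 */, 2 /* lanes */)
	           = 270000 * 2 * 8 / 10 = 432000

	356400 <= 432000, so two lanes at 2.7 GHz carry this mode; a single lane
	(216000) would not, and the loop steps on to the next configuration.
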
*/ bpp = pipe_config->pipe_bpp; - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && - dev_priv->vbt.edp_bpp < bpp) { - DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", - dev_priv->vbt.edp_bpp); - bpp = dev_priv->vbt.edp_bpp; + if (is_edp(intel_dp)) { + if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) { + DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", + dev_priv->vbt.edp_bpp); + bpp = dev_priv->vbt.edp_bpp; + } + + if (IS_BROADWELL(dev)) { + /* Yes, it's an ugly hack. */ + min_lane_count = max_lane_count; + DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", + min_lane_count); + } else if (dev_priv->vbt.edp_lanes) { + min_lane_count = min(dev_priv->vbt.edp_lanes, + max_lane_count); + DRM_DEBUG_KMS("using min %u lanes per VBT\n", + min_lane_count); + } + + if (dev_priv->vbt.edp_rate) { + min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock); + DRM_DEBUG_KMS("using min %02x link bw per VBT\n", + bws[min_clock]); + } } for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, bpp); - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { - for (clock = 0; clock <= max_clock; clock++) { + for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = min_clock; clock <= max_clock; clock++) { link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); link_avail = intel_dp_max_data_rate(link_clock, lane_count); @@ -3619,7 +3657,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, { struct drm_connector *connector = &intel_connector->base; struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *fixed_mode = NULL; bool has_dpcd; @@ -3629,6 +3668,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, if (!is_edp(intel_dp)) return true; + /* The VDD bit needs a power domain reference, so if the bit is already + * enabled when we boot, grab this reference. */ + if (edp_have_panel_vdd(intel_dp)) { + enum intel_display_power_domain power_domain; + power_domain = intel_display_port_power_domain(intel_encoder); + intel_display_power_get(dev_priv, power_domain); + } + /* Cache DPCD and EDID for edp. */ intel_edp_panel_vdd_on(intel_dp); has_dpcd = intel_dp_get_dpcd(intel_dp); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0542de98226..328b1a70264 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -236,7 +236,8 @@ struct intel_crtc_config { * tracked with quirk flags so that fastboot and state checker can act * accordingly. 
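
A condensed sketch (editor's illustration, assembled from the intel_display.c hunks earlier in this diff) of how the new quirk travels from state readout to the state checker:

	/* intel_modeset_readout_hw_state(): modes taken over from firmware
	 * are flagged ... */
	crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;

	/* intel_pipe_config_compare(): ... and gmch pfit state is then
	 * exempted from checking for exactly those modes */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
		PIPE_CONF_CHECK_I(gmch_pfit.control);
		/* ... */
	}
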
*/ -#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ +#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ +#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */ unsigned long quirks; /* User requested mode, only valid as a starting point to diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index b4d44e62f0c..f73ba5e6b7a 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper, mutex_lock(&dev->struct_mutex); + if (intel_fb && + (sizes->fb_width > intel_fb->base.width || + sizes->fb_height > intel_fb->base.height)) { + DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d)," + " releasing it\n", + intel_fb->base.width, intel_fb->base.height, + sizes->fb_width, sizes->fb_height); + drm_framebuffer_unreference(&intel_fb->base); + intel_fb = ifbdev->fb = NULL; + } if (!intel_fb || WARN_ON(!intel_fb->obj)) { DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n"); ret = intelfb_alloc(helper, sizes); @@ -377,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, height); } + /* No preferred mode marked by the EDID? Are there any modes? */ + if (!modes[i] && !list_empty(&connector->modes)) { + DRM_DEBUG_KMS("using first mode listed on connector %s\n", + drm_get_connector_name(connector)); + modes[i] = list_first_entry(&connector->modes, + struct drm_display_mode, + head); + } + /* last resort: use current mode */ if (!modes[i]) { /* diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b0413e19062..157267aa356 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder) } } -static int hdmi_portclock_limit(struct intel_hdmi *hdmi) +static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) { struct drm_device *dev = intel_hdmi_to_dev(hdmi); - if (!hdmi->has_hdmi_sink || IS_G4X(dev)) + if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) return 165000; else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) return 300000; @@ -837,7 +837,8 @@ static enum drm_mode_status intel_hdmi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector))) + if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector), + true)) return MODE_CLOCK_HIGH; if (mode->clock < 20000) return MODE_CLOCK_LOW; @@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2; - int portclock_limit = hdmi_portclock_limit(intel_hdmi); + int portclock_limit = hdmi_portclock_limit(intel_hdmi, false); int desired_bpp; if (intel_hdmi->color_range_auto) { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d8adc9104dc..129db0c7d83 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) struct overlay_registers __iomem *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; + regs = 
(struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else regs = io_mapping_map_wc(dev_priv->gtt.mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); @@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev) overlay->reg_bo = reg_bo; if (OVERLAY_NEEDS_PHYSICAL(dev)) { - ret = i915_gem_attach_phys_object(dev, reg_bo, - I915_GEM_PHYS_OVERLAY_REGS, - PAGE_SIZE); + ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; } - overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; + overlay->flip_addr = reg_bo->phys_handle->busaddr; } else { ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); if (ret) { @@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) /* Cast to make sparse happy, but it's wc memory anyway, so * equivalent to the wc io mapping on X86. */ regs = (struct overlay_registers __iomem *) - overlay->reg_bo->phys_obj->handle->vaddr; + overlay->reg_bo->phys_handle->vaddr; else regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); @@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; + error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; else error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 0eead16aeda..cb8cfb7e097 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -492,6 +492,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, enum pipe pipe = intel_get_pipe_from_connector(connector); u32 freq; unsigned long flags; + u64 n; if (!panel->backlight.present || pipe == INVALID_PIPE) return; @@ -502,10 +503,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, /* scale to hardware max, but be careful to not overflow */ freq = panel->backlight.max; - if (freq < max) - level = level * freq / max; - else - level = freq / max * level; + n = (u64)level * freq; + do_div(n, max); + level = n; panel->backlight.level = level; if (panel->backlight.device) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 19e94c3edc1..d93dcf683e8 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2095,6 +2095,43 @@ static void intel_print_wm_latency(struct drm_device *dev, } } +static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, + uint16_t wm[5], uint16_t min) +{ + int level, max_level = ilk_wm_max_level(dev_priv->dev); + + if (wm[0] >= min) + return false; + + wm[0] = max(wm[0], min); + for (level = 1; level <= max_level; level++) + wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); + + return true; +} + +static void snb_wm_latency_quirk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool changed; + + /* + * The BIOS provided WM memory latency values are often + * inadequate for high resolution displays. Adjust them. 
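
Worked through (editor's note, following ilk_increase_wm_latency() above; the understanding here is that WM0 latency is kept in 0.1 us units while WM1+ use 0.5 us units, which is what the divide-by-5 converts between). With min = 12, i.e. 1.2 us:

	wm[0]    = max(wm[0], 12);                      /* at least 1.2 us */
	wm[1..N] = max(wm[level], DIV_ROUND_UP(12, 5)); /* at least 3, i.e. 1.5 us */

so every enabled watermark level ends up covering the quirked 1.2 us minimum.
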
+ */ + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + + if (!changed) + return; + + DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); + intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); +} + static void ilk_setup_wm_latency(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2112,6 +2149,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev) intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); + + if (IS_GEN6(dev)) + snb_wm_latency_quirk(dev); } static void ilk_compute_wm_parameters(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 6bc68bdcf43..79fb4cc2137 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) I915_WRITE(HWS_PGA, addr); } -static int init_ring_common(struct intel_ring_buffer *ring) +static bool stop_ring(struct intel_ring_buffer *ring) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj = ring->obj; - int ret = 0; - u32 head; + struct drm_i915_private *dev_priv = to_i915(ring->dev); - gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); + if (!IS_GEN2(ring->dev)) { + I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); + if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { + DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); + return false; + } + } - /* Stop the ring if it's running. 
*/ I915_WRITE_CTL(ring, 0); I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); - if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) - DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); - if (I915_NEED_GFX_HWS(dev)) - intel_ring_setup_status_page(ring); - else - ring_setup_phys_status_page(ring); + if (!IS_GEN2(ring->dev)) { + (void)I915_READ_CTL(ring); + I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); + } - head = I915_READ_HEAD(ring) & HEAD_ADDR; + return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; +} - /* G45 ring initialization fails to reset head to zero */ - if (head != 0) { +static int init_ring_common(struct intel_ring_buffer *ring) +{ + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj = ring->obj; + int ret = 0; + + gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); + + if (!stop_ring(ring)) { + /* G45 ring initialization often fails to reset head to zero */ DRM_DEBUG_KMS("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, @@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_READ_TAIL(ring), I915_READ_START(ring)); - I915_WRITE_HEAD(ring, 0); - - if (I915_READ_HEAD(ring) & HEAD_ADDR) { + if (!stop_ring(ring)) { DRM_ERROR("failed to set %s head to zero " "ctl %08x head %08x tail %08x start %08x\n", ring->name, @@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_READ_HEAD(ring), I915_READ_TAIL(ring), I915_READ_START(ring)); + ret = -EIO; + goto out; } } + if (I915_NEED_GFX_HWS(dev)) + intel_ring_setup_status_page(ring); + else + ring_setup_phys_status_page(ring); + /* Initialize the ring. This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 270a6a97343..2b91c4b4d34 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -34,6 +34,7 @@ struct intel_hw_status_page { #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base)) +#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val) enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d27155adf5d..46be00d66df 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2424,8 +2424,8 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector, if (ret < 0) goto err1; - ret = sysfs_create_link(&encoder->ddc.dev.kobj, - &drm_connector->kdev->kobj, + ret = sysfs_create_link(&drm_connector->kdev->kobj, + &encoder->ddc.dev.kobj, encoder->ddc.dev.kobj.name); if (ret < 0) goto err2; diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index f729dc71d5b..d0c75779d3f 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) { __raw_i915_write32(dev_priv, FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but !FORCEWAKE_VLV */ __raw_posting_read(dev_priv, 
FORCEWAKE_ACK_VLV); } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 3e6c0f3ed59..ef9957dbac9 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc) MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN); } else { /* disable cursor: */ - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0); - mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma), - MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB)); + mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), + mdp4_kms->blank_cursor_iova); } /* and drop the iova ref + obj rev when done scanning out: */ @@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc, if (old_bo) { /* drop our previous reference: */ - msm_gem_put_iova(old_bo, mdp4_kms->id); - drm_gem_object_unreference_unlocked(old_bo); + drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo); } - crtc_flush(crtc); request_pending(crtc, PENDING_CURSOR); return 0; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c index c740ccd1cc6..8edd531cb62 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c @@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms) VERB("status=%08x", status); + mdp_dispatch_irqs(mdp_kms, status); + for (id = 0; id < priv->num_crtcs; id++) if (status & mdp4_crtc_vblank(priv->crtcs[id])) drm_handle_vblank(dev, id); - mdp_dispatch_irqs(mdp_kms, status); - return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c index 272e707c948..0bb4faa1752 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c @@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file) static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); + if (mdp4_kms->blank_cursor_iova) + msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); + if (mdp4_kms->blank_cursor_bo) + drm_gem_object_unreference(mdp4_kms->blank_cursor_bo); kfree(mdp4_kms); } @@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev) goto fail; } + mutex_lock(&dev->struct_mutex); + mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC); + mutex_unlock(&dev->struct_mutex); + if (IS_ERR(mdp4_kms->blank_cursor_bo)) { + ret = PTR_ERR(mdp4_kms->blank_cursor_bo); + dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret); + mdp4_kms->blank_cursor_bo = NULL; + goto fail; + } + + ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id, + &mdp4_kms->blank_cursor_iova); + if (ret) { + dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret); + goto fail; + } + return kms; fail: diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h index 66a4d31aec8..715520c54cd 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h @@ -44,6 +44,10 @@ struct mdp4_kms { struct clk *lut_clk; struct mdp_irq error_handler; + + /* empty/blank cursor bo to use when cursor is "disabled" */ + struct drm_gem_object *blank_cursor_bo; + uint32_t blank_cursor_iova; }; #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base) diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c index 353d494a497..f2b985bc2ad 100644 --- 
a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c @@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) VERB("status=%08x", status); + mdp_dispatch_irqs(mdp_kms, status); + for (id = 0; id < priv->num_crtcs; id++) if (status & mdp5_crtc_vblank(priv->crtcs[id])) drm_handle_vblank(dev, id); - - mdp_dispatch_irqs(mdp_kms, status); } irqreturn_t mdp5_irq(struct msm_kms *kms) diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 6c6d7d4c9b4..a752ab83b81 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dma_addr_t paddr; int ret, size; - /* only doing ARGB32 since this is what is needed to alpha-blend - * with video overlays: - */ sizes->surface_bpp = 32; - sizes->surface_depth = 32; + sizes->surface_depth = 24; DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, sizes->surface_height, sizes->surface_bpp, diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 3da8264d303..bb8026daebc 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj) if (iommu_present(&platform_bus_type)) drm_gem_put_pages(obj, msm_obj->pages, true, false); - else + else { drm_mm_remove_node(msm_obj->vram_node); + drm_free_large(msm_obj->pages); + } msm_obj->pages = NULL; } diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 7762665ad8f..876de9ac379 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -1009,7 +1009,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, } if (outp == 8) - return false; + return conf; data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); if (data == 0x0000) diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c index 1dc37b1ddbf..b0d0fb2f4d0 100644 --- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxgm107.c @@ -863,7 +863,7 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) { mmio_data(0x003000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); mmio_data(0x008000, 0x0100, NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS); - mmio_data(0x060000, 0x1000, NV_MEM_ACCESS_RW); + mmio_data(0x200000, 0x1000, NV_MEM_ACCESS_RW); mmio_list(0x40800c, 0x00000000, 8, 1); mmio_list(0x408010, 0x80000000, 0, 0); @@ -877,6 +877,8 @@ gm107_grctx_generate_mods(struct nvc0_graph_priv *priv, struct nvc0_grctx *info) mmio_list(0x418e24, 0x00000000, 8, 0); mmio_list(0x418e28, 0x80000030, 0, 0); + mmio_list(0x4064c8, 0x018002c0, 0, 0); + mmio_list(0x418810, 0x80000000, 12, 2); mmio_list(0x419848, 0x10000000, 12, 2); mmio_list(0x419c2c, 0x10000000, 12, 2); diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c index fb0b6b2d142..222e8ebb669 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c @@ -168,7 +168,8 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios) */ i = 16; do { - if ((nv_rd32(bios, 0x300000) & 0xffff) == 0xaa55) + u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff; + if (data == 0xaa55) break; } while (i--); @@ -176,14 +177,15 @@ nouveau_bios_shadow_prom(struct 
nouveau_bios *bios) goto out; /* read entire bios image to system memory */ - bios->size = ((nv_rd32(bios, 0x300000) >> 16) & 0xff) * 512; + bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff; + bios->size = bios->size * 512; if (!bios->size) goto out; bios->data = kmalloc(bios->size, GFP_KERNEL); if (bios->data) { - for (i = 0; i < bios->size; i+=4) - nv_wo32(bios, i, nv_rd32(bios, 0x300000 + i)); + for (i = 0; i < bios->size; i += 4) + ((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i); } /* check the PCI record header */ diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c index 43fec17ea54..bbf117be572 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c @@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line) case 0x00: return 2; case 0x19: return 1; case 0x1c: return 0; + case 0x1e: return 2; default: break; } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 83face3f608..279206997e5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -389,9 +389,6 @@ bool nouveau_acpi_rom_supported(struct pci_dev *pdev) acpi_status status; acpi_handle dhandle, rom_handle; - if (!nouveau_dsm_priv.dsm_detected && !nouveau_dsm_priv.optimus_detected) - return false; - dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) return false; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 3ff030dc1ee..da764a4ed95 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -764,9 +764,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, } ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); - mutex_unlock(&chan->cli->mutex); if (ret) goto fail_unreserve; + mutex_unlock(&chan->cli->mutex); /* Update the crtc struct and cleanup */ crtc->primary->fb = fb; diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index fb187c78978..c31c12b4e66 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1177,27 +1177,43 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, /* Set NUM_BANKS. */ if (rdev->family >= CHIP_TAHITI) { - unsigned tileb, index, num_banks, tile_split_bytes; + unsigned index, num_banks; - /* Calculate the macrotile mode index. */ - tile_split_bytes = 64 << tile_split; - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; - tileb = min(tile_split_bytes, tileb); + if (rdev->family >= CHIP_BONAIRE) { + unsigned tileb, tile_split_bytes; - for (index = 0; tileb > 64; index++) { - tileb >>= 1; - } + /* Calculate the macrotile mode index. 
*/ + tile_split_bytes = 64 << tile_split; + tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = min(tile_split_bytes, tileb); - if (index >= 16) { - DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", - target_fb->bits_per_pixel, tile_split); - return -EINVAL; - } + for (index = 0; tileb > 64; index++) + tileb >>= 1; + + if (index >= 16) { + DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", + target_fb->bits_per_pixel, tile_split); + return -EINVAL; + } - if (rdev->family >= CHIP_BONAIRE) num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; - else + } else { + switch (target_fb->bits_per_pixel) { + case 8: + index = 10; + break; + case 16: + index = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP; + break; + default: + case 32: + index = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP; + break; + } + num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3; + } + fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks); } else { /* NI and older. */ @@ -1720,8 +1736,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) } /* otherwise, pick one of the plls */ if ((rdev->family == CHIP_KAVERI) || - (rdev->family == CHIP_KABINI)) { - /* KB/KV has PPLL1 and PPLL2 */ + (rdev->family == CHIP_KABINI) || + (rdev->family == CHIP_MULLINS)) { + /* KB/KV/ML has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1885,6 +1902,9 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) is_tvcv = true; + if (!radeon_crtc->adjusted_clock) + return -EINVAL; + atombios_crtc_set_pll(crtc, adjusted_mode); if (ASIC_IS_DCE4(rdev)) diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index bc0119fb6c1..54e4f52549a 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -366,11 +366,11 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector) if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) return; - if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3)) + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_SINK_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); - if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3)) + if (drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_BRANCH_OUI, buf, 3) == 3) DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", buf[0], buf[1], buf[2]); } @@ -419,21 +419,23 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, if (dp_bridge != ENCODER_OBJECT_ID_NONE) { /* DP bridge chips */ - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, - DP_EDP_CONFIGURATION_CAP, &tmp); - if (tmp & 1) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; - else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || - (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) - panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; - else - panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || + (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) + panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; + else + panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + } } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { /* eDP */ - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, - DP_EDP_CONFIGURATION_CAP, &tmp); - 
if (tmp & 1) - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, + DP_EDP_CONFIGURATION_CAP, &tmp) == 1) { + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + } } return panel_mode; @@ -809,11 +811,15 @@ void radeon_dp_link_train(struct drm_encoder *encoder, else dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; - drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp); - if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) - dp_info.tp3_supported = true; - else + if (drm_dp_dpcd_readb(&radeon_connector->ddc_bus->aux, DP_MAX_LANE_COUNT, &tmp) + == 1) { + if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) + dp_info.tp3_supported = true; + else + dp_info.tp3_supported = false; + } else { dp_info.tp3_supported = false; + } memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE); dp_info.rdev = rdev; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 199eb194716..d2fd9896808 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -63,6 +63,12 @@ MODULE_FIRMWARE("radeon/KABINI_ce.bin"); MODULE_FIRMWARE("radeon/KABINI_mec.bin"); MODULE_FIRMWARE("radeon/KABINI_rlc.bin"); MODULE_FIRMWARE("radeon/KABINI_sdma.bin"); +MODULE_FIRMWARE("radeon/MULLINS_pfp.bin"); +MODULE_FIRMWARE("radeon/MULLINS_me.bin"); +MODULE_FIRMWARE("radeon/MULLINS_ce.bin"); +MODULE_FIRMWARE("radeon/MULLINS_mec.bin"); +MODULE_FIRMWARE("radeon/MULLINS_rlc.bin"); +MODULE_FIRMWARE("radeon/MULLINS_sdma.bin"); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); @@ -1473,6 +1479,43 @@ static const u32 hawaii_mgcg_cgcg_init[] = 0xd80c, 0xff000ff0, 0x00000100 }; +static const u32 godavari_golden_registers[] = +{ + 0x55e4, 0xff607fff, 0xfc000100, + 0x6ed8, 0x00010101, 0x00010000, + 0x9830, 0xffffffff, 0x00000000, + 0x98302, 0xf00fffff, 0x00000400, + 0x6130, 0xffffffff, 0x00010000, + 0x5bb0, 0x000000f0, 0x00000070, + 0x5bc0, 0xf0311fff, 0x80300000, + 0x98f8, 0x73773777, 0x12010001, + 0x98fc, 0xffffffff, 0x00000010, + 0x8030, 0x00001f0f, 0x0000100a, + 0x2f48, 0x73773777, 0x12010001, + 0x2408, 0x000fffff, 0x000c007f, + 0x8a14, 0xf000003f, 0x00000007, + 0x8b24, 0xffffffff, 0x00ff0fff, + 0x30a04, 0x0000ff0f, 0x00000000, + 0x28a4c, 0x07ffffff, 0x06000000, + 0x4d8, 0x00000fff, 0x00000100, + 0xd014, 0x00010000, 0x00810001, + 0xd814, 0x00010000, 0x00810001, + 0x3e78, 0x00000001, 0x00000002, + 0xc768, 0x00000008, 0x00000008, + 0xc770, 0x00000f00, 0x00000800, + 0xc774, 0x00000f00, 0x00000800, + 0xc798, 0x00ffffff, 0x00ff7fbf, + 0xc79c, 0x00ffffff, 0x00ff7faf, + 0x8c00, 0x000000ff, 0x00000001, + 0x214f8, 0x01ff01ff, 0x00000002, + 0x21498, 0x007ff800, 0x00200000, + 0x2015c, 0xffffffff, 0x00000f40, + 0x88c4, 0x001f3ae3, 0x00000082, + 0x88d4, 0x0000001f, 0x00000010, + 0x30934, 0xffffffff, 0x00000000 +}; + + static void cik_init_golden_registers(struct radeon_device *rdev) { switch (rdev->family) { @@ -1504,6 +1547,20 @@ static void cik_init_golden_registers(struct radeon_device *rdev) kalindi_golden_spm_registers, (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); break; + case CHIP_MULLINS: + radeon_program_register_sequence(rdev, + kalindi_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init)); + radeon_program_register_sequence(rdev, + godavari_golden_registers, + (const u32)ARRAY_SIZE(godavari_golden_registers)); + radeon_program_register_sequence(rdev, + kalindi_golden_common_registers, + (const 
u32)ARRAY_SIZE(kalindi_golden_common_registers)); + radeon_program_register_sequence(rdev, + kalindi_golden_spm_registers, + (const u32)ARRAY_SIZE(kalindi_golden_spm_registers)); + break; case CHIP_KAVERI: radeon_program_register_sequence(rdev, spectre_mgcg_cgcg_init, @@ -1834,6 +1891,15 @@ static int cik_init_microcode(struct radeon_device *rdev) rlc_req_size = KB_RLC_UCODE_SIZE * 4; sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; break; + case CHIP_MULLINS: + chip_name = "MULLINS"; + pfp_req_size = CIK_PFP_UCODE_SIZE * 4; + me_req_size = CIK_ME_UCODE_SIZE * 4; + ce_req_size = CIK_CE_UCODE_SIZE * 4; + mec_req_size = CIK_MEC_UCODE_SIZE * 4; + rlc_req_size = ML_RLC_UCODE_SIZE * 4; + sdma_req_size = CIK_SDMA_UCODE_SIZE * 4; + break; default: BUG(); } @@ -3272,6 +3338,7 @@ static void cik_gpu_init(struct radeon_device *rdev) gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_KABINI: + case CHIP_MULLINS: default: rdev->config.cik.max_shader_engines = 1; rdev->config.cik.max_tile_pipes = 2; @@ -3702,6 +3769,7 @@ int cik_copy_cpdma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -5800,6 +5868,9 @@ static int cik_rlc_resume(struct radeon_device *rdev) case CHIP_KABINI: size = KB_RLC_UCODE_SIZE; break; + case CHIP_MULLINS: + size = ML_RLC_UCODE_SIZE; + break; } cik_rlc_stop(rdev); @@ -6548,6 +6619,7 @@ void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer) buffer[count++] = cpu_to_le32(0x00000000); break; case CHIP_KABINI: + case CHIP_MULLINS: buffer[count++] = cpu_to_le32(0x00000000); /* XXX */ buffer[count++] = cpu_to_le32(0x00000000); break; @@ -6693,6 +6765,19 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } + /* pflip */ + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + } /* dac hotplug */ WREG32(DAC_AUTODETECT_INT_CONTROL, 0); @@ -7049,6 +7134,25 @@ int cik_irq_set(struct radeon_device *rdev) WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 4) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + if (rdev->num_crtc >= 6) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + } + WREG32(DC_HPD1_INT_CONTROL, hpd1); WREG32(DC_HPD2_INT_CONTROL, hpd2); WREG32(DC_HPD3_INT_CONTROL, hpd3); @@ -7085,6 +7189,29 @@ static inline void cik_irq_ack(struct radeon_device *rdev) rdev->irq.stat_regs.cik.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5); 
rdev->irq.stat_regs.cik.disp_int_cont6 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE6); + rdev->irq.stat_regs.cik.d1grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC0_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d2grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC1_REGISTER_OFFSET); + if (rdev->num_crtc >= 4) { + rdev->irq.stat_regs.cik.d3grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC2_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d4grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC3_REGISTER_OFFSET); + } + if (rdev->num_crtc >= 6) { + rdev->irq.stat_regs.cik.d5grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC4_REGISTER_OFFSET); + rdev->irq.stat_regs.cik.d6grph_int = RREG32(GRPH_INT_STATUS + + EVERGREEN_CRTC5_REGISTER_OFFSET); + } + + if (rdev->irq.stat_regs.cik.d1grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d2grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int & LB_D1_VLINE_INTERRUPT) @@ -7095,6 +7222,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) WREG32(LB_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK); if (rdev->num_crtc >= 4) { + if (rdev->irq.stat_regs.cik.d3grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d4grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) @@ -7106,6 +7239,12 @@ static inline void cik_irq_ack(struct radeon_device *rdev) } if (rdev->num_crtc >= 6) { + if (rdev->irq.stat_regs.cik.d5grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); + if (rdev->irq.stat_regs.cik.d6grph_int & GRPH_PFLIP_INT_OCCURRED) + WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_CLEAR); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) WREG32(LB_VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK); if (rdev->irq.stat_regs.cik.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) @@ -7457,6 +7596,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index f7e46cf682a..72e464c79a8 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -562,6 +562,7 @@ int cik_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 213873270d5..dd7926394a8 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ 
b/drivers/gpu/drm/radeon/cikd.h @@ -888,6 +888,15 @@ # define DC_HPD6_RX_INTERRUPT (1 << 18) #define DISP_INTERRUPT_STATUS_CONTINUE6 0x6780 +/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */ +#define GRPH_INT_STATUS 0x6858 +# define GRPH_PFLIP_INT_OCCURRED (1 << 0) +# define GRPH_PFLIP_INT_CLEAR (1 << 8) +/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */ +#define GRPH_INT_CONTROL 0x685c +# define GRPH_PFLIP_INT_MASK (1 << 0) +# define GRPH_PFLIP_INT_TYPE (1 << 8) + #define DAC_AUTODETECT_INT_CONTROL 0x67c8 #define DC_HPD1_INT_STATUS 0x601c diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index b406546440d..0f7a51a3694 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4371,7 +4371,6 @@ int evergreen_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; u32 dma_cntl, dma_cntl1 = 0; u32 thermal_int = 0; @@ -4554,15 +4553,21 @@ int evergreen_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -4951,6 +4956,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 287fe966d7d..478caefe0fe 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -151,6 +151,7 @@ int evergreen_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 16ec9d56a23..3f6e817d97e 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -546,6 +546,52 @@ static int kv_set_divider_value(struct radeon_device *rdev, return 0; } +static u32 kv_convert_vid2_to_vid7(struct radeon_device *rdev, + struct sumo_vid_mapping_table 
*vid_mapping_table, + u32 vid_2bit) +{ + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + if (vid_2bit < vddc_sclk_table->count) + return vddc_sclk_table->entries[vid_2bit].v; + else + return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) + return vid_mapping_table->entries[i].vid_7bit; + } + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; + } +} + +static u32 kv_convert_vid7_to_vid2(struct radeon_device *rdev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_7bit) +{ + struct radeon_clock_voltage_dependency_table *vddc_sclk_table = + &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + for (i = 0; i < vddc_sclk_table->count; i++) { + if (vddc_sclk_table->entries[i].v == vid_7bit) + return i; + } + return vddc_sclk_table->count - 1; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) + return vid_mapping_table->entries[i].vid_2bit; + } + + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; + } +} + static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev, u16 voltage) { @@ -556,9 +602,9 @@ static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev, u32 vid_2bit) { struct kv_power_info *pi = kv_get_pi(rdev); - u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev, - &pi->sys_info.vid_mapping_table, - vid_2bit); + u32 vid_8bit = kv_convert_vid2_to_vid7(rdev, + &pi->sys_info.vid_mapping_table, + vid_2bit); return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit); } @@ -639,7 +685,7 @@ static int kv_force_lowest_valid(struct radeon_device *rdev) static int kv_unforce_levels(struct radeon_device *rdev) { - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel); else return kv_set_enabled_levels(rdev); @@ -1362,13 +1408,20 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) struct radeon_uvd_clock_voltage_dependency_table *table = &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; int ret; + u32 mask; if (!gate) { - if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state) + if (table->count) pi->uvd_boot_level = table->count - 1; else pi->uvd_boot_level = 0; + if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { + mask = 1 << pi->uvd_boot_level; + } else { + mask = 0x1f; + } + ret = kv_copy_bytes_to_smc(rdev, pi->dpm_table_start + offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), @@ -1377,11 +1430,9 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate) if (ret) return ret; - if (!pi->caps_uvd_dpm || - pi->caps_stable_p_state) - kv_send_msg_to_smc_with_parameter(rdev, - PPSMC_MSG_UVDDPM_SetEnabledMask, - (1 << pi->uvd_boot_level)); + kv_send_msg_to_smc_with_parameter(rdev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + mask); } return kv_enable_uvd_dpm(rdev, !gate); @@ -1617,7 +1668,7 @@ static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate) if (pi->acp_power_gated == gate) return; - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return; pi->acp_power_gated = gate; @@ -1786,7 +1837,7 @@ int 
kv_dpm_set_power_state(struct radeon_device *rdev) } } - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { if (pi->enable_dpm) { kv_set_valid_clock_range(rdev, new_ps); kv_update_dfs_bypass_settings(rdev, new_ps); @@ -1812,6 +1863,8 @@ int kv_dpm_set_power_state(struct radeon_device *rdev) return ret; } kv_update_sclk_t(rdev); + if (rdev->family == CHIP_MULLINS) + kv_enable_nb_dpm(rdev); } } else { if (pi->enable_dpm) { @@ -1862,7 +1915,7 @@ void kv_dpm_reset_asic(struct radeon_device *rdev) { struct kv_power_info *pi = kv_get_pi(rdev); - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { kv_force_lowest_valid(rdev); kv_init_graphics_levels(rdev); kv_program_bootup_state(rdev); @@ -1901,14 +1954,41 @@ static void kv_construct_max_power_limits_table(struct radeon_device *rdev, static void kv_patch_voltage_values(struct radeon_device *rdev) { int i; - struct radeon_uvd_clock_voltage_dependency_table *table = + struct radeon_uvd_clock_voltage_dependency_table *uvd_table = &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct radeon_vce_clock_voltage_dependency_table *vce_table = + &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct radeon_clock_voltage_dependency_table *samu_table = + &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct radeon_clock_voltage_dependency_table *acp_table = + &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - if (table->count) { - for (i = 0; i < table->count; i++) - table->entries[i].v = + if (uvd_table->count) { + for (i = 0; i < uvd_table->count; i++) + uvd_table->entries[i].v = kv_convert_8bit_index_to_voltage(rdev, - table->entries[i].v); + uvd_table->entries[i].v); + } + + if (vce_table->count) { + for (i = 0; i < vce_table->count; i++) + vce_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + vce_table->entries[i].v); + } + + if (samu_table->count) { + for (i = 0; i < samu_table->count; i++) + samu_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + samu_table->entries[i].v); + } + + if (acp_table->count) { + for (i = 0; i < acp_table->count; i++) + acp_table->entries[i].v = + kv_convert_8bit_index_to_voltage(rdev, + acp_table->entries[i].v); } } @@ -1941,7 +2021,7 @@ static int kv_force_dpm_highest(struct radeon_device *rdev) break; } - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); else return kv_set_enabled_level(rdev, i); @@ -1961,7 +2041,7 @@ static int kv_force_dpm_lowest(struct radeon_device *rdev) break; } - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i); else return kv_set_enabled_level(rdev, i); @@ -2118,7 +2198,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev, else pi->battery_state = false; - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { ps->dpm0_pg_nb_ps_lo = 0x1; ps->dpm0_pg_nb_ps_hi = 0x0; ps->dpmx_nb_ps_lo = 0x1; @@ -2179,7 +2259,7 @@ static int kv_calculate_nbps_level_settings(struct radeon_device *rdev) if (pi->lowest_valid > pi->highest_valid) return -EINVAL; - if (rdev->family == CHIP_KABINI) { + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) { for (i = pi->lowest_valid; i <= pi->highest_valid; 
i++) { pi->graphics_level[i].GnbSlow = 1; pi->graphics_level[i].ForceNbPs1 = 0; @@ -2253,9 +2333,9 @@ static void kv_init_graphics_levels(struct radeon_device *rdev) break; kv_set_divider_value(rdev, i, table->entries[i].clk); - vid_2bit = sumo_convert_vid7_to_vid2(rdev, - &pi->sys_info.vid_mapping_table, - table->entries[i].v); + vid_2bit = kv_convert_vid7_to_vid2(rdev, + &pi->sys_info.vid_mapping_table, + table->entries[i].v); kv_set_vid(rdev, i, vid_2bit); kv_set_at(rdev, i, pi->at[i]); kv_dpm_power_level_enabled_for_throttle(rdev, i, true); @@ -2324,7 +2404,7 @@ static void kv_program_nbps_index_settings(struct radeon_device *rdev, struct kv_power_info *pi = kv_get_pi(rdev); u32 nbdpmconfig1; - if (rdev->family == CHIP_KABINI) + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) return; if (pi->sys_info.nb_dpm_enable) { @@ -2631,9 +2711,6 @@ int kv_dpm_init(struct radeon_device *rdev) pi->sram_end = SMC_RAM_END; - if (rdev->family == CHIP_KABINI) - pi->high_voltage_t = 4001; - pi->enable_nb_dpm = true; pi->caps_power_containment = true; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 6e887d004eb..bbc189fd3dd 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2839,6 +2839,7 @@ int r600_copy_cpdma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } @@ -3505,7 +3506,6 @@ int r600_irq_set(struct radeon_device *rdev) u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; u32 hdmi0, hdmi1; - u32 d1grph = 0, d2grph = 0; u32 dma_cntl; u32 thermal_int = 0; @@ -3614,8 +3614,8 @@ int r600_irq_set(struct radeon_device *rdev) WREG32(CP_INT_CNTL, cp_int_cntl); WREG32(DMA_CNTL, dma_cntl); WREG32(DxMODE_INT_MASK, mode_int); - WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); - WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); + WREG32(D1GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); + WREG32(D2GRPH_INTERRUPT_CONTROL, DxGRPH_PFLIP_INT_MASK); WREG32(GRBM_INT_CNTL, grbm_int_cntl); if (ASIC_IS_DCE3(rdev)) { WREG32(DC_HPD1_INT_CONTROL, hpd1); @@ -3918,6 +3918,14 @@ restart_ih: break; } break; + case 9: /* D1 pflip */ + DRM_DEBUG("IH: D1 flip\n"); + radeon_crtc_handle_flip(rdev, 0); + break; + case 11: /* D2 pflip */ + DRM_DEBUG("IH: D2 flip\n"); + radeon_crtc_handle_flip(rdev, 1); + break; case 19: /* HPD/DAC hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 53fcb28f557..4969cef44a1 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -489,6 +489,7 @@ int r600_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index b58e1afdda7..8149e7cf430 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -730,6 +730,12 @@ struct cik_irq_stat_regs { u32 disp_int_cont4; u32 disp_int_cont5; u32 disp_int_cont6; + u32 d1grph_int; + u32 d2grph_int; + u32 d3grph_int; + u32 d4grph_int; + u32 d5grph_int; + u32 d6grph_int; }; union radeon_irq_stat_regs { @@ -1636,6 +1642,7 @@ struct radeon_vce { unsigned fb_version; atomic_t handles[RADEON_MAX_VCE_HANDLES]; struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; + unsigned img_size[RADEON_MAX_VCE_HANDLES]; struct delayed_work 
idle_work; }; @@ -1649,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence); void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); void radeon_vce_note_usage(struct radeon_device *rdev); -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size); int radeon_vce_cs_parse(struct radeon_cs_parser *p); bool radeon_vce_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, @@ -2634,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) -#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) +#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ + (rdev->family == CHIP_MULLINS)) #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ (rdev->ddev->pdev->device == 0x6850) || \ diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index b8a24a75d4f..be20e62dac8 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -2516,6 +2516,7 @@ int radeon_asic_init(struct radeon_device *rdev) break; case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: rdev->asic = &kv_asic; /* set num crtcs */ if (rdev->family == CHIP_KAVERI) { diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b3633d9a531..9ab30976287 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } } + if (!found) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + } + if (!found) return false; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 2b6e0ebcc13..41ecf8a6061 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -152,6 +152,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) uint32_t domain = r->write_domain ? 
r->write_domain : r->read_domains; + if (domain & RADEON_GEM_DOMAIN_CPU) { + DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid " + "for command submission\n"); + return -EINVAL; + } + p->relocs[i].domain = domain; if (domain == RADEON_GEM_DOMAIN_VRAM) domain |= RADEON_GEM_DOMAIN_GTT; @@ -342,10 +348,17 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return -EINVAL; /* we only support VM on some SI+ rings */ - if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) && - ((p->cs_flags & RADEON_CS_USE_VM) == 0)) { - DRM_ERROR("Ring %d requires VM!\n", p->ring); - return -EINVAL; + if ((p->cs_flags & RADEON_CS_USE_VM) == 0) { + if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) { + DRM_ERROR("Ring %d requires VM!\n", p->ring); + return -EINVAL; + } + } else { + if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) { + DRM_ERROR("VM not supported on ring %d!\n", + p->ring); + return -EINVAL; + } } } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 511fe26198e..14671406212 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -99,6 +99,7 @@ static const char radeon_family_name[][16] = { "KAVERI", "KABINI", "HAWAII", + "MULLINS", "LAST", }; @@ -1532,11 +1533,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) radeon_restore_bios_scratch_regs(rdev); - if (fbcon) { - radeon_fbdev_set_suspend(rdev, 0); - console_unlock(); - } - /* init dig PHYs, disp eng pll */ if (rdev->is_atom_bios) { radeon_atom_encoder_init(rdev); @@ -1561,6 +1557,12 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + + if (fbcon) { + radeon_fbdev_set_suspend(rdev, 0); + console_unlock(); + } + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8d99d5ee801..356b733caaf 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -284,6 +284,10 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id) u32 update_pending; int vpos, hpos; + /* can happen during initialization */ + if (radeon_crtc == NULL) + return; + spin_lock_irqsave(&rdev->ddev->event_lock, flags); work = radeon_crtc->unpin_work; if (work == NULL || @@ -826,14 +830,14 @@ static void avivo_reduce_ratio(unsigned *nom, unsigned *den, /* make sure nominator is large enough */ if (*nom < nom_min) { - tmp = (nom_min + *nom - 1) / *nom; + tmp = DIV_ROUND_UP(nom_min, *nom); *nom *= tmp; *den *= tmp; } /* make sure the denominator is large enough */ if (*den < den_min) { - tmp = (den_min + *den - 1) / *den; + tmp = DIV_ROUND_UP(den_min, *den); *nom *= tmp; *den *= tmp; } @@ -858,7 +862,7 @@ static void avivo_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div, unsigned *fb_div, unsigned *ref_div) { /* limit reference * post divider to a maximum */ - ref_div_max = min(210 / post_div, ref_div_max); + ref_div_max = max(min(100 / post_div, ref_div_max), 1u); /* get matching reference and feedback divider */ *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max); @@ -993,6 +997,16 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, /* this also makes sure that the reference divider is large enough */ avivo_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min); + /* avoid high jitter with small fractional dividers */ + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { + fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 
50); + if (fb_div < fb_div_min) { + unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); + fb_div *= tmp; + ref_div *= tmp; + } + } + /* and finally save the result */ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { *fb_div_p = fb_div / 10; diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 9da5da4ffd1..4b7b87f71a6 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -97,6 +97,7 @@ enum radeon_family { CHIP_KAVERI, CHIP_KABINI, CHIP_HAWAII, + CHIP_MULLINS, CHIP_LAST, }; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0cc47f12d99..eaaedba0467 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return r; } - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; - } + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } - /* map the ib pool buffer read only into - * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, - RADEON_VM_PAGE_READABLE | - RADEON_VM_PAGE_SNOOPED); + /* map the ib pool buffer read only into + * virtual address space */ + bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, + RADEON_VM_PAGE_READABLE | + RADEON_VM_PAGE_SNOOPED); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } } - file_priv->driver_priv = fpriv; } @@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev, struct radeon_bo_va *bo_va; int r; - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (!r) { + bo_va = radeon_vm_bo_find(&fpriv->vm, + rdev->ring_tmp_bo.bo); + if (bo_va) + radeon_vm_bo_rmv(rdev, bo_va); + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + } } radeon_vm_fini(rdev, &fpriv->vm); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 19bec0dbfa3..4faa4d6f9bb 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, * into account. We don't want to disallow buffer moves * completely. 
*/ - if (current_domain != RADEON_GEM_DOMAIN_CPU && + if ((lobj->alt_domain & current_domain) != 0 && (domain & current_domain) == 0 && /* will be moved */ bytes_moved > bytes_moved_threshold) { /* don't move it */ @@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); rdev = rbo->rdev; - if (bo->mem.mem_type == TTM_PL_VRAM) { - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) > rdev->mc.visible_vram_size) { - /* hurrah the memory is not visible ! */ - radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; - r = ttm_bo_validate(bo, &rbo->placement, false, false); - if (unlikely(r != 0)) - return r; - offset = bo->mem.start << PAGE_SHIFT; - /* this should not happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; - } + if (bo->mem.mem_type != TTM_PL_VRAM) + return 0; + + size = bo->mem.num_pages << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; + if ((offset + size) <= rdev->mc.visible_vram_size) + return 0; + + /* hurrah the memory is not visible ! */ + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + r = ttm_bo_validate(bo, &rbo->placement, false, false); + if (unlikely(r == -ENOMEM)) { + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); + return ttm_bo_validate(bo, &rbo->placement, false, false); + } else if (unlikely(r != 0)) { + return r; } + + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return -EINVAL; + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 6fac8efe834..53d6e1bb48d 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set profile when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (strncmp("default", buf, strlen("default")) == 0) @@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set method when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + count = -EINVAL; + goto fail; + } + /* we don't support the legacy modes with dpm */ if (rdev->pm.pm_method == PM_METHOD_DPM) { count = -EINVAL; @@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BALANCED) ? 
"balanced" : "performance"); @@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set dpm state when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("battery", buf, strlen("battery")) == 0) rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; @@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == RADEON_DPM_FORCED_LEVEL_LOW) ? "low" : "high"); @@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, enum radeon_dpm_forced_level level; int ret = 0; + /* Can't force performance level when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("low", buf, strlen("low")) == 0) { level = RADEON_DPM_FORCED_LEVEL_LOW; @@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); + struct drm_device *ddev = rdev->ddev; int temp; + /* Can't get temperature when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (rdev->asic->pm.get_temperature) temp = radeon_get_temperature(rdev); else @@ -1300,6 +1336,7 @@ int radeon_pm_init(struct radeon_device *rdev) case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: + case CHIP_MULLINS: /* DPM requires the RLC, RV770+ dGPU requires SMC */ if (!rdev->rlc_fw) rdev->pm.pm_method = PM_METHOD_PROFILE; @@ -1613,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; + struct drm_device *ddev = rdev->ddev; - if (rdev->pm.dpm_enabled) { + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + seq_printf(m, "PX asic powered off\n"); + } else if (rdev->pm.dpm_enabled) { mutex_lock(&rdev->pm.mutex); if (rdev->asic->dpm.debugfs_print_current_performance_level) radeon_dpm_debugfs_print_current_performance_level(rdev, m); diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h index 58d12938c0b..4e7c3269b18 100644 --- a/drivers/gpu/drm/radeon/radeon_ucode.h +++ b/drivers/gpu/drm/radeon/radeon_ucode.h @@ -52,6 +52,7 @@ #define BONAIRE_RLC_UCODE_SIZE 2048 #define KB_RLC_UCODE_SIZE 2560 #define KV_RLC_UCODE_SIZE 2560 +#define ML_RLC_UCODE_SIZE 2560 /* MC */ #define BTC_MC_UCODE_SIZE 6024 diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 5748bdaeacc..1b65ae2433c 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -99,6 +99,7 @@ int radeon_uvd_init(struct radeon_device *rdev) case CHIP_KABINI: case CHIP_KAVERI: case CHIP_HAWAII: + case CHIP_MULLINS: fw_name = FIRMWARE_BONAIRE; break; @@ -465,6 +466,10 
@@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, cmd = radeon_get_ib_value(p, p->idx) >> 1; if (cmd < 0x4) { + if (end <= start) { + DRM_ERROR("invalid reloc offset %X!\n", offset); + return -EINVAL; + } if ((end - start) < buf_sizes[cmd]) { DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd, (unsigned)(end - start), buf_sizes[cmd]); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index ced53dd03e7..3971d968af6 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -66,6 +66,7 @@ int radeon_vce_init(struct radeon_device *rdev) case CHIP_BONAIRE: case CHIP_KAVERI: case CHIP_KABINI: + case CHIP_MULLINS: fw_name = FIRMWARE_BONAIRE; break; @@ -442,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, * @p: parser context * @lo: address of lower dword * @hi: address of higher dword + * @size: minimum size for the relocation buffer * * Patch relocation inside command stream with real buffer address */ -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, + unsigned size) { struct radeon_cs_chunk *relocs_chunk; - uint64_t offset; + struct radeon_cs_reloc *reloc; + uint64_t start, end, offset; unsigned idx; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; @@ -461,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) return -EINVAL; } - offset += p->relocs_ptr[(idx / 4)]->gpu_offset; + reloc = p->relocs_ptr[(idx / 4)]; + start = reloc->gpu_offset; + end = start + radeon_bo_size(reloc->robj); + start += offset; - p->ib.ptr[lo] = offset & 0xFFFFFFFF; - p->ib.ptr[hi] = offset >> 32; + p->ib.ptr[lo] = start & 0xFFFFFFFF; + p->ib.ptr[hi] = start >> 32; + + if (end <= start) { + DRM_ERROR("invalid reloc offset %llX!\n", offset); + return -EINVAL; + } + if ((end - start) < size) { + DRM_ERROR("buffer too small (%d / %d)!\n", + (unsigned)(end - start), size); + return -EINVAL; + } return 0; } /** + * radeon_vce_validate_handle - validate stream handle + * + * @p: parser context + * @handle: handle to validate + * + * Validates the handle and returns the found session index or -EINVAL + * if we don't have another free session index.
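Both parsers now reject a relocation whose computed end does not lie past its start before comparing the remaining length against the per-command minimum. A self-contained sketch of that validation order, with fabricated offsets and sizes:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the added checks: reject wrap-around/empty ranges first, then
 * ranges shorter than what the command requires. */
static int check_reloc(uint64_t gpu_offset, uint64_t bo_size,
                       uint64_t reloc_offset, uint64_t min_size)
{
        uint64_t start = gpu_offset + reloc_offset;
        uint64_t end = gpu_offset + bo_size;

        if (end <= start)
                return -1;      /* invalid reloc offset */
        if ((end - start) < min_size)
                return -2;      /* buffer too small */
        return 0;
}

int main(void)
{
        printf("%d\n", check_reloc(0x1000, 0x4000, 0x0, 0x1000));    /* ok */
        printf("%d\n", check_reloc(0x1000, 0x4000, 0x5000, 0x10));   /* end <= start */
        printf("%d\n", check_reloc(0x1000, 0x4000, 0x3f00, 0x1000)); /* too small */
        return 0;
}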
+ */ +int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) +{ + unsigned i; + + /* validate the handle */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (atomic_read(&p->rdev->vce.handles[i]) == handle) + return i; + } + + /* handle not found try to alloc a new one */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { + p->rdev->vce.filp[i] = p->filp; + p->rdev->vce.img_size[i] = 0; + return i; + } + } + + DRM_ERROR("No more free VCE handles!\n"); + return -EINVAL; +} + +/** * radeon_vce_cs_parse - parse and validate the command stream * * @p: parser context @@ -477,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) */ int radeon_vce_cs_parse(struct radeon_cs_parser *p) { - uint32_t handle = 0; - bool destroy = false; + int session_idx = -1; + bool destroyed = false; + uint32_t tmp, handle = 0; + uint32_t *size = &tmp; int i, r; while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { @@ -490,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (destroyed) { + DRM_ERROR("No other command allowed after destroy!\n"); + return -EINVAL; + } + switch (cmd) { case 0x00000001: // session handle = radeon_get_ib_value(p, p->idx + 2); + session_idx = radeon_vce_validate_handle(p, handle); + if (session_idx < 0) + return session_idx; + size = &p->rdev->vce.img_size[session_idx]; break; case 0x00000002: // task info + break; + case 0x01000001: // create + *size = radeon_get_ib_value(p, p->idx + 8) * + radeon_get_ib_value(p, p->idx + 10) * + 8 * 3 / 2; + break; + case 0x04000001: // config extension case 0x04000002: // pic control case 0x04000005: // rate control @@ -505,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) break; case 0x03000001: // encode - r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); + r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, + *size); if (r) return r; - r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); + r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, + *size / 3); if (r) return r; break; case 0x02000001: // destroy - destroy = true; + destroyed = true; break; case 0x05000001: // context buffer + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + *size * 2); + if (r) + return r; + break; + case 0x05000004: // video bitstream buffer + tmp = radeon_get_ib_value(p, p->idx + 4); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + tmp); + if (r) + return r; + break; + case 0x05000005: // feedback buffer - r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + 4096); if (r) return r; break; @@ -531,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (session_idx == -1) { + DRM_ERROR("no session command at start of IB\n"); + return -EINVAL; + } + p->idx += len / 4; } - if (destroy) { + if (destroyed) { /* IB contains a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); - - return 0; - } - - /* create or encode, validate the handle */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->rdev->vce.handles[i]) == handle) - return 0; } - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { - p->rdev->vce.filp[i] = p->filp; - return 0; - } - } - - DRM_ERROR("No more free VCE handles!\n"); - return -EINVAL; + 
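radeon_vce_validate_handle() above claims a free session slot lock-free: a slot holds 0 when free, and the compare-and-swap succeeds for exactly one racing allocator. A userspace model of the same shape using C11 atomics; the slot count and handle values are arbitrary:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HANDLES 16

static atomic_uint handles[MAX_HANDLES];

/* Return the session index for @handle, claiming a free slot if needed,
 * or -1 when every slot is taken — the same shape as the kernel helper. */
static int validate_handle(uint32_t handle)
{
        unsigned int expected;
        int i;

        for (i = 0; i < MAX_HANDLES; ++i)
                if (atomic_load(&handles[i]) == handle)
                        return i;

        for (i = 0; i < MAX_HANDLES; ++i) {
                expected = 0;
                /* succeeds for at most one caller per slot */
                if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
                        return i;
        }
        return -1;
}

int main(void)
{
        printf("%d\n", validate_handle(0xdead));        /* claims slot 0 */
        printf("%d\n", validate_handle(0xbeef));        /* claims slot 1 */
        printf("%d\n", validate_handle(0xdead));        /* finds slot 0 again */
        return 0;
}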
return 0; } /** diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 2aae6ce49d3..1f426696de3 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -130,10 +130,10 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, struct list_head *head) { struct radeon_cs_reloc *list; - unsigned i, idx, size; + unsigned i, idx; - size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_cs_reloc); - list = kmalloc(size, GFP_KERNEL); + list = kmalloc_array(vm->max_pde_used + 1, + sizeof(struct radeon_cs_reloc), GFP_KERNEL); if (!list) return NULL; @@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, ndw = 64; /* assume the worst case */ - ndw += vm->max_pde_used * 12; + ndw += vm->max_pde_used * 16; /* update too big for an IB */ if (ndw > 0xfffff) diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index aca8cbe8a33..bbf2e076ee4 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -86,6 +86,7 @@ int rv770_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ac708e00618..22a63c98ba1 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5780,7 +5780,6 @@ int si_irq_set(struct radeon_device *rdev) u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; - u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; u32 thermal_int = 0; @@ -5919,16 +5918,22 @@ int si_irq_set(struct radeon_device *rdev) } if (rdev->num_crtc >= 2) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 4) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (rdev->num_crtc >= 6) { - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, + GRPH_PFLIP_INT_MASK); } if (!ASIC_IS_NODCE(rdev)) { @@ -6292,6 +6297,15 @@ restart_ih: break; } break; + case 8: /* D1 page flip */ + case 10: /* D2 page flip */ + case 12: /* D3 page flip */ + case 14: /* D4 page flip */ + case 16: /* D5 page flip */ + case 18: /* D6 page flip */ + DRM_DEBUG("IH: D%d flip\n", ((src_id - 8) >> 1) + 1); + radeon_crtc_handle_flip(rdev, (src_id - 8) >> 1); + break; case 42: /* HPD hotplug */ switch (src_data) { case 0: diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index cf0fdad8c27..de0ca070122 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -213,6 +213,7 @@ int 
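The radeon_vm_get_bos() hunk above replaces an open-coded size multiplication with kmalloc_array(), which fails instead of wrapping when the count times the element size overflows. A simplified sketch of that overflow guard (the real kernel helper also threads GFP flags through):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate n elements of @size bytes, refusing multiplications that wrap. */
static void *alloc_array(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;    /* would overflow — kmalloc_array returns NULL too */
        return malloc(n * size);
}

int main(void)
{
        /* sane request succeeds */
        void *p = alloc_array(128, sizeof(uint64_t));
        printf("%s\n", p ? "ok" : "overflow");
        free(p);

        /* pathological count is rejected rather than silently truncated */
        p = alloc_array(SIZE_MAX / 4, 16);
        printf("%s\n", p ? "ok" : "overflow");
        return 0;
}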
si_copy_dma(struct radeon_device *rdev, r = radeon_fence_emit(rdev, fence, ring->idx); if (r) { radeon_ring_unlock_undo(rdev, ring); + radeon_semaphore_free(rdev, &sem, NULL); return r; } diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 683532f8493..7321283602c 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -107,8 +107,8 @@ #define SPLL_CHG_STATUS (1 << 1) #define SPLL_CNTL_MODE 0x618 #define SPLL_SW_DIR_CONTROL (1 << 0) -# define SPLL_REFCLK_SEL(x) ((x) << 8) -# define SPLL_REFCLK_SEL_MASK 0xFF00 +# define SPLL_REFCLK_SEL(x) ((x) << 26) +# define SPLL_REFCLK_SEL_MASK (3 << 26) #define CG_SPLL_SPREAD_SPECTRUM 0x620 #define SSEN (1 << 0) diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index 0a243f0e5d6..be42c812520 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -83,7 +83,10 @@ int uvd_v1_0_init(struct radeon_device *rdev) int r; /* raise clocks while booting up the VCPU */ - radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + radeon_set_uvd_clocks(rdev, 10000, 10000); + else + radeon_set_uvd_clocks(rdev, 53300, 40000); r = uvd_v1_0_start(rdev); if (r) @@ -407,7 +410,10 @@ int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) struct radeon_fence *fence = NULL; int r; - r = radeon_set_uvd_clocks(rdev, 53300, 40000); + if (rdev->family < CHIP_RV740) + r = radeon_set_uvd_clocks(rdev, 10000, 10000); + else + r = radeon_set_uvd_clocks(rdev, 53300, 40000); if (r) { DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r); return r; } diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 36c717af6cf..edb871d7d39 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc) struct drm_device *drm = crtc->dev; struct drm_plane *plane; - list_for_each_entry(plane, &drm->mode_config.plane_list, head) { + drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { if (plane->crtc == crtc) { tegra_plane_disable(plane); plane->crtc = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 931490b9cfe..87df0b3674f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, SVGA3dCmdSurfaceDMA dma; } *cmd; int ret; + SVGA3dCmdSurfaceDMASuffix *suffix; + uint32_t bo_size; cmd = container_of(header, struct vmw_dma_cmd, header); + suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma + + header->size - sizeof(*suffix)); + + /* Make sure device and verifier stay in sync. */ + if (unlikely(suffix->suffixSize != sizeof(*suffix))) { + DRM_ERROR("Invalid DMA suffix size.\n"); + return -EINVAL; + } + ret = vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->dma.guest.ptr, &vmw_bo); if (unlikely(ret != 0)) return ret; + /* Make sure DMA doesn't cross BO boundaries.
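The sid.h hunk above moves SPLL_REFCLK_SEL from a byte-wide field at bit 8 to a 2-bit field at bit 26, so the mask has to move with the shift. A sketch of the read-modify-write such define pairs support; the register value is fabricated:

#include <stdint.h>
#include <stdio.h>

/* Corrected field layout from sid.h: 2 bits at position 26. */
#define SPLL_REFCLK_SEL(x)      ((uint32_t)(x) << 26)
#define SPLL_REFCLK_SEL_MASK    (3u << 26)

/* Insert a refclk selector without disturbing the other SPLL_CNTL_MODE bits. */
static uint32_t set_refclk_sel(uint32_t reg, uint32_t sel)
{
        reg &= ~SPLL_REFCLK_SEL_MASK;
        reg |= SPLL_REFCLK_SEL(sel) & SPLL_REFCLK_SEL_MASK;
        return reg;
}

int main(void)
{
        uint32_t reg = 0x00000001;      /* pretend SPLL_SW_DIR_CONTROL is set */

        reg = set_refclk_sel(reg, 2);
        printf("0x%08x\n", reg);        /* 0x08000001: bit 27 set, bit 0 kept */
        return 0;
}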
*/ + bo_size = vmw_bo->base.num_pages * PAGE_SIZE; + if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) { + DRM_ERROR("Invalid DMA offset.\n"); + return -EINVAL; + } + + bo_size -= cmd->dma.guest.ptr.offset; + if (unlikely(suffix->maximumOffset > bo_size)) + suffix->maximumOffset = bo_size; + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, user_surface_converter, &cmd->dma.host.sid, NULL); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 10a2c086645..da52279de93 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1253,7 +1253,8 @@ EXPORT_SYMBOL_GPL(hid_output_report); static int hid_report_len(struct hid_report *report) { - return ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7; + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */ + return ((report->size - 1) >> 3) + 1 + (report->id > 0); } /* @@ -1266,7 +1267,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) * of implement() working on 8 byte chunks */ - int len = hid_report_len(report); + int len = hid_report_len(report) + 7; return kmalloc(len, flags); } diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c8af7202c28..34bb2205d2e 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -301,6 +301,9 @@ #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 +#define USB_VENDOR_ID_ELITEGROUP 0x03fc +#define USB_DEVICE_ID_ELITEGROUP_05D8 0x05d8 + #define USB_VENDOR_ID_ELO 0x04E7 #define USB_DEVICE_ID_ELO_TS2515 0x0022 #define USB_DEVICE_ID_ELO_TS2700 0x0020 @@ -834,6 +837,10 @@ #define USB_DEVICE_ID_SYNAPTICS_LTS2 0x1d10 #define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 +#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 + +#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 +#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 #define USB_VENDOR_ID_THINGM 0x27b8 #define USB_DEVICE_ID_BLINK1 0x01ed diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 35278e43c7a..51e25b9407f 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -1155,6 +1155,11 @@ static const struct hid_device_id mt_devices[] = { MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, + /* Elitegroup panel */ + { .driver_data = MT_CLS_SERIAL, + MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP, + USB_DEVICE_ID_ELITEGROUP_05D8) }, + /* Flatfrog Panels */ { .driver_data = MT_CLS_FLATFROG, MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index af8244b1c1f..be14b5690e9 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -708,6 +708,9 @@ static const struct hid_device_id sensor_hub_devices[] = { { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0, USB_DEVICE_ID_STM_HID_SENSOR), .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, + { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_TEXAS_INSTRUMENTS, + USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA), + .driver_data = HID_SENSOR_HUB_ENUM_QUIRK}, { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID, HID_ANY_ID) }, { } diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index dbd83878ff9..8e4ddb36988 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -119,6 +119,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, 
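The hid-core hunk above moves the 7 bytes of slack out of hid_report_len() and into hid_alloc_report_buf(), leaving the length helper returning exactly DIV_ROUND_UP(bits, 8) plus one byte when a report ID is present. A quick standalone check of that equivalence:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* The fixed helper: payload bytes rounded up, plus the optional report ID. */
static int report_len(int size_bits, int id)
{
        return ((size_bits - 1) >> 3) + 1 + (id > 0);
}

int main(void)
{
        int bits;

        /* ((n - 1) >> 3) + 1 == DIV_ROUND_UP(n, 8) for every n >= 1 */
        for (bits = 1; bits <= 64; bits++)
                if (report_len(bits, 0) != DIV_ROUND_UP(bits, 8))
                        printf("mismatch at %d\n", bits);

        printf("8 bits + report id -> %d bytes\n", report_len(8, 1)); /* 2 */
        return 0;
}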
HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_QUAD_HD, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP_V103, HID_QUIRK_NO_INIT_REPORTS }, { 0, 0 } }; diff --git a/drivers/hsi/Kconfig b/drivers/hsi/Kconfig index d94e38dd80c..2c76de438eb 100644 --- a/drivers/hsi/Kconfig +++ b/drivers/hsi/Kconfig @@ -14,6 +14,7 @@ config HSI_BOARDINFO bool default y +source "drivers/hsi/controllers/Kconfig" source "drivers/hsi/clients/Kconfig" endif # HSI diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile index 9d5d33f90de..360371e134f 100644 --- a/drivers/hsi/Makefile +++ b/drivers/hsi/Makefile @@ -3,4 +3,5 @@ # obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-$(CONFIG_HSI) += hsi.o +obj-y += controllers/ obj-y += clients/ diff --git a/drivers/hsi/clients/Kconfig b/drivers/hsi/clients/Kconfig index 3bacd275f47..71b9f9ab86e 100644 --- a/drivers/hsi/clients/Kconfig +++ b/drivers/hsi/clients/Kconfig @@ -4,6 +4,23 @@ comment "HSI clients" +config NOKIA_MODEM + tristate "Nokia Modem" + depends on HSI && SSI_PROTOCOL + help + Say Y here if you want to add support for the modem on Nokia + N900 (Nokia RX-51) hardware. + + If unsure, say N. + +config SSI_PROTOCOL + tristate "SSI protocol" + depends on HSI && PHONET && (OMAP_SSI=y || OMAP_SSI=m) + help + If you say Y here, you will enable the SSI protocol aka McSAAB. + + If unsure, say N. + config HSI_CHAR tristate "HSI/SSI character driver" depends on HSI diff --git a/drivers/hsi/clients/Makefile b/drivers/hsi/clients/Makefile index 327c0e27c8b..4d5bc0e0b27 100644 --- a/drivers/hsi/clients/Makefile +++ b/drivers/hsi/clients/Makefile @@ -2,4 +2,6 @@ # Makefile for HSI clients # -obj-$(CONFIG_HSI_CHAR) += hsi_char.o +obj-$(CONFIG_NOKIA_MODEM) += nokia-modem.o +obj-$(CONFIG_SSI_PROTOCOL) += ssi_protocol.o +obj-$(CONFIG_HSI_CHAR) += hsi_char.o diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c index e61e5f991aa..57f70c28fa3 100644 --- a/drivers/hsi/clients/hsi_char.c +++ b/drivers/hsi/clients/hsi_char.c @@ -367,7 +367,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) return -EINVAL; tmp = cl->rx_cfg; cl->rx_cfg.mode = rxc->mode; - cl->rx_cfg.channels = rxc->channels; + cl->rx_cfg.num_hw_channels = rxc->channels; cl->rx_cfg.flow = rxc->flow; ret = hsi_setup(cl); if (ret < 0) { @@ -383,7 +383,7 @@ static int hsc_rx_set(struct hsi_client *cl, struct hsc_rx_config *rxc) static inline void hsc_rx_get(struct hsi_client *cl, struct hsc_rx_config *rxc) { rxc->mode = cl->rx_cfg.mode; - rxc->channels = cl->rx_cfg.channels; + rxc->channels = cl->rx_cfg.num_hw_channels; rxc->flow = cl->rx_cfg.flow; } @@ -402,7 +402,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) return -EINVAL; tmp = cl->tx_cfg; cl->tx_cfg.mode = txc->mode; - cl->tx_cfg.channels = txc->channels; + cl->tx_cfg.num_hw_channels = txc->channels; cl->tx_cfg.speed = txc->speed; cl->tx_cfg.arb_mode = txc->arb_mode; ret = hsi_setup(cl); @@ -417,7 +417,7 @@ static int hsc_tx_set(struct hsi_client *cl, struct hsc_tx_config *txc) static inline void hsc_tx_get(struct hsi_client *cl, struct hsc_tx_config *txc) { txc->mode = cl->tx_cfg.mode; - txc->channels = cl->tx_cfg.channels; + txc->channels = cl->tx_cfg.num_hw_channels; txc->speed = cl->tx_cfg.speed; txc->arb_mode = cl->tx_cfg.arb_mode; } @@ -435,7 +435,7 @@ static ssize_t hsc_read(struct file *file, char __user *buf, size_t len, return -EINVAL; if (len > max_data_size) len = max_data_size; - if (channel->ch >= 
channel->cl->rx_cfg.channels) + if (channel->ch >= channel->cl->rx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_READ, &channel->flags)) return -EBUSY; @@ -492,7 +492,7 @@ static ssize_t hsc_write(struct file *file, const char __user *buf, size_t len, return -EINVAL; if (len > max_data_size) len = max_data_size; - if (channel->ch >= channel->cl->tx_cfg.channels) + if (channel->ch >= channel->cl->tx_cfg.num_hw_channels) return -ECHRNG; if (test_and_set_bit(HSC_CH_WRITE, &channel->flags)) return -EBUSY; @@ -705,7 +705,7 @@ static int hsc_probe(struct device *dev) if (!hsc_major) { ret = alloc_chrdev_region(&hsc_dev, hsc_baseminor, HSC_DEVS, devname); - if (ret > 0) + if (ret == 0) hsc_major = MAJOR(hsc_dev); } else { hsc_dev = MKDEV(hsc_major, hsc_baseminor); diff --git a/drivers/hsi/clients/nokia-modem.c b/drivers/hsi/clients/nokia-modem.c new file mode 100644 index 00000000000..363b780dace --- /dev/null +++ b/drivers/hsi/clients/nokia-modem.c @@ -0,0 +1,285 @@ +/* + * nokia-modem.c + * + * HSI client driver for Nokia N900 modem. + * + * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/gpio/consumer.h> +#include <linux/hsi/hsi.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_gpio.h> +#include <linux/hsi/ssi_protocol.h> + +static unsigned int pm; +module_param(pm, int, 0400); +MODULE_PARM_DESC(pm, + "Enable power management (0=disabled, 1=userland based [default])"); + +struct nokia_modem_gpio { + struct gpio_desc *gpio; + const char *name; +}; + +struct nokia_modem_device { + struct tasklet_struct nokia_modem_rst_ind_tasklet; + int nokia_modem_rst_ind_irq; + struct device *device; + struct nokia_modem_gpio *gpios; + int gpio_amount; + struct hsi_client *ssi_protocol; +}; + +static void do_nokia_modem_rst_ind_tasklet(unsigned long data) +{ + struct nokia_modem_device *modem = (struct nokia_modem_device *)data; + + if (!modem) + return; + + dev_info(modem->device, "CMT rst line change detected\n"); + + if (modem->ssi_protocol) + ssip_reset_event(modem->ssi_protocol); +} + +static irqreturn_t nokia_modem_rst_ind_isr(int irq, void *data) +{ + struct nokia_modem_device *modem = (struct nokia_modem_device *)data; + + tasklet_schedule(&modem->nokia_modem_rst_ind_tasklet); + + return IRQ_HANDLED; +} + +static void nokia_modem_gpio_unexport(struct device *dev) +{ + struct nokia_modem_device *modem = dev_get_drvdata(dev); + int i; + + for (i = 0; i < modem->gpio_amount; i++) { + sysfs_remove_link(&dev->kobj, modem->gpios[i].name); + gpiod_unexport(modem->gpios[i].gpio); + } +} + +static int nokia_modem_gpio_probe(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct nokia_modem_device *modem = dev_get_drvdata(dev); + int gpio_count, gpio_name_count, i, err; + + gpio_count = of_gpio_count(np); + + if 
(gpio_count < 0) { + dev_err(dev, "missing gpios: %d\n", gpio_count); + return gpio_count; + } + + gpio_name_count = of_property_count_strings(np, "gpio-names"); + + if (gpio_count != gpio_name_count) { + dev_err(dev, "number of gpios does not equal number of gpio names\n"); + return -EINVAL; + } + + modem->gpios = devm_kzalloc(dev, gpio_count * + sizeof(struct nokia_modem_gpio), GFP_KERNEL); + if (!modem->gpios) { + dev_err(dev, "Could not allocate memory for gpios\n"); + return -ENOMEM; + } + + modem->gpio_amount = gpio_count; + + for (i = 0; i < gpio_count; i++) { + modem->gpios[i].gpio = devm_gpiod_get_index(dev, NULL, i); + if (IS_ERR(modem->gpios[i].gpio)) { + dev_err(dev, "Could not get gpio %d\n", i); + return PTR_ERR(modem->gpios[i].gpio); + } + + err = of_property_read_string_index(np, "gpio-names", i, + &(modem->gpios[i].name)); + if (err) { + dev_err(dev, "Could not get gpio name %d\n", i); + return err; + } + + err = gpiod_direction_output(modem->gpios[i].gpio, 0); + if (err) + return err; + + err = gpiod_export(modem->gpios[i].gpio, 0); + if (err) + return err; + + err = gpiod_export_link(dev, modem->gpios[i].name, + modem->gpios[i].gpio); + if (err) + return err; + } + + return 0; +} + +static int nokia_modem_probe(struct device *dev) +{ + struct device_node *np; + struct nokia_modem_device *modem; + struct hsi_client *cl = to_hsi_client(dev); + struct hsi_port *port = hsi_get_port(cl); + int irq, pflags, err; + struct hsi_board_info ssip; + + np = dev->of_node; + if (!np) { + dev_err(dev, "device tree node not found\n"); + return -ENXIO; + } + + modem = devm_kzalloc(dev, sizeof(*modem), GFP_KERNEL); + if (!modem) { + dev_err(dev, "Could not allocate memory for nokia_modem_device\n"); + return -ENOMEM; + } + dev_set_drvdata(dev, modem); + + irq = irq_of_parse_and_map(np, 0); + if (irq < 0) { + dev_err(dev, "Invalid rst_ind interrupt (%d)\n", irq); + return irq; + } + modem->nokia_modem_rst_ind_irq = irq; + pflags = irq_get_trigger_type(irq); + + tasklet_init(&modem->nokia_modem_rst_ind_tasklet, + do_nokia_modem_rst_ind_tasklet, (unsigned long)modem); + err = devm_request_irq(dev, irq, nokia_modem_rst_ind_isr, + IRQF_DISABLED | pflags, "modem_rst_ind", modem); + if (err < 0) { + dev_err(dev, "Request rst_ind irq(%d) failed (flags %d)\n", + irq, pflags); + return err; + } + enable_irq_wake(irq); + + if (pm) { + err = nokia_modem_gpio_probe(dev); + if (err < 0) { + dev_err(dev, "Could not probe GPIOs\n"); + goto error1; + } + } + + ssip.name = "ssi-protocol"; + ssip.tx_cfg = cl->tx_cfg; + ssip.rx_cfg = cl->rx_cfg; + ssip.platform_data = NULL; + ssip.archdata = NULL; + + modem->ssi_protocol = hsi_new_client(port, &ssip); + if (!modem->ssi_protocol) { + dev_err(dev, "Could not register ssi-protocol device\n"); + goto error2; + } + + err = device_attach(&modem->ssi_protocol->device); + if (err == 0) { + dev_err(dev, "Missing ssi-protocol driver\n"); + err = -EPROBE_DEFER; + goto error3; + } else if (err < 0) { + dev_err(dev, "Could not load ssi-protocol driver (%d)\n", err); + goto error3; + } + + /* TODO: register cmt-speech hsi client */ + + dev_info(dev, "Registered Nokia HSI modem\n"); + + return 0; + +error3: + hsi_remove_client(&modem->ssi_protocol->device, NULL); +error2: + nokia_modem_gpio_unexport(dev); +error1: + disable_irq_wake(modem->nokia_modem_rst_ind_irq); + tasklet_kill(&modem->nokia_modem_rst_ind_tasklet); + + return err; +} + +static int nokia_modem_remove(struct device *dev) +{ + struct nokia_modem_device *modem = dev_get_drvdata(dev); + + if (!modem) + return
0; + + if (modem->ssi_protocol) { + hsi_remove_client(&modem->ssi_protocol->device, NULL); + modem->ssi_protocol = NULL; + } + + nokia_modem_gpio_unexport(dev); + dev_set_drvdata(dev, NULL); + disable_irq_wake(modem->nokia_modem_rst_ind_irq); + tasklet_kill(&modem->nokia_modem_rst_ind_tasklet); + + return 0; +} + +#ifdef CONFIG_OF +static const struct of_device_id nokia_modem_of_match[] = { + { .compatible = "nokia,n900-modem", }, + {}, +}; +MODULE_DEVICE_TABLE(of, nokia_modem_of_match); +#endif + +static struct hsi_client_driver nokia_modem_driver = { + .driver = { + .name = "nokia-modem", + .owner = THIS_MODULE, + .probe = nokia_modem_probe, + .remove = nokia_modem_remove, + .of_match_table = of_match_ptr(nokia_modem_of_match), + }, +}; + +static int __init nokia_modem_init(void) +{ + return hsi_register_client_driver(&nokia_modem_driver); +} +module_init(nokia_modem_init); + +static void __exit nokia_modem_exit(void) +{ + hsi_unregister_client_driver(&nokia_modem_driver); +} +module_exit(nokia_modem_exit); + +MODULE_ALIAS("hsi:nokia-modem"); +MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); +MODULE_DESCRIPTION("HSI driver module for Nokia N900 Modem"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c new file mode 100644 index 00000000000..ce4be3738d4 --- /dev/null +++ b/drivers/hsi/clients/ssi_protocol.c @@ -0,0 +1,1191 @@ +/* + * ssi_protocol.c + * + * Implementation of the SSI McSAAB improved protocol. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org> + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/atomic.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/if_ether.h> +#include <linux/if_arp.h> +#include <linux/if_phonet.h> +#include <linux/init.h> +#include <linux/irq.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/notifier.h> +#include <linux/scatterlist.h> +#include <linux/skbuff.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/hsi/hsi.h> +#include <linux/hsi/ssi_protocol.h> + +void ssi_waketest(struct hsi_client *cl, unsigned int enable); + +#define SSIP_TXQUEUE_LEN 100 +#define SSIP_MAX_MTU 65535 +#define SSIP_DEFAULT_MTU 4000 +#define PN_MEDIA_SOS 21 +#define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */ +#define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */ +#define SSIP_KATOUT 15 /* 15 msecs */ +#define SSIP_MAX_CMDS 5 /* Number of pre-allocated command buffers */ +#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) +#define SSIP_CMT_LOADER_SYNC 0x11223344 +/* + * SSI protocol command definitions + */ +#define SSIP_COMMAND(data) ((data) >> 28) +#define SSIP_PAYLOAD(data) ((data) & 0xfffffff) +/* Commands */ +#define SSIP_SW_BREAK 0 +#define SSIP_BOOTINFO_REQ 1 +#define SSIP_BOOTINFO_RESP 2 +#define SSIP_WAKETEST_RESULT 3 +#define SSIP_START_TRANS 4 +#define SSIP_READY 5 +/* Payloads */ +#define SSIP_DATA_VERSION(data) ((data) & 0xff) +#define SSIP_LOCAL_VERID 1 +#define SSIP_WAKETEST_OK 0 +#define SSIP_WAKETEST_FAILED 1 +#define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff) +#define SSIP_MSG_ID(data) ((data) & 0xff) +/* Generic Command */ +#define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff)) +/* Commands for the control channel */ +#define SSIP_BOOTINFO_REQ_CMD(ver) \ + SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver)) +#define SSIP_BOOTINFO_RESP_CMD(ver) \ + SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver)) +#define SSIP_START_TRANS_CMD(pdulen, id) \ + SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id))) +#define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0) +#define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0) + +/* Main state machine states */ +enum { + INIT, + HANDSHAKE, + ACTIVE, +}; + +/* Send state machine states */ +enum { + SEND_IDLE, + WAIT4READY, + SEND_READY, + SENDING, + SENDING_SWBREAK, +}; + +/* Receive state machine states */ +enum { + RECV_IDLE, + RECV_READY, + RECEIVING, +}; + +/** + * struct ssi_protocol - SSI protocol (McSAAB) data + * @main_state: Main state machine + * @send_state: TX state machine + * @recv_state: RX state machine + * @waketest: Flag to follow wake line test + * @rxid: RX data id + * @txid: TX data id + * @txqueue_len: TX queue length + * @tx_wd: TX watchdog + * @rx_wd: RX watchdog + * @keep_alive: Workaround for SSI HW bug + * @lock: To serialize access to this struct + * @netdev: Phonet network device + * @txqueue: TX data queue + * @cmdqueue: Queue of free commands + * @cl: HSI client own reference + * @link: Link for ssip_list + * @tx_usecnt: Refcount to keep track of the slaves that use the wake line + * @channel_id_cmd: HSI channel id for command stream + * @channel_id_data: HSI channel id for data stream + */ +struct ssi_protocol { + unsigned int main_state; + unsigned int send_state; +
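The command macros above pack a 4-bit opcode into bits 31:28 and a 28-bit payload below it; START_TRANS further splits its payload into a 16-bit PDU length and an 8-bit message ID. A standalone round-trip of the encoding:

#include <stdint.h>
#include <stdio.h>

#define SSIP_COMMAND(data)      ((data) >> 28)
#define SSIP_PAYLOAD(data)      ((data) & 0xfffffff)
#define SSIP_CMD(cmd, payload)  (((uint32_t)(cmd) << 28) | ((payload) & 0xfffffff))
#define SSIP_START_TRANS        4
#define SSIP_PDU_LENGTH(data)   (((data) >> 8) & 0xffff)
#define SSIP_MSG_ID(data)       ((data) & 0xff)
#define SSIP_START_TRANS_CMD(pdulen, id) \
        SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))

int main(void)
{
        uint32_t cmd = SSIP_START_TRANS_CMD(0x123, 0x42);

        printf("word 0x%08x cmd %u len 0x%x id 0x%x\n",
               cmd, SSIP_COMMAND(cmd), SSIP_PDU_LENGTH(cmd), SSIP_MSG_ID(cmd));
        /* prints: word 0x40012342 cmd 4 len 0x123 id 0x42 */
        return 0;
}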
unsigned int recv_state; + unsigned int waketest:1; + u8 rxid; + u8 txid; + unsigned int txqueue_len; + struct timer_list tx_wd; + struct timer_list rx_wd; + struct timer_list keep_alive; /* wake-up workaround */ + spinlock_t lock; + struct net_device *netdev; + struct list_head txqueue; + struct list_head cmdqueue; + struct hsi_client *cl; + struct list_head link; + atomic_t tx_usecnt; + int channel_id_cmd; + int channel_id_data; +}; + +/* List of ssi protocol instances */ +static LIST_HEAD(ssip_list); + +static void ssip_rxcmd_complete(struct hsi_msg *msg); + +static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + *data = cmd; +} + +static inline u32 ssip_get_cmd(struct hsi_msg *msg) +{ + u32 *data; + + data = sg_virt(msg->sgt.sgl); + + return *data; +} + +static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) +{ + skb_frag_t *frag; + struct scatterlist *sg; + int i; + + BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1)); + + sg = msg->sgt.sgl; + sg_set_buf(sg, skb->data, skb_headlen(skb)); + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + sg = sg_next(sg); + BUG_ON(!sg); + frag = &skb_shinfo(skb)->frags[i]; + sg_set_page(sg, frag->page.p, frag->size, frag->page_offset); + } +} + +static void ssip_free_data(struct hsi_msg *msg) +{ + struct sk_buff *skb; + + skb = msg->context; + pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context, + skb); + msg->destructor = NULL; + dev_kfree_skb(skb); + hsi_free_msg(msg); +} + +static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi, + struct sk_buff *skb, gfp_t flags) +{ + struct hsi_msg *msg; + + msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags); + if (!msg) + return NULL; + ssip_skb_to_msg(skb, msg); + msg->destructor = ssip_free_data; + msg->channel = ssi->channel_id_data; + msg->context = skb; + + return msg; +} + +static inline void ssip_release_cmd(struct hsi_msg *msg) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl); + + dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg)); + spin_lock_bh(&ssi->lock); + list_add_tail(&msg->link, &ssi->cmdqueue); + spin_unlock_bh(&ssi->lock); +} + +static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg; + + BUG_ON(list_empty(&ssi->cmdqueue)); + + spin_lock_bh(&ssi->lock); + msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); + list_del(&msg->link); + spin_unlock_bh(&ssi->lock); + msg->destructor = ssip_release_cmd; + + return msg; +} + +static void ssip_free_cmds(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg, *tmp; + + list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { + list_del(&msg->link); + msg->destructor = NULL; + kfree(sg_virt(msg->sgt.sgl)); + hsi_free_msg(msg); + } +} + +static int ssip_alloc_cmds(struct ssi_protocol *ssi) +{ + struct hsi_msg *msg; + u32 *buf; + unsigned int i; + + for (i = 0; i < SSIP_MAX_CMDS; i++) { + msg = hsi_alloc_msg(1, GFP_KERNEL); + if (!msg) + goto out; + buf = kmalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) { + hsi_free_msg(msg); + goto out; + } + sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); + msg->channel = ssi->channel_id_cmd; + list_add_tail(&msg->link, &ssi->cmdqueue); + } + + return 0; +out: + ssip_free_cmds(ssi); + + return -ENOMEM; +} + +static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->recv_state = state; + switch (state) { + case RECV_IDLE: + del_timer(&ssi->rx_wd); + if (ssi->send_state == SEND_IDLE) + 
del_timer(&ssi->keep_alive); + break; + case RECV_READY: + /* CMT speech workaround */ + if (atomic_read(&ssi->tx_usecnt)) + break; + /* Otherwise fall through */ + case RECEIVING: + mod_timer(&ssi->keep_alive, jiffies + + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state) +{ + ssi->send_state = state; + switch (state) { + case SEND_IDLE: + case SEND_READY: + del_timer(&ssi->tx_wd); + if (ssi->recv_state == RECV_IDLE) + del_timer(&ssi->keep_alive); + break; + case WAIT4READY: + case SENDING: + case SENDING_SWBREAK: + mod_timer(&ssi->keep_alive, + jiffies + msecs_to_jiffies(SSIP_KATOUT)); + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + break; + default: + break; + } +} + +struct hsi_client *ssip_slave_get_master(struct hsi_client *slave) +{ + struct hsi_client *master = ERR_PTR(-ENODEV); + struct ssi_protocol *ssi; + + list_for_each_entry(ssi, &ssip_list, link) + if (slave->device.parent == ssi->cl->device.parent) { + master = ssi->cl; + break; + } + + return master; +} +EXPORT_SYMBOL_GPL(ssip_slave_get_master); + +int ssip_slave_start_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt)); + spin_lock_bh(&ssi->lock); + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + hsi_start_tx(master); + } + spin_unlock_bh(&ssi->lock); + atomic_inc(&ssi->tx_usecnt); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_start_tx); + +int ssip_slave_stop_tx(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + + WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0); + + if (atomic_dec_and_test(&ssi->tx_usecnt)) { + spin_lock_bh(&ssi->lock); + if ((ssi->send_state == SEND_READY) || + (ssi->send_state == WAIT4READY)) { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(master); + } + spin_unlock_bh(&ssi->lock); + } + dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt)); + + return 0; +} +EXPORT_SYMBOL_GPL(ssip_slave_stop_tx); + +int ssip_slave_running(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + return netif_running(ssi->netdev); +} +EXPORT_SYMBOL_GPL(ssip_slave_running); + +static void ssip_reset(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct list_head *head, *tmp; + struct hsi_msg *msg; + + if (netif_running(ssi->netdev)) + netif_carrier_off(ssi->netdev); + hsi_flush(cl); + spin_lock_bh(&ssi->lock); + if (ssi->send_state != SEND_IDLE) + hsi_stop_tx(cl); + if (ssi->waketest) + ssi_waketest(cl, 0); + del_timer(&ssi->rx_wd); + del_timer(&ssi->tx_wd); + del_timer(&ssi->keep_alive); + ssi->main_state = 0; + ssi->send_state = 0; + ssi->recv_state = 0; + ssi->waketest = 0; + ssi->rxid = 0; + ssi->txid = 0; + list_for_each_safe(head, tmp, &ssi->txqueue) { + msg = list_entry(head, struct hsi_msg, link); + dev_dbg(&cl->device, "Pending TX data\n"); + list_del(head); + ssip_free_data(msg); + } + ssi->txqueue_len = 0; + spin_unlock_bh(&ssi->lock); +} + +static void ssip_dump_state(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + spin_lock_bh(&ssi->lock); + dev_err(&cl->device, "Main state: %d\n", ssi->main_state); + dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state); + dev_err(&cl->device, "Send state: %d\n", 
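ssip_slave_start_tx() and ssip_slave_stop_tx() above refcount the slaves holding the TX wake line, with atomic_dec_and_test() ensuring only the last user drops it. A simplified userspace model; the driver additionally keys the start on its send-state machine, which this collapses to first-user/last-user for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int tx_usecnt;

/* Returns true when the counter hit zero, like atomic_dec_and_test(). */
static bool dec_and_test(atomic_int *v)
{
        return atomic_fetch_sub(v, 1) == 1;
}

static void slave_start_tx(void)
{
        if (atomic_fetch_add(&tx_usecnt, 1) == 0)
                printf("first user: raise TX wake line\n");
}

static void slave_stop_tx(void)
{
        if (dec_and_test(&tx_usecnt))
                printf("last user: drop TX wake line\n");
}

int main(void)
{
        slave_start_tx();       /* raises the line */
        slave_start_tx();       /* second user, line already up */
        slave_stop_tx();        /* still one user */
        slave_stop_tx();        /* drops the line */
        return 0;
}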
ssi->send_state); + dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? + "Online" : "Offline"); + dev_err(&cl->device, "Wake test %d\n", ssi->waketest); + dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); + dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); + + list_for_each_entry(msg, &ssi->txqueue, link) + dev_err(&cl->device, "pending TX data (%p)\n", msg); + spin_unlock_bh(&ssi->lock); +} + +static void ssip_error(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + ssip_dump_state(cl); + ssip_reset(cl); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); +} + +static void ssip_keep_alive(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", + ssi->main_state, ssi->recv_state, ssi->send_state); + + spin_lock(&ssi->lock); + if (ssi->recv_state == RECV_IDLE) + switch (ssi->send_state) { + case SEND_READY: + if (atomic_read(&ssi->tx_usecnt) == 0) + break; + /* + * Fall through. Workaround for cmt-speech; + * in that case we rely on audio timers. + */ + case SEND_IDLE: + spin_unlock(&ssi->lock); + return; + } + mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); + spin_unlock(&ssi->lock); +} + +static void ssip_wd(unsigned long data) +{ + struct hsi_client *cl = (struct hsi_client *)data; + + dev_err(&cl->device, "Watchdog triggered\n"); + ssip_error(cl); +} + +static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + dev_dbg(&cl->device, "Issuing RX command\n"); + msg = ssip_claim_cmd(ssi); + msg->complete = ssip_rxcmd_complete; + hsi_async_read(cl, msg); +} + +static void ssip_start_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, + ssi->recv_state); + spin_lock(&ssi->lock); + /* + * We can have two UP events in a row due to a short low + * high transition. Therefore we need to ignore the second UP event.
+ */ + if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { + if (ssi->main_state == INIT) { + ssi->main_state = HANDSHAKE; + spin_unlock(&ssi->lock); + ssip_send_bootinfo_req_cmd(cl); + } else { + spin_unlock(&ssi->lock); + } + return; + } + ssip_set_rxstate(ssi, RECV_READY); + spin_unlock(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_READY_CMD); + msg->complete = ssip_release_cmd; + dev_dbg(&cl->device, "Send READY\n"); + hsi_async_write(cl, msg); +} + +static void ssip_stop_rx(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); + spin_lock(&ssi->lock); + if (likely(ssi->main_state == ACTIVE)) + ssip_set_rxstate(ssi, RECV_IDLE); + spin_unlock(&ssi->lock); +} + +static void ssip_free_strans(struct hsi_msg *msg) +{ + ssip_free_data(msg->context); + ssip_release_cmd(msg); +} + +static void ssip_strans_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *data; + + data = msg->context; + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + ssip_set_txstate(ssi, SENDING); + spin_unlock(&ssi->lock); + hsi_async_write(cl, data); +} + +static int ssip_xmit(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg, *dmsg; + struct sk_buff *skb; + + spin_lock_bh(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + spin_unlock_bh(&ssi->lock); + return 0; + } + dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link); + list_del(&dmsg->link); + ssi->txqueue_len--; + spin_unlock_bh(&ssi->lock); + + msg = ssip_claim_cmd(ssi); + skb = dmsg->context; + msg->context = dmsg; + msg->complete = ssip_strans_complete; + msg->destructor = ssip_free_strans; + + spin_lock_bh(&ssi->lock); + ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), + ssi->txid)); + ssi->txid++; + ssip_set_txstate(ssi, SENDING); + spin_unlock_bh(&ssi->lock); + + dev_dbg(&cl->device, "Send STRANS (%d frames)\n", + SSIP_BYTES_TO_FRAMES(skb->len)); + + return hsi_async_write(cl, msg); +} + +/* In soft IRQ context */ +static void ssip_pn_rx(struct sk_buff *skb) +{ + struct net_device *dev = skb->dev; + + if (unlikely(!netif_running(dev))) { + dev_dbg(&dev->dev, "Drop RX packet\n"); + dev->stats.rx_dropped++; + dev_kfree_skb(skb); + return; + } + if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) { + dev_dbg(&dev->dev, "Error drop RX packet\n"); + dev->stats.rx_errors++; + dev->stats.rx_length_errors++; + dev_kfree_skb(skb); + return; + } + dev->stats.rx_packets++; + dev->stats.rx_bytes += skb->len; + + /* length field is exchanged in network byte order */ + ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]); + dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n", + ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); + + skb->protocol = htons(ETH_P_PHONET); + skb_reset_mac_header(skb); + __skb_pull(skb, 1); + netif_rx(skb); +} + +static void ssip_rx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX data error\n"); + ssip_free_data(msg); + ssip_error(cl); + return; + } + del_timer(&ssi->rx_wd); /* FIXME: Revisit */ + skb = msg->context; + ssip_pn_rx(skb); + hsi_free_msg(msg); +} + +static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + 
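ssip_xmit() above advertises the transfer length in 32-bit frames via SSIP_BYTES_TO_FRAMES, and ssip_pn_xmit() further below pads each skb to a whole number of frames. The two roundings sketched standalone:

#include <stdio.h>

/* ((x - 1) >> 2) + 1: bytes rounded up to 32-bit frames, as in the driver */
#define BYTES_TO_FRAMES(x)      ((((x) - 1) >> 2) + 1)

/* Pad needed to reach the next frame boundary, matching the skb_pad() call */
static unsigned int pad_to_frame(unsigned int len)
{
        return (len & 3) ? 4 - (len & 3) : 0;
}

int main(void)
{
        unsigned int len;

        for (len = 1; len <= 9; len++)
                printf("len %u -> %u frame(s), pad %u\n",
                       len, BYTES_TO_FRAMES(len), pad_to_frame(len));
        /* e.g. len 5 -> 2 frame(s), pad 3 */
        return 0;
}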
struct hsi_msg *msg; + + /* Workaround: Ignore CMT Loader message leftover */ + if (cmd == SSIP_CMT_LOADER_SYNC) + return; + + switch (ssi->main_state) { + case ACTIVE: + dev_err(&cl->device, "Boot info req on active state\n"); + ssip_error(cl); + /* Fall through */ + case INIT: + spin_lock(&ssi->lock); + ssi->main_state = HANDSHAKE; + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + /* Start boot handshake watchdog */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + spin_unlock(&ssi->lock); + dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info req verid mismatch\n"); + msg = ssip_claim_cmd(ssi); + ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID)); + msg->complete = ssip_release_cmd; + hsi_async_write(cl, msg); + break; + case HANDSHAKE: + /* Ignore */ + break; + default: + dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state); + break; + } +} + +static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) + dev_warn(&cl->device, "boot info resp verid mismatch\n"); + + spin_lock(&ssi->lock); + if (ssi->main_state != ACTIVE) + /* Use tx_wd as a boot watchdog in non-ACTIVE state */ + mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); + else + dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); +} + +static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + unsigned int wkres = SSIP_PAYLOAD(cmd); + + spin_lock(&ssi->lock); + if (ssi->main_state != HANDSHAKE) { + dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", + ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->waketest) { + ssi->waketest = 0; + ssi_waketest(cl, 0); /* FIXME: To be removed */ + } + ssi->main_state = ACTIVE; + del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ + spin_unlock(&ssi->lock); + + dev_notice(&cl->device, "WAKELINES TEST %s\n", + wkres & SSIP_WAKETEST_FAILED ?
"FAILED" : "OK"); + if (wkres & SSIP_WAKETEST_FAILED) { + ssip_error(cl); + return; + } + dev_dbg(&cl->device, "CMT is ONLINE\n"); + netif_wake_queue(ssi->netdev); + netif_carrier_on(ssi->netdev); +} + +static void ssip_rx_ready(struct hsi_client *cl) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + if (ssi->send_state != WAIT4READY) { + dev_dbg(&cl->device, "Ignore spurious READY command\n"); + spin_unlock(&ssi->lock); + return; + } + ssip_set_txstate(ssi, SEND_READY); + spin_unlock(&ssi->lock); + ssip_xmit(cl); +} + +static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct sk_buff *skb; + struct hsi_msg *msg; + int len = SSIP_PDU_LENGTH(cmd); + + dev_dbg(&cl->device, "RX strans: %d frames\n", len); + spin_lock(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", + ssi->send_state, ssi->main_state); + spin_unlock(&ssi->lock); + return; + } + ssip_set_rxstate(ssi, RECEIVING); + if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { + dev_err(&cl->device, "START TRANS id %d expeceted %d\n", + SSIP_MSG_ID(cmd), ssi->rxid); + spin_unlock(&ssi->lock); + goto out1; + } + ssi->rxid++; + spin_unlock(&ssi->lock); + skb = netdev_alloc_skb(ssi->netdev, len * 4); + if (unlikely(!skb)) { + dev_err(&cl->device, "No memory for rx skb\n"); + goto out1; + } + skb->dev = ssi->netdev; + skb_put(skb, len * 4); + msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); + if (unlikely(!msg)) { + dev_err(&cl->device, "No memory for RX data msg\n"); + goto out2; + } + msg->complete = ssip_rx_data_complete; + hsi_async_read(cl, msg); + + return; +out2: + dev_kfree_skb(skb); +out1: + ssip_error(cl); +} + +static void ssip_rxcmd_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + u32 cmd = ssip_get_cmd(msg); + unsigned int cmdid = SSIP_COMMAND(cmd); + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "RX error detected\n"); + ssip_release_cmd(msg); + ssip_error(cl); + return; + } + hsi_async_read(cl, msg); + dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd); + switch (cmdid) { + case SSIP_SW_BREAK: + /* Ignored */ + break; + case SSIP_BOOTINFO_REQ: + ssip_rx_bootinforeq(cl, cmd); + break; + case SSIP_BOOTINFO_RESP: + ssip_rx_bootinforesp(cl, cmd); + break; + case SSIP_WAKETEST_RESULT: + ssip_rx_waketest(cl, cmd); + break; + case SSIP_START_TRANS: + ssip_rx_strans(cl, cmd); + break; + case SSIP_READY: + ssip_rx_ready(cl); + break; + default: + dev_warn(&cl->device, "command 0x%08x not supported\n", cmd); + break; + } +} + +static void ssip_swbreak_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + ssip_release_cmd(msg); + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + if (atomic_read(&ssi->tx_usecnt)) { + ssip_set_txstate(ssi, SEND_READY); + } else { + ssip_set_txstate(ssi, SEND_IDLE); + hsi_stop_tx(cl); + } + spin_unlock(&ssi->lock); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } + netif_wake_queue(ssi->netdev); +} + +static void ssip_tx_data_complete(struct hsi_msg *msg) +{ + struct hsi_client *cl = msg->cl; + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *cmsg; + + if (msg->status == HSI_STATUS_ERROR) { + dev_err(&cl->device, "TX data 
error\n"); + ssip_error(cl); + goto out; + } + spin_lock(&ssi->lock); + if (list_empty(&ssi->txqueue)) { + ssip_set_txstate(ssi, SENDING_SWBREAK); + spin_unlock(&ssi->lock); + cmsg = ssip_claim_cmd(ssi); + ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); + cmsg->complete = ssip_swbreak_complete; + dev_dbg(&cl->device, "Send SWBREAK\n"); + hsi_async_write(cl, cmsg); + } else { + spin_unlock(&ssi->lock); + ssip_xmit(cl); + } +out: + ssip_free_data(msg); +} + +void ssip_port_event(struct hsi_client *cl, unsigned long event) +{ + switch (event) { + case HSI_EVENT_START_RX: + ssip_start_rx(cl); + break; + case HSI_EVENT_STOP_RX: + ssip_stop_rx(cl); + break; + default: + return; + } +} + +static int ssip_pn_open(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + int err; + + err = hsi_claim_port(cl, 1); + if (err < 0) { + dev_err(&cl->device, "SSI port already claimed\n"); + return err; + } + err = hsi_register_port_event(cl, ssip_port_event); + if (err < 0) { + dev_err(&cl->device, "Register HSI port event failed (%d)\n", + err); + return err; + } + dev_dbg(&cl->device, "Configuring SSI port\n"); + hsi_setup(cl); + spin_lock_bh(&ssi->lock); + if (!ssi->waketest) { + ssi->waketest = 1; + ssi_waketest(cl, 1); /* FIXME: To be removed */ + } + ssi->main_state = INIT; + spin_unlock_bh(&ssi->lock); + + return 0; +} + +static int ssip_pn_stop(struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + + ssip_reset(cl); + hsi_unregister_port_event(cl); + hsi_release_port(cl); + + return 0; +} + +static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU) + return -EINVAL; + dev->mtu = new_mtu; + + return 0; +} + +static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev->dev.parent); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct hsi_msg *msg; + + if ((skb->protocol != htons(ETH_P_PHONET)) || + (skb->len < SSIP_MIN_PN_HDR)) + goto drop; + /* Pad to 32-bits - FIXME: Revisit*/ + if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) + goto drop; + + /* + * Modem sends Phonet messages over SSI with its own endianess... + * Assume that modem has the same endianess as we do. 
+ */ + if (skb_cow_head(skb, 0)) + goto drop; + + /* length field is exchanged in network byte order */ + ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]); + + msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); + if (!msg) { + dev_dbg(&cl->device, "Dropping tx data: No memory\n"); + goto drop; + } + msg->complete = ssip_tx_data_complete; + + spin_lock_bh(&ssi->lock); + if (unlikely(ssi->main_state != ACTIVE)) { + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n"); + goto drop2; + } + list_add_tail(&msg->link, &ssi->txqueue); + ssi->txqueue_len++; + if (dev->tx_queue_len < ssi->txqueue_len) { + dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len); + netif_stop_queue(dev); + } + if (ssi->send_state == SEND_IDLE) { + ssip_set_txstate(ssi, WAIT4READY); + spin_unlock_bh(&ssi->lock); + dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len); + hsi_start_tx(cl); + } else if (ssi->send_state == SEND_READY) { + /* Needed for cmt-speech workaround */ + dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", + ssi->txqueue_len); + spin_unlock_bh(&ssi->lock); + ssip_xmit(cl); + } else { + spin_unlock_bh(&ssi->lock); + } + dev->stats.tx_packets++; + dev->stats.tx_bytes += skb->len; + + return 0; +drop2: + hsi_free_msg(msg); +drop: + dev->stats.tx_dropped++; + dev_kfree_skb(skb); + + return 0; +} + +/* CMT reset event handler */ +void ssip_reset_event(struct hsi_client *master) +{ + struct ssi_protocol *ssi = hsi_client_drvdata(master); + dev_err(&ssi->cl->device, "CMT reset detected!\n"); + ssip_error(ssi->cl); +} +EXPORT_SYMBOL_GPL(ssip_reset_event); + +static const struct net_device_ops ssip_pn_ops = { + .ndo_open = ssip_pn_open, + .ndo_stop = ssip_pn_stop, + .ndo_start_xmit = ssip_pn_xmit, + .ndo_change_mtu = ssip_pn_set_mtu, +}; + +static void ssip_pn_setup(struct net_device *dev) +{ + dev->features = 0; + dev->netdev_ops = &ssip_pn_ops; + dev->type = ARPHRD_PHONET; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->mtu = SSIP_DEFAULT_MTU; + dev->hard_header_len = 1; + dev->dev_addr[0] = PN_MEDIA_SOS; + dev->addr_len = 1; + dev->tx_queue_len = SSIP_TXQUEUE_LEN; + + dev->destructor = free_netdev; + dev->header_ops = &phonet_header_ops; +} + +static int ssi_protocol_probe(struct device *dev) +{ + static const char ifname[] = "phonet%d"; + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi; + int err; + + ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); + if (!ssi) { + dev_err(dev, "No memory for ssi protocol\n"); + return -ENOMEM; + } + + spin_lock_init(&ssi->lock); + init_timer_deferrable(&ssi->rx_wd); + init_timer_deferrable(&ssi->tx_wd); + init_timer(&ssi->keep_alive); + ssi->rx_wd.data = (unsigned long)cl; + ssi->rx_wd.function = ssip_wd; + ssi->tx_wd.data = (unsigned long)cl; + ssi->tx_wd.function = ssip_wd; + ssi->keep_alive.data = (unsigned long)cl; + ssi->keep_alive.function = ssip_keep_alive; + INIT_LIST_HEAD(&ssi->txqueue); + INIT_LIST_HEAD(&ssi->cmdqueue); + atomic_set(&ssi->tx_usecnt, 0); + hsi_client_set_drvdata(cl, ssi); + ssi->cl = cl; + + ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); + if (ssi->channel_id_cmd < 0) { + err = ssi->channel_id_cmd; + dev_err(dev, "Could not get cmd channel (%d)\n", err); + goto out; + } + + ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data"); + if (ssi->channel_id_data < 0) { + err = ssi->channel_id_data; + dev_err(dev, "Could not get data channel (%d)\n", err); + goto out; + } + + err = ssip_alloc_cmds(ssi); + if (err < 0) { + dev_err(dev, "No memory 
for commands\n"); + goto out; + } + + ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup); + if (!ssi->netdev) { + dev_err(dev, "No memory for netdev\n"); + err = -ENOMEM; + goto out1; + } + + SET_NETDEV_DEV(ssi->netdev, dev); + netif_carrier_off(ssi->netdev); + err = register_netdev(ssi->netdev); + if (err < 0) { + dev_err(dev, "Register netdev failed (%d)\n", err); + goto out2; + } + + list_add(&ssi->link, &ssip_list); + + dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n", + ssi->channel_id_cmd, ssi->channel_id_data); + + return 0; +out2: + free_netdev(ssi->netdev); +out1: + ssip_free_cmds(ssi); +out: + kfree(ssi); + + return err; +} + +static int ssi_protocol_remove(struct device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev); + struct ssi_protocol *ssi = hsi_client_drvdata(cl); + + list_del(&ssi->link); + unregister_netdev(ssi->netdev); + ssip_free_cmds(ssi); + hsi_client_set_drvdata(cl, NULL); + kfree(ssi); + + return 0; +} + +static struct hsi_client_driver ssip_driver = { + .driver = { + .name = "ssi-protocol", + .owner = THIS_MODULE, + .probe = ssi_protocol_probe, + .remove = ssi_protocol_remove, + }, +}; + +static int __init ssip_init(void) +{ + pr_info("SSI protocol aka McSAAB added\n"); + + return hsi_register_client_driver(&ssip_driver); +} +module_init(ssip_init); + +static void __exit ssip_exit(void) +{ + hsi_unregister_client_driver(&ssip_driver); + pr_info("SSI protocol driver removed\n"); +} +module_exit(ssip_exit); + +MODULE_ALIAS("hsi:ssi-protocol"); +MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); +MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>"); +MODULE_DESCRIPTION("SSI protocol improved aka McSAAB"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hsi/controllers/Kconfig b/drivers/hsi/controllers/Kconfig new file mode 100644 index 00000000000..6aba2780817 --- /dev/null +++ b/drivers/hsi/controllers/Kconfig @@ -0,0 +1,19 @@ +# +# HSI controllers configuration +# +comment "HSI controllers" + +config OMAP_SSI + tristate "OMAP SSI hardware driver" + depends on HSI && OF && (ARCH_OMAP3 || (ARM && COMPILE_TEST)) + ---help--- + SSI is a legacy version of HSI. It is usually used to connect + an application engine with a cellular modem. + If you say Y here, you will enable the OMAP SSI hardware driver. + + If unsure, say N. + +config OMAP_SSI_PORT + tristate + default m if OMAP_SSI=m + default y if OMAP_SSI=y diff --git a/drivers/hsi/controllers/Makefile b/drivers/hsi/controllers/Makefile new file mode 100644 index 00000000000..d2665cf9c54 --- /dev/null +++ b/drivers/hsi/controllers/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for HSI controllers drivers +# + +obj-$(CONFIG_OMAP_SSI) += omap_ssi.o +obj-$(CONFIG_OMAP_SSI_PORT) += omap_ssi_port.o diff --git a/drivers/hsi/controllers/omap_ssi.c b/drivers/hsi/controllers/omap_ssi.c new file mode 100644 index 00000000000..0fc7a7fd014 --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.c @@ -0,0 +1,625 @@ +/* OMAP SSI driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/compiler.h> +#include <linux/err.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/delay.h> +#include <linux/seq_file.h> +#include <linux/scatterlist.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> +#include <linux/of_platform.h> +#include <linux/hsi/hsi.h> +#include <linux/idr.h> + +#include "omap_ssi_regs.h" +#include "omap_ssi.h" + +/* For automatically allocated device IDs */ +static DEFINE_IDA(platform_omap_ssi_ida); + +#ifdef CONFIG_DEBUG_FS +static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + + pm_runtime_get_sync(ssi->device.parent); + seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG)); + seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG)); + seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG)); + pm_runtime_put_sync(ssi->device.parent); + + return 0; +} + +static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_controller *ssi = m->private; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + void __iomem *sys = omap_ssi->sys; + int lch; + + pm_runtime_get_sync(ssi->device.parent); + + seq_printf(m, "GDD_MPU_STATUS\t: 0x%08x\n", + readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG)); + seq_printf(m, "GDD_MPU_ENABLE\t: 0x%08x\n\n", + readl(sys + SSI_GDD_MPU_IRQ_ENABLE_REG)); + seq_printf(m, "HW_ID\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_HW_ID_REG)); + seq_printf(m, "PPORT_ID\t: 0x%08x\n", + readl(gdd + SSI_GDD_PPORT_ID_REG)); + seq_printf(m, "MPORT_ID\t: 0x%08x\n", + readl(gdd + SSI_GDD_MPORT_ID_REG)); + seq_printf(m, "TEST\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_TEST_REG)); + seq_printf(m, "GCR\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_GCR_REG)); + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + seq_printf(m, "\nGDD LCH %d\n=========\n", lch); + seq_printf(m, "CSDP\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CSDP_REG(lch))); + seq_printf(m, "CCR\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CCR_REG(lch))); + seq_printf(m, "CICR\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CICR_REG(lch))); + seq_printf(m, "CSR\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CSR_REG(lch))); + seq_printf(m, "CSSA\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_CSSA_REG(lch))); + seq_printf(m, "CDSA\t\t: 0x%08x\n", + readl(gdd + SSI_GDD_CDSA_REG(lch))); + seq_printf(m, "CEN\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CEN_REG(lch))); + seq_printf(m, "CSAC\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CSAC_REG(lch))); + seq_printf(m, "CDAC\t\t: 0x%04x\n", + readw(gdd + SSI_GDD_CDAC_REG(lch))); + seq_printf(m, "CLNK_CTRL\t: 0x%04x\n", + readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); + } + + pm_runtime_put_sync(ssi->device.parent); + + return 0; +} + +static int ssi_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_show, inode->i_private); +} + +static int 
ssi_gdd_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_gdd_show, inode->i_private); +} + +static const struct file_operations ssi_regs_fops = { + .open = ssi_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations ssi_gdd_regs_fops = { + .open = ssi_gdd_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init ssi_debug_add_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct dentry *dir; + + /* SSI controller */ + omap_ssi->dir = debugfs_create_dir(dev_name(&ssi->device), NULL); + if (IS_ERR(omap_ssi->dir)) + return PTR_ERR(omap_ssi->dir); + + debugfs_create_file("regs", S_IRUGO, omap_ssi->dir, ssi, + &ssi_regs_fops); + /* SSI GDD (DMA) */ + dir = debugfs_create_dir("gdd", omap_ssi->dir); + if (IS_ERR(dir)) + goto rback; + debugfs_create_file("regs", S_IRUGO, dir, ssi, &ssi_gdd_regs_fops); + + return 0; +rback: + debugfs_remove_recursive(omap_ssi->dir); + + return PTR_ERR(dir); +} + +static void ssi_debug_remove_ctrl(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + debugfs_remove_recursive(omap_ssi->dir); +} +#endif /* CONFIG_DEBUG_FS */ + +/* + * FIXME: Horrible HACK needed until we remove the useless wakeline test + * in the CMT. To be removed !!!! + */ +void ssi_waketest(struct hsi_client *cl, unsigned int enable) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + omap_port->wktest = !!enable; + if (omap_port->wktest) { + pm_runtime_get_sync(ssi->device.parent); + writel_relaxed(SSI_WAKE(0), + omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + } else { + writel_relaxed(SSI_WAKE(0), + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + pm_runtime_put_sync(ssi->device.parent); + } +} +EXPORT_SYMBOL_GPL(ssi_waketest); + +static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg = omap_ssi->gdd_trn[lch].msg; + struct hsi_port *port = to_hsi_port(msg->cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + unsigned int dir; + u32 csr; + u32 val; + + spin_lock(&omap_ssi->lock); + + val = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + val &= ~SSI_GDD_LCH(lch); + writel_relaxed(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + + if (msg->ttype == HSI_MSG_READ) { + dir = DMA_FROM_DEVICE; + val = SSI_DATAAVAILABLE(msg->channel); + pm_runtime_put_sync(ssi->device.parent); + } else { + dir = DMA_TO_DEVICE; + val = SSI_DATAACCEPT(msg->channel); + /* Keep clocks reference for write pio event */ + } + dma_unmap_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, dir); + csr = readw(omap_ssi->gdd + SSI_GDD_CSR_REG(lch)); + omap_ssi->gdd_trn[lch].msg = NULL; /* release GDD lch */ + dev_dbg(&port->device, "DMA completed ch %d ttype %d\n", + msg->channel, msg->ttype); + spin_unlock(&omap_ssi->lock); + if (csr & SSI_CSR_TOUR) { /* Timeout error */ + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + spin_lock(&omap_port->lock); + list_del(&msg->link); /* Dequeue msg */ + spin_unlock(&omap_port->lock); + msg->complete(msg); + return; + } + spin_lock(&omap_port->lock); + val |= 
readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + msg->status = HSI_STATUS_COMPLETED; + msg->actual_len = sg_dma_len(msg->sgt.sgl); +} + +static void ssi_gdd_tasklet(unsigned long dev) +{ + struct hsi_controller *ssi = (struct hsi_controller *)dev; + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int lch; + u32 status_reg; + + pm_runtime_get_sync(ssi->device.parent); + + status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { + if (status_reg & SSI_GDD_LCH(lch)) + ssi_gdd_complete(ssi, lch); + } + writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); + status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); + + pm_runtime_put_sync(ssi->device.parent); + + if (status_reg) + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + else + enable_irq(omap_ssi->gdd_irq); + +} + +static irqreturn_t ssi_gdd_isr(int irq, void *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + tasklet_hi_schedule(&omap_ssi->gdd_tasklet); + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static unsigned long ssi_get_clk_rate(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + unsigned long rate = clk_get_rate(omap_ssi->fck); + return rate; +} + +static int __init ssi_get_iomem(struct platform_device *pd, + const char *name, void __iomem **pbase, dma_addr_t *phy) +{ + struct resource *mem; + struct resource *ioarea; + void __iomem *base; + struct hsi_controller *ssi = platform_get_drvdata(pd); + + mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); + if (!mem) { + dev_err(&pd->dev, "IO memory region missing (%s)\n", name); + return -ENXIO; + } + ioarea = devm_request_mem_region(&ssi->device, mem->start, + resource_size(mem), dev_name(&pd->dev)); + if (!ioarea) { + dev_err(&pd->dev, "%s IO memory region request failed\n", + mem->name); + return -ENXIO; + } + base = devm_ioremap(&ssi->device, mem->start, resource_size(mem)); + if (!base) { + dev_err(&pd->dev, "%s IO remap failed\n", mem->name); + return -ENXIO; + } + *pbase = base; + + if (phy) + *phy = mem->start; + + return 0; +} + +static int __init ssi_add_controller(struct hsi_controller *ssi, + struct platform_device *pd) +{ + struct omap_ssi_controller *omap_ssi; + int err; + + omap_ssi = devm_kzalloc(&ssi->device, sizeof(*omap_ssi), GFP_KERNEL); + if (!omap_ssi) { + dev_err(&pd->dev, "not enough memory for omap ssi\n"); + return -ENOMEM; + } + + ssi->id = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL); + if (ssi->id < 0) { + err = ssi->id; + goto out_err; + } + + ssi->owner = THIS_MODULE; + ssi->device.parent = &pd->dev; + dev_set_name(&ssi->device, "ssi%d", ssi->id); + hsi_controller_set_drvdata(ssi, omap_ssi); + omap_ssi->dev = &ssi->device; + err = ssi_get_iomem(pd, "sys", &omap_ssi->sys, NULL); + if (err < 0) + goto out_err; + err = ssi_get_iomem(pd, "gdd", &omap_ssi->gdd, NULL); + if (err < 0) + goto out_err; + omap_ssi->gdd_irq = platform_get_irq_byname(pd, "gdd_mpu"); + if (omap_ssi->gdd_irq < 0) { + dev_err(&pd->dev, "GDD IRQ resource missing\n"); + err = omap_ssi->gdd_irq; + goto out_err; + } + tasklet_init(&omap_ssi->gdd_tasklet, ssi_gdd_tasklet, + (unsigned long)ssi); + err = devm_request_irq(&ssi->device, omap_ssi->gdd_irq, ssi_gdd_isr, + 0, "gdd_mpu", ssi); + if (err < 0) { + dev_err(&ssi->device, "Request GDD IRQ %d 
failed (%d)", + omap_ssi->gdd_irq, err); + goto out_err; + } + + omap_ssi->port = devm_kzalloc(&ssi->device, + sizeof(struct omap_ssi_port *) * ssi->num_ports, GFP_KERNEL); + if (!omap_ssi->port) { + err = -ENOMEM; + goto out_err; + } + + omap_ssi->fck = devm_clk_get(&ssi->device, "ssi_ssr_fck"); + if (IS_ERR(omap_ssi->fck)) { + dev_err(&pd->dev, "Could not acquire clock \"ssi_ssr_fck\": %li\n", + PTR_ERR(omap_ssi->fck)); + err = -ENODEV; + goto out_err; + } + + /* TODO: find a register that can be used to detect context loss */ + omap_ssi->get_loss = NULL; + + omap_ssi->max_speed = UINT_MAX; + spin_lock_init(&omap_ssi->lock); + err = hsi_register_controller(ssi); + + if (err < 0) + goto out_err; + + return 0; + +out_err: + ida_simple_remove(&platform_omap_ssi_ida, ssi->id); + return err; +} + +static int __init ssi_hw_init(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + unsigned int i; + u32 val; + int err; + + err = pm_runtime_get_sync(ssi->device.parent); + if (err < 0) { + dev_err(&ssi->device, "runtime PM failed %d\n", err); + return err; + } + /* Resetting SSI controller */ + writel_relaxed(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG); + val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); + for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) { + msleep(20); + val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); + } + if (!(val & SSI_RESETDONE)) { + dev_err(&ssi->device, "SSI HW reset failed\n"); + pm_runtime_put_sync(ssi->device.parent); + return -EIO; + } + /* Resetting GDD */ + writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG); + /* Get FCK rate in kHz */ + omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000); + dev_dbg(&ssi->device, "SSI fck rate %lu kHz\n", omap_ssi->fck_rate); + /* Set default PM settings */ + val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART; + writel_relaxed(val, omap_ssi->sys + SSI_SYSCONFIG_REG); + omap_ssi->sysconfig = val; + writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG); + omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON; + pm_runtime_put_sync(ssi->device.parent); + + return 0; +} + +static void ssi_remove_controller(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int id = ssi->id; + tasklet_kill(&omap_ssi->gdd_tasklet); + hsi_unregister_controller(ssi); + ida_simple_remove(&platform_omap_ssi_ida, id); +} + +static inline int ssi_of_get_available_ports_count(const struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + if (of_device_is_compatible(child, "ti,omap3-ssi-port")) + num++; + + return num; +} + +static int ssi_remove_ports(struct device *dev, void *c) +{ + struct platform_device *pdev = to_platform_device(dev); + + of_device_unregister(pdev); + + return 0; +} + +static int __init ssi_probe(struct platform_device *pd) +{ + struct platform_device *childpdev; + struct device_node *np = pd->dev.of_node; + struct device_node *child; + struct hsi_controller *ssi; + int err; + int num_ports; + + if (!np) { + dev_err(&pd->dev, "missing device tree data\n"); + return -EINVAL; + } + + num_ports = ssi_of_get_available_ports_count(np); + + ssi = hsi_alloc_controller(num_ports, GFP_KERNEL); + if (!ssi) { + dev_err(&pd->dev, "No memory for controller\n"); + return -ENOMEM; + } + + platform_set_drvdata(pd, ssi); + + err = ssi_add_controller(ssi, pd); + if (err < 0) + goto out1; + + pm_runtime_irq_safe(&pd->dev); +
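/* The GDD tasklet takes runtime PM references for this device from atomic context, so mark the PM callbacks IRQ-safe before enabling runtime PM */ +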
pm_runtime_enable(&pd->dev); + + err = ssi_hw_init(ssi); + if (err < 0) + goto out2; +#ifdef CONFIG_DEBUG_FS + err = ssi_debug_add_ctrl(ssi); + if (err < 0) + goto out2; +#endif + + for_each_available_child_of_node(np, child) { + if (!of_device_is_compatible(child, "ti,omap3-ssi-port")) + continue; + + childpdev = of_platform_device_create(child, NULL, &pd->dev); + if (!childpdev) { + err = -ENODEV; + dev_err(&pd->dev, "failed to create ssi controller port\n"); + goto out3; + } + } + + dev_info(&pd->dev, "ssi controller %d initialized (%d ports)!\n", + ssi->id, num_ports); + return err; +out3: + device_for_each_child(&pd->dev, NULL, ssi_remove_ports); +out2: + ssi_remove_controller(ssi); +out1: + platform_set_drvdata(pd, NULL); + pm_runtime_disable(&pd->dev); + + return err; +} + +static int __exit ssi_remove(struct platform_device *pd) +{ + struct hsi_controller *ssi = platform_get_drvdata(pd); + +#ifdef CONFIG_DEBUG_FS + ssi_debug_remove_ctrl(ssi); +#endif + ssi_remove_controller(ssi); + platform_set_drvdata(pd, NULL); + + pm_runtime_disable(&pd->dev); + + /* cleanup of the of_platform_device_create() calls from probe */ + device_for_each_child(&pd->dev, NULL, ssi_remove_ports); + + return 0; +} + +#ifdef CONFIG_PM_RUNTIME +static int omap_ssi_runtime_suspend(struct device *dev) +{ + struct hsi_controller *ssi = dev_get_drvdata(dev); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "runtime suspend!\n"); + + if (omap_ssi->get_loss) + omap_ssi->loss_count = + omap_ssi->get_loss(ssi->device.parent); + + return 0; +} + +static int omap_ssi_runtime_resume(struct device *dev) +{ + struct hsi_controller *ssi = dev_get_drvdata(dev); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "runtime resume!\n"); + + if ((omap_ssi->get_loss) && (omap_ssi->loss_count == + omap_ssi->get_loss(ssi->device.parent))) + return 0; + + writel_relaxed(omap_ssi->gdd_gcr, omap_ssi->gdd + SSI_GDD_GCR_REG); + + return 0; +} + +static const struct dev_pm_ops omap_ssi_pm_ops = { + SET_RUNTIME_PM_OPS(omap_ssi_runtime_suspend, omap_ssi_runtime_resume, + NULL) +}; + +#define DEV_PM_OPS (&omap_ssi_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif + +#ifdef CONFIG_OF +static const struct of_device_id omap_ssi_of_match[] = { + { .compatible = "ti,omap3-ssi", }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_ssi_of_match); +#else +#define omap_ssi_of_match NULL +#endif + +static struct platform_driver ssi_pdriver = { + .remove = __exit_p(ssi_remove), + .driver = { + .name = "omap_ssi", + .owner = THIS_MODULE, + .pm = DEV_PM_OPS, + .of_match_table = omap_ssi_of_match, + }, +}; + +module_platform_driver_probe(ssi_pdriver, ssi_probe); + +MODULE_ALIAS("platform:omap_ssi"); +MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); +MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); +MODULE_DESCRIPTION("Synchronous Serial Interface Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h new file mode 100644 index 00000000000..9d056417d88 --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi.h @@ -0,0 +1,166 @@ +/* OMAP SSI internal interface. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2013 Sebastian Reichel + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __LINUX_HSI_OMAP_SSI_H__ +#define __LINUX_HSI_OMAP_SSI_H__ + +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/hsi/hsi.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/io.h> + +#define SSI_MAX_CHANNELS 8 +#define SSI_MAX_GDD_LCH 8 +#define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) + +/** + * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context + * @mode: Bit transmission mode + * @channels: Number of channels + * @frame_size: Frame size in bits + * @timeout: RX frame timeout + * @divisor: TX divider + * @arb_mode: Arbitration mode for TX frame (Round robin, priority) + */ +struct omap_ssm_ctx { + u32 mode; + u32 channels; + u32 frame_size; + union { + u32 timeout; /* Rx Only */ + struct { + u32 arb_mode; + u32 divisor; + }; /* Tx only */ + }; +}; + +/** + * struct omap_ssi_port - OMAP SSI port data + * @dev: device associated to the port (HSI port) + * @pdev: platform device associated to the port + * @sst_dma: SSI transmitter physical base address + * @ssr_dma: SSI receiver physical base address + * @sst_base: SSI transmitter base address + * @ssr_base: SSI receiver base address + * @wk_lock: spin lock to serialize access to the wake lines + * @lock: Spin lock to serialize access to the SSI port + * @channels: Current number of channels configured (1, 2, 4 or 8) + * @txqueue: TX message queues + * @rxqueue: RX message queues + * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode) + * @irq: IRQ number + * @wake_irq: IRQ number for incoming wake line (-1 if none) + * @wake_gpio: GPIO number for incoming wake line (-1 if none) + * @pio_tasklet: Bottom half for PIO transfers and events + * @wake_tasklet: Bottom half for incoming wake events + * @wkin_cken: Keep track of clock references due to the incoming wake line + * @wk_refcount: Reference count for output wake line + * @sys_mpu_enable: Context for the interrupt enable register for irq 0 + * @sst: Context for the synchronous serial transmitter + * @ssr: Context for the synchronous serial receiver + */ +struct omap_ssi_port { + struct device *dev; + struct device *pdev; + dma_addr_t sst_dma; + dma_addr_t ssr_dma; + void __iomem *sst_base; + void __iomem *ssr_base; + spinlock_t wk_lock; + spinlock_t lock; + unsigned int channels; + struct list_head txqueue[SSI_MAX_CHANNELS]; + struct list_head rxqueue[SSI_MAX_CHANNELS]; + struct list_head brkqueue; + unsigned int irq; + int wake_irq; + int wake_gpio; + struct tasklet_struct pio_tasklet; + struct tasklet_struct wake_tasklet; + bool wktest:1; /* FIXME: HACK to be removed */ + bool wkin_cken:1; /* Workaround */ + unsigned int wk_refcount; + /* OMAP SSI port context */ + u32 sys_mpu_enable; /* We use only one irq */ + struct omap_ssm_ctx sst; + struct omap_ssm_ctx ssr; + u32 loss_count; + u32 port_id; +#ifdef CONFIG_DEBUG_FS + struct dentry *dir; +#endif +}; + +/** + * struct gdd_trn - GDD transaction data + * @msg: Pointer to the HSI message being served + * @sg: Pointer to the current sg entry being served + */ +struct gdd_trn { +
struct hsi_msg *msg; + struct scatterlist *sg; +}; + +/** + * struct omap_ssi_controller - OMAP SSI controller data + * @dev: device associated to the controller (HSI controller) + * @sys: SSI I/O base address + * @gdd: GDD I/O base address + * @fck: SSI functional clock + * @gdd_irq: IRQ line for GDD + * @gdd_tasklet: bottom half for DMA transfers + * @gdd_trn: Array of GDD transaction data for ongoing GDD transfers + * @lock: lock to serialize access to GDD + * @loss_count: To follow if we need to restore context or not + * @max_speed: Maximum TX speed (Kb/s) set by the clients. + * @sysconfig: SSI controller saved context + * @gdd_gcr: SSI GDD saved context + * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any + * @port: Array of pointers of the ports of the controller + * @dir: Debugfs SSI root directory + */ +struct omap_ssi_controller { + struct device *dev; + void __iomem *sys; + void __iomem *gdd; + struct clk *fck; + unsigned int gdd_irq; + struct tasklet_struct gdd_tasklet; + struct gdd_trn gdd_trn[SSI_MAX_GDD_LCH]; + spinlock_t lock; + unsigned long fck_rate; + u32 loss_count; + u32 max_speed; + /* OMAP SSI Controller context */ + u32 sysconfig; + u32 gdd_gcr; + int (*get_loss)(struct device *dev); + struct omap_ssi_port **port; +#ifdef CONFIG_DEBUG_FS + struct dentry *dir; +#endif +}; + +#endif /* __LINUX_HSI_OMAP_SSI_H__ */ diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c new file mode 100644 index 00000000000..b8693f0b27f --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi_port.c @@ -0,0 +1,1399 @@ +/* OMAP SSI port driver. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> + +#include <linux/of_gpio.h> +#include <linux/debugfs.h> + +#include "omap_ssi_regs.h" +#include "omap_ssi.h" + +static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused) +{ + return 0; +} + +static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) +{ + return 0; +} + +static inline unsigned int ssi_wakein(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + return gpio_get_value(omap_port->wake_gpio); +} + +#ifdef CONFIG_DEBUG_FS +static void ssi_debug_remove_port(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + debugfs_remove_recursive(omap_port->dir); +} + +static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused) +{ + struct hsi_port *port = m->private; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base = omap_ssi->sys; + unsigned int ch; + + pm_runtime_get_sync(omap_port->pdev); + if (omap_port->wake_irq > 0) + seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port)); + seq_printf(m, "WAKE\t\t: 0x%08x\n", + readl(base + SSI_WAKE_REG(port->num))); + seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0, + readl(base + SSI_MPU_ENABLE_REG(port->num, 0))); + seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0, + readl(base + SSI_MPU_STATUS_REG(port->num, 0))); + /* SST */ + base = omap_port->sst_base; + seq_puts(m, "\nSST\n===\n"); + seq_printf(m, "ID SST\t\t: 0x%08x\n", + readl(base + SSI_SST_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + readl(base + SSI_SST_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + readl(base + SSI_SST_FRAMESIZE_REG)); + seq_printf(m, "DIVISOR\t\t: 0x%08x\n", + readl(base + SSI_SST_DIVISOR_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + readl(base + SSI_SST_CHANNELS_REG)); + seq_printf(m, "ARBMODE\t\t: 0x%08x\n", + readl(base + SSI_SST_ARBMODE_REG)); + seq_printf(m, "TXSTATE\t\t: 0x%08x\n", + readl(base + SSI_SST_TXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + readl(base + SSI_SST_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + readl(base + SSI_SST_BREAK_REG)); + for (ch = 0; ch < omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + readl(base + SSI_SST_BUFFER_CH_REG(ch))); + } + /* SSR */ + base = omap_port->ssr_base; + seq_puts(m, "\nSSR\n===\n"); + seq_printf(m, "ID SSR\t\t: 0x%08x\n", + readl(base + SSI_SSR_ID_REG)); + seq_printf(m, "MODE\t\t: 0x%08x\n", + readl(base + SSI_SSR_MODE_REG)); + seq_printf(m, "FRAMESIZE\t: 0x%08x\n", + readl(base + SSI_SSR_FRAMESIZE_REG)); + seq_printf(m, "CHANNELS\t: 0x%08x\n", + readl(base + SSI_SSR_CHANNELS_REG)); + seq_printf(m, "TIMEOUT\t\t: 0x%08x\n", + readl(base + SSI_SSR_TIMEOUT_REG)); + seq_printf(m, "RXSTATE\t\t: 0x%08x\n", + readl(base + SSI_SSR_RXSTATE_REG)); + seq_printf(m, "BUFSTATE\t: 0x%08x\n", + readl(base + SSI_SSR_BUFSTATE_REG)); + seq_printf(m, "BREAK\t\t: 0x%08x\n", + readl(base + SSI_SSR_BREAK_REG)); + seq_printf(m, "ERROR\t\t: 0x%08x\n", + readl(base + SSI_SSR_ERROR_REG)); + seq_printf(m, "ERRORACK\t: 0x%08x\n", + readl(base + SSI_SSR_ERRORACK_REG)); + for (ch = 0; ch 
< omap_port->channels; ch++) { + seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, + readl(base + SSI_SSR_BUFFER_CH_REG(ch))); + } + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +static int ssi_port_regs_open(struct inode *inode, struct file *file) +{ + return single_open(file, ssi_debug_port_show, inode->i_private); +} + +static const struct file_operations ssi_port_regs_fops = { + .open = ssi_port_regs_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int ssi_div_get(void *data, u64 *val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + pm_runtime_get_sync(omap_port->pdev); + *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +static int ssi_div_set(void *data, u64 val) +{ + struct hsi_port *port = data; + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + if (val > 127) + return -EINVAL; + + pm_runtime_get_sync(omap_port->pdev); + writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); + omap_port->sst.divisor = val; + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n"); + +static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port, + struct dentry *dir) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + + dir = debugfs_create_dir(dev_name(omap_port->dev), dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + omap_port->dir = dir; + debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops); + dir = debugfs_create_dir("sst", dir); + if (IS_ERR(dir)) + return PTR_ERR(dir); + debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port, + &ssi_sst_div_fops); + + return 0; +} +#endif + +static int ssi_claim_lch(struct hsi_msg *msg) +{ + + struct hsi_port *port = hsi_get_port(msg->cl); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int lch; + + for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) + if (!omap_ssi->gdd_trn[lch].msg) { + omap_ssi->gdd_trn[lch].msg = msg; + omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; + return lch; + } + + return -EBUSY; +} + +static int ssi_start_dma(struct hsi_msg *msg, int lch) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *gdd = omap_ssi->gdd; + int err; + u16 csdp; + u16 ccr; + u32 s_addr; + u32 d_addr; + u32 tmp; + + if (msg->ttype == HSI_MSG_READ) { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_FROM_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | + SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | + SSI_DATA_TYPE_S32; + ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ + ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = omap_port->ssr_dma + + SSI_SSR_BUFFER_CH_REG(msg->channel); + d_addr = sg_dma_address(msg->sgt.sgl); + } else { + err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, + DMA_TO_DEVICE); + if (err < 0) { + dev_dbg(&ssi->device, "DMA map SG failed !\n"); + return err; + } + csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | + SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | + 
SSI_DATA_TYPE_S32; + ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ + ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | + SSI_CCR_ENABLE; + s_addr = sg_dma_address(msg->sgt.sgl); + d_addr = omap_port->sst_dma + + SSI_SST_BUFFER_CH_REG(msg->channel); + } + dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n", + lch, csdp, ccr, s_addr, d_addr); + + /* Hold clocks during the transfer */ + pm_runtime_get_sync(omap_port->pdev); + + writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch)); + writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); + writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); + writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch)); + writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), + gdd + SSI_GDD_CEN_REG(lch)); + + spin_lock_bh(&omap_ssi->lock); + tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp |= SSI_GDD_LCH(lch); + writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock_bh(&omap_ssi->lock); + writew(ccr, gdd + SSI_GDD_CCR_REG(lch)); + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_pio(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 val; + + pm_runtime_get_sync(omap_port->pdev); + if (msg->ttype == HSI_MSG_WRITE) { + val = SSI_DATAACCEPT(msg->channel); + /* Hold clocks for PIO writes */ + pm_runtime_get_sync(omap_port->pdev); + } else { + val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; + } + dev_dbg(&port->device, "Single %s transfer\n", + msg->ttype ? "write" : "read"); + val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + pm_runtime_put_sync(omap_port->pdev); + msg->actual_len = 0; + msg->status = HSI_STATUS_PROCEEDING; + + return 0; +} + +static int ssi_start_transfer(struct list_head *queue) +{ + struct hsi_msg *msg; + int lch = -1; + + if (list_empty(queue)) + return 0; + msg = list_first_entry(queue, struct hsi_msg, link); + if (msg->status != HSI_STATUS_QUEUED) + return 0; + if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) + lch = ssi_claim_lch(msg); + if (lch >= 0) + return ssi_start_dma(msg, lch); + else + return ssi_start_pio(msg); +} + +static int ssi_async_break(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int err = 0; + u32 tmp; + + pm_runtime_get_sync(omap_port->pdev); + if (msg->ttype == HSI_MSG_WRITE) { + if (omap_port->sst.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); + msg->status = HSI_STATUS_COMPLETED; + msg->complete(msg); + } else { + if (omap_port->ssr.mode != SSI_MODE_FRAME) { + err = -EINVAL; + goto out; + } + spin_lock_bh(&omap_port->lock); + tmp = readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + writel(tmp | SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + msg->status = HSI_STATUS_PROCEEDING; + list_add_tail(&msg->link, &omap_port->brkqueue); + spin_unlock_bh(&omap_port->lock); + } +out: + pm_runtime_put_sync(omap_port->pdev); + + return err; +} + +static int
ssi_async(struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(msg->cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct list_head *queue; + int err = 0; + + BUG_ON(!msg); + + if (msg->sgt.nents > 1) + return -ENOSYS; /* TODO: Add sg support */ + + if (msg->break_frame) + return ssi_async_break(msg); + + if (msg->ttype) { + BUG_ON(msg->channel >= omap_port->sst.channels); + queue = &omap_port->txqueue[msg->channel]; + } else { + BUG_ON(msg->channel >= omap_port->ssr.channels); + queue = &omap_port->rxqueue[msg->channel]; + } + msg->status = HSI_STATUS_QUEUED; + spin_lock_bh(&omap_port->lock); + list_add_tail(&msg->link, queue); + err = ssi_start_transfer(queue); + if (err < 0) { + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + } + spin_unlock_bh(&omap_port->lock); + dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", + msg->status, msg->ttype, msg->channel); + + return err; +} + +static u32 ssi_calculate_div(struct hsi_controller *ssi) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + u32 tx_fckrate = (u32) omap_ssi->fck_rate; + + /* / 2 : SSI TX clock is always half of the SSI functional clock */ + tx_fckrate >>= 1; + /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ + tx_fckrate--; + dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n", + tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, + omap_ssi->max_speed); + + return tx_fckrate / omap_ssi->max_speed; +} + +static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl) +{ + struct list_head *node, *tmp; + struct hsi_msg *msg; + + list_for_each_safe(node, tmp, queue) { + msg = list_entry(node, struct hsi_msg, link); + if ((cl) && (cl != msg->cl)) + continue; + list_del(node); + pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", + msg->channel, msg, msg->sgt.sgl->length, + msg->ttype, msg->context); + if (msg->destructor) + msg->destructor(msg); + else + hsi_free_msg(msg); + } +} + +static int ssi_setup(struct hsi_client *cl) +{ + struct hsi_port *port = to_hsi_port(cl->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + u32 div; + u32 val; + int err = 0; + + pm_runtime_get_sync(omap_port->pdev); + spin_lock_bh(&omap_port->lock); + if (cl->tx_cfg.speed) + omap_ssi->max_speed = cl->tx_cfg.speed; + div = ssi_calculate_div(ssi); + if (div > SSI_MAX_DIVISOR) { + dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n", + cl->tx_cfg.speed, div); + err = -EINVAL; + goto out; + } + /* Set TX/RX module to sleep to stop TX/RX during cfg update */ + writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG); + writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG); + /* Flush posted write */ + val = readl(ssr + SSI_SSR_MODE_REG); + /* TX */ + writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG); + writel_relaxed(div, sst + SSI_SST_DIVISOR_REG); + writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG); + writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG); + writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG); + /* RX */ + writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG); + writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG); + writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG); + /* Cleanup the break queue if we leave FRAME mode */ + 
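/* HWBREAK requests can only complete in FRAME mode (see ssi_async_break()), so drop any that are still queued */ +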
if ((omap_port->ssr.mode == SSI_MODE_FRAME) && + (cl->rx_cfg.mode != SSI_MODE_FRAME)) + ssi_flush_queue(&omap_port->brkqueue, cl); + writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG); + omap_port->channels = max(cl->rx_cfg.num_hw_channels, + cl->tx_cfg.num_hw_channels); + /* Shadow registering for OFF mode */ + /* SST */ + omap_port->sst.divisor = div; + omap_port->sst.frame_size = 31; + omap_port->sst.channels = cl->tx_cfg.num_hw_channels; + omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; + omap_port->sst.mode = cl->tx_cfg.mode; + /* SSR */ + omap_port->ssr.frame_size = 31; + omap_port->ssr.timeout = 0; + omap_port->ssr.channels = cl->rx_cfg.num_hw_channels; + omap_port->ssr.mode = cl->rx_cfg.mode; +out: + spin_unlock_bh(&omap_port->lock); + pm_runtime_put_sync(omap_port->pdev); + + return err; +} + +static int ssi_flush(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + void __iomem *sst = omap_port->sst_base; + void __iomem *ssr = omap_port->ssr_base; + unsigned int i; + u32 err; + + pm_runtime_get_sync(omap_port->pdev); + spin_lock_bh(&omap_port->lock); + /* Stop all DMA transfers */ + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if (!msg || (port != hsi_get_port(msg->cl))) + continue; + writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + if (msg->ttype == HSI_MSG_READ) + pm_runtime_put_sync(omap_port->pdev); + omap_ssi->gdd_trn[i].msg = NULL; + } + /* Flush all SST buffers */ + writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG); + writel_relaxed(0, sst + SSI_SST_TXSTATE_REG); + /* Flush all SSR buffers */ + writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG); + writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG); + /* Flush all errors */ + err = readl(ssr + SSI_SSR_ERROR_REG); + writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG); + /* Flush break */ + writel_relaxed(0, ssr + SSI_SSR_BREAK_REG); + /* Clear interrupts */ + writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(0xffffff00, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); + /* Dequeue all pending requests */ + for (i = 0; i < omap_port->channels; i++) { + /* Release write clocks */ + if (!list_empty(&omap_port->txqueue[i])) + pm_runtime_put_sync(omap_port->pdev); + ssi_flush_queue(&omap_port->txqueue[i], NULL); + ssi_flush_queue(&omap_port->rxqueue[i], NULL); + } + ssi_flush_queue(&omap_port->brkqueue, NULL); + spin_unlock_bh(&omap_port->lock); + pm_runtime_put_sync(omap_port->pdev); + + return 0; +} + +static int ssi_start_tx(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + if (omap_port->wk_refcount++) { + spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ + writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static int ssi_stop_tx(struct 
hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); + + spin_lock_bh(&omap_port->wk_lock); + BUG_ON(!omap_port->wk_refcount); + if (--omap_port->wk_refcount) { + spin_unlock_bh(&omap_port->wk_lock); + return 0; + } + writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + pm_runtime_put_sync(omap_port->pdev); /* Release clocks */ + spin_unlock_bh(&omap_port->wk_lock); + + return 0; +} + +static void ssi_transfer(struct omap_ssi_port *omap_port, + struct list_head *queue) +{ + struct hsi_msg *msg; + int err = -1; + + spin_lock_bh(&omap_port->lock); + while (err < 0) { + err = ssi_start_transfer(queue); + if (err < 0) { + msg = list_first_entry(queue, struct hsi_msg, link); + msg->status = HSI_STATUS_ERROR; + msg->actual_len = 0; + list_del(&msg->link); + spin_unlock_bh(&omap_port->lock); + msg->complete(msg); + spin_lock_bh(&omap_port->lock); + } + } + spin_unlock_bh(&omap_port->lock); +} + +static void ssi_cleanup_queues(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 rxbufstate = 0; + u32 txbufstate = 0; + u32 status = SSI_ERROROCCURED; + u32 tmp; + + ssi_flush_queue(&omap_port->brkqueue, cl); + if (list_empty(&omap_port->brkqueue)) + status |= SSI_BREAKDETECTED; + + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->txqueue[i])) + continue; + msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + txbufstate |= (1 << i); + status |= SSI_DATAACCEPT(i); + /* Release the clock references for writes, including the GDD ones */ + pm_runtime_put_sync(omap_port->pdev); + } + ssi_flush_queue(&omap_port->txqueue[i], cl); + } + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) + continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { + rxbufstate |= (1 << i); + status |= SSI_DATAAVAILABLE(i); + } + ssi_flush_queue(&omap_port->rxqueue[i], cl); + /* Check if we keep the error detection interrupt armed */ + if (!list_empty(&omap_port->rxqueue[i])) + status &= ~SSI_ERROROCCURED; + } + /* Cleanup write buffers */ + tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); + tmp &= ~txbufstate; + writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); + /* Cleanup read buffers */ + tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + tmp &= ~rxbufstate; + writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); + /* Disarm and ack pending interrupts */ + tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= ~status; + writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(status, omap_ssi->sys + + SSI_MPU_STATUS_REG(port->num, 0)); +} + +static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) +{ + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port
= hsi_port_drvdata(port); + struct hsi_msg *msg; + unsigned int i; + u32 val = 0; + u32 tmp; + + for (i = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((!msg) || (msg->cl != cl)) + continue; + writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + /* + * Clock references for write will be handled in + * ssi_cleanup_queues + */ + if (msg->ttype == HSI_MSG_READ) + pm_runtime_put_sync(omap_port->pdev); + omap_ssi->gdd_trn[i].msg = NULL; + } + tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); +} + +static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode) +{ + writel(mode, omap_port->sst_base + SSI_SST_MODE_REG); + writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static int ssi_release(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + + spin_lock_bh(&omap_port->lock); + pm_runtime_get_sync(omap_port->pdev); + /* Stop all the pending DMA requests for that client */ + ssi_cleanup_gdd(ssi, cl); + /* Now cleanup all the queues */ + ssi_cleanup_queues(cl); + pm_runtime_put_sync(omap_port->pdev); + /* If it is the last client of the port, do extra checks and cleanup */ + if (port->claimed <= 1) { + /* + * Drop the clock reference for the incoming wake line + * if it is still kept high by the other side. + */ + if (omap_port->wkin_cken) { + pm_runtime_put_sync(omap_port->pdev); + omap_port->wkin_cken = 0; + } + pm_runtime_get_sync(omap_port->pdev); + /* Stop any SSI TX/RX without a client */ + ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); + omap_port->sst.mode = SSI_MODE_SLEEP; + omap_port->ssr.mode = SSI_MODE_SLEEP; + pm_runtime_put_sync(omap_port->pdev); + WARN_ON(omap_port->wk_refcount != 0); + } + spin_unlock_bh(&omap_port->lock); + + return 0; +} + + + +static void ssi_error(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + unsigned int i; + u32 err; + u32 val; + u32 tmp; + + /* ACK error */ + err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); + dev_err(&port->device, "SSI error: 0x%02x\n", err); + if (!err) { + dev_dbg(&port->device, "spurious SSI error ignored!\n"); + return; + } + spin_lock(&omap_ssi->lock); + /* Cancel all GDD read transfers */ + for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) { + msg = omap_ssi->gdd_trn[i].msg; + if ((msg) && (msg->ttype == HSI_MSG_READ)) { + writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); + val |= (1 << i); + omap_ssi->gdd_trn[i].msg = NULL; + } + } + tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + tmp &= ~val; + writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); + spin_unlock(&omap_ssi->lock); + /* Cancel all PIO read transfers */ + spin_lock(&omap_port->lock); + tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */ + writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + /* ACK error */ + writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); + 
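/* Ack the error interrupt at the MPU before signalling the pending reads */ +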
writel_relaxed(SSI_ERROROCCURED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + /* Signal the error to all current pending read requests */ + for (i = 0; i < omap_port->channels; i++) { + if (list_empty(&omap_port->rxqueue[i])) + continue; + msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, + link); + list_del(&msg->link); + msg->status = HSI_STATUS_ERROR; + spin_unlock(&omap_port->lock); + msg->complete(msg); + /* Now restart queued reads if any */ + ssi_transfer(omap_port, &omap_port->rxqueue[i]); + spin_lock(&omap_port->lock); + } + spin_unlock(&omap_port->lock); +} + +static void ssi_break_complete(struct hsi_port *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct hsi_msg *msg; + struct hsi_msg *tmp; + u32 val; + + dev_dbg(&port->device, "HWBREAK received\n"); + + spin_lock(&omap_port->lock); + val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + val &= ~SSI_BREAKDETECTED; + writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); + writel(SSI_BREAKDETECTED, + omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { + msg->status = HSI_STATUS_COMPLETED; + spin_lock(&omap_port->lock); + list_del(&msg->link); + spin_unlock(&omap_port->lock); + msg->complete(msg); + } + +} + +static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) +{ + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_msg *msg; + u32 *buf; + u32 reg; + u32 val; + + spin_lock(&omap_port->lock); + msg = list_first_entry(queue, struct hsi_msg, link); + if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { + msg->actual_len = 0; + msg->status = HSI_STATUS_PENDING; + } + if (msg->ttype == HSI_MSG_WRITE) + val = SSI_DATAACCEPT(msg->channel); + else + val = SSI_DATAAVAILABLE(msg->channel); + if (msg->status == HSI_STATUS_PROCEEDING) { + buf = sg_virt(msg->sgt.sgl) + msg->actual_len; + if (msg->ttype == HSI_MSG_WRITE) + writel(*buf, omap_port->sst_base + + SSI_SST_BUFFER_CH_REG(msg->channel)); + else + *buf = readl(omap_port->ssr_base + + SSI_SSR_BUFFER_CH_REG(msg->channel)); + dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel, + msg->ttype, *buf); + msg->actual_len += sizeof(*buf); + if (msg->actual_len >= msg->sgt.sgl->length) + msg->status = HSI_STATUS_COMPLETED; + /* + * Wait for the last written frame to be actually sent before + * we call the complete callback + */ + if ((msg->status == HSI_STATUS_PROCEEDING) || + ((msg->status == HSI_STATUS_COMPLETED) && + (msg->ttype == HSI_MSG_WRITE))) { + writel(val, omap_ssi->sys + + SSI_MPU_STATUS_REG(port->num, 0)); + spin_unlock(&omap_port->lock); + + return; + } + + } + /* Transfer completed at this point */ + reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + if (msg->ttype == HSI_MSG_WRITE) { + /* Release clocks for write transfer */ + pm_runtime_put_sync(omap_port->pdev); + } + reg &= ~val; + writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); + list_del(&msg->link); + spin_unlock(&omap_port->lock); + msg->complete(msg); +
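/* Start the next transfer queued on this channel, if any */ +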
ssi_transfer(omap_port, queue); +} + +static void ssi_pio_tasklet(unsigned long ssi_port) +{ + struct hsi_port *port = (struct hsi_port *)ssi_port; + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *sys = omap_ssi->sys; + unsigned int ch; + u32 status_reg; + + pm_runtime_get_sync(omap_port->pdev); + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); + + for (ch = 0; ch < omap_port->channels; ch++) { + if (status_reg & SSI_DATAACCEPT(ch)) + ssi_pio_complete(port, &omap_port->txqueue[ch]); + if (status_reg & SSI_DATAAVAILABLE(ch)) + ssi_pio_complete(port, &omap_port->rxqueue[ch]); + } + if (status_reg & SSI_BREAKDETECTED) + ssi_break_complete(port); + if (status_reg & SSI_ERROROCCURED) + ssi_error(port); + + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); + pm_runtime_put_sync(omap_port->pdev); + + if (status_reg) + tasklet_hi_schedule(&omap_port->pio_tasklet); + else + enable_irq(omap_port->irq); +} + +static irqreturn_t ssi_pio_isr(int irq, void *port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + + tasklet_hi_schedule(&omap_port->pio_tasklet); + disable_irq_nosync(irq); + + return IRQ_HANDLED; +} + +static void ssi_wake_tasklet(unsigned long ssi_port) +{ + struct hsi_port *port = (struct hsi_port *)ssi_port; + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + if (ssi_wakein(port)) { + /* + * We can have a quick High-Low-High transition in the line. + * In such a case if we have long interrupt latencies, + * we can miss the low event or get twice a high event. + * This workaround will avoid breaking the clock reference + * count when such a situation occurs. + */ + spin_lock(&omap_port->lock); + if (!omap_port->wkin_cken) { + omap_port->wkin_cken = 1; + pm_runtime_get_sync(omap_port->pdev); + } + spin_unlock(&omap_port->lock); + dev_dbg(&ssi->device, "Wake in high\n"); + if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ + writel(SSI_WAKE(0), + omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); + } + hsi_event(port, HSI_EVENT_START_RX); + } else { + dev_dbg(&ssi->device, "Wake in low\n"); + if (omap_port->wktest) { /* FIXME: HACK !
To be removed */ + writel(SSI_WAKE(0), + omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); + } + hsi_event(port, HSI_EVENT_STOP_RX); + spin_lock(&omap_port->lock); + if (omap_port->wkin_cken) { + pm_runtime_put_sync(omap_port->pdev); + omap_port->wkin_cken = 0; + } + spin_unlock(&omap_port->lock); + } +} + +static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port); + + tasklet_hi_schedule(&omap_port->wake_tasklet); + + return IRQ_HANDLED; +} + +static int __init ssi_port_irq(struct hsi_port *port, + struct platform_device *pd) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + int err; + + omap_port->irq = platform_get_irq(pd, 0); + if (omap_port->irq < 0) { + dev_err(&port->device, "Port IRQ resource missing\n"); + return omap_port->irq; + } + tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet, + (unsigned long)port); + err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr, + 0, "mpu_irq0", port); + if (err < 0) + dev_err(&port->device, "Request IRQ %d failed (%d)\n", + omap_port->irq, err); + return err; +} + +static int __init ssi_wake_irq(struct hsi_port *port, + struct platform_device *pd) +{ + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + int cawake_irq; + int err; + + if (omap_port->wake_gpio == -1) { + omap_port->wake_irq = -1; + return 0; + } + + cawake_irq = gpio_to_irq(omap_port->wake_gpio); + + omap_port->wake_irq = cawake_irq; + tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet, + (unsigned long)port); + err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "cawake", port); + if (err < 0) + dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", + cawake_irq, err); + err = enable_irq_wake(cawake_irq); + if (err < 0) + dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n", + cawake_irq, err); + + return err; +} + +static void __init ssi_queues_init(struct omap_ssi_port *omap_port) +{ + unsigned int ch; + + for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) { + INIT_LIST_HEAD(&omap_port->txqueue[ch]); + INIT_LIST_HEAD(&omap_port->rxqueue[ch]); + } + INIT_LIST_HEAD(&omap_port->brkqueue); +} + +static int __init ssi_port_get_iomem(struct platform_device *pd, + const char *name, void __iomem **pbase, dma_addr_t *phy) +{ + struct hsi_port *port = platform_get_drvdata(pd); + struct resource *mem; + struct resource *ioarea; + void __iomem *base; + + mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); + if (!mem) { + dev_err(&pd->dev, "IO memory region missing (%s)\n", name); + return -ENXIO; + } + ioarea = devm_request_mem_region(&port->device, mem->start, + resource_size(mem), dev_name(&pd->dev)); + if (!ioarea) { + dev_err(&pd->dev, "%s IO memory region request failed\n", + mem->name); + return -ENXIO; + } + base = devm_ioremap(&port->device, mem->start, resource_size(mem)); + if (!base) { + dev_err(&pd->dev, "%s IO remap failed\n", mem->name); + return -ENXIO; + } + *pbase = base; + + if (phy) + *phy = mem->start; + + return 0; +} + +static int __init ssi_port_probe(struct platform_device *pd) +{ + struct device_node *np = pd->dev.of_node; + struct hsi_port *port; + struct omap_ssi_port *omap_port; + struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + int cawake_gpio = 0; /* int, not u32: of_get_named_gpio() returns a negative errno, so the < 0 check below must be signed */ + u32 port_id; + int err; + + dev_dbg(&pd->dev, "init ssi port...\n"); + + err = ref_module(THIS_MODULE, ssi->owner); + if
(err) { + dev_err(&pd->dev, "could not increment parent module refcount (err=%d)\n", + err); + return -ENODEV; + } + + if (!ssi->port || !omap_ssi->port) { + dev_err(&pd->dev, "ssi controller not initialized!\n"); + err = -ENODEV; + goto error; + } + + /* get id of first uninitialized port in controller */ + for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id]; + port_id++) + ; + + if (port_id >= ssi->num_ports) { + dev_err(&pd->dev, "port id out of range!\n"); + err = -ENODEV; + goto error; + } + + port = ssi->port[port_id]; + + if (!np) { + dev_err(&pd->dev, "missing device tree data\n"); + err = -EINVAL; + goto error; + } + + cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0); + if (cawake_gpio < 0) { + dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n", + cawake_gpio); + err = -ENODEV; + goto error; + } + + err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN, + "cawake"); + if (err) { + dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n", + err); + err = -ENXIO; + goto error; + } + + omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); + if (!omap_port) { + err = -ENOMEM; + goto error; + } + omap_port->wake_gpio = cawake_gpio; + omap_port->pdev = &pd->dev; + omap_port->port_id = port_id; + + /* initialize HSI port */ + port->async = ssi_async; + port->setup = ssi_setup; + port->flush = ssi_flush; + port->start_tx = ssi_start_tx; + port->stop_tx = ssi_stop_tx; + port->release = ssi_release; + hsi_port_set_drvdata(port, omap_port); + omap_ssi->port[port_id] = omap_port; + + platform_set_drvdata(pd, port); + + err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, + &omap_port->sst_dma); + if (err < 0) + goto error; + err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, + &omap_port->ssr_dma); + if (err < 0) + goto error; + + err = ssi_port_irq(port, pd); + if (err < 0) + goto error; + err = ssi_wake_irq(port, pd); + if (err < 0) + goto error; + + ssi_queues_init(omap_port); + spin_lock_init(&omap_port->lock); + spin_lock_init(&omap_port->wk_lock); + omap_port->dev = &port->device; + + pm_runtime_irq_safe(omap_port->pdev); + pm_runtime_enable(omap_port->pdev); + +#ifdef CONFIG_DEBUG_FS + err = ssi_debug_add_port(omap_port, omap_ssi->dir); + if (err < 0) { + pm_runtime_disable(omap_port->pdev); + goto error; + } +#endif + + hsi_add_clients_from_dt(port, np); + + dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n", + port_id, cawake_gpio); + + return 0; + +error: + return err; +} + +static int __exit ssi_port_remove(struct platform_device *pd) +{ + struct hsi_port *port = platform_get_drvdata(pd); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + +#ifdef CONFIG_DEBUG_FS + ssi_debug_remove_port(port); +#endif + + hsi_port_unregister_clients(port); + + tasklet_kill(&omap_port->wake_tasklet); + tasklet_kill(&omap_port->pio_tasklet); + + port->async = hsi_dummy_msg; + port->setup = hsi_dummy_cl; + port->flush = hsi_dummy_cl; + port->start_tx = hsi_dummy_cl; + port->stop_tx = hsi_dummy_cl; + port->release = hsi_dummy_cl; + + omap_ssi->port[omap_port->port_id] = NULL; + platform_set_drvdata(pd, NULL); + pm_runtime_disable(&pd->dev); + + return 0; +} + +#ifdef CONFIG_PM_RUNTIME +static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = 
to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + omap_port->sys_mpu_enable = readl(omap_ssi->sys + + SSI_MPU_ENABLE_REG(port->num, 0)); + + return 0; +} + +static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port) +{ + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + void __iomem *base; + + writel_relaxed(omap_port->sys_mpu_enable, + omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); + + /* SST context */ + base = omap_port->sst_base; + writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); + writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); + writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); + + /* SSR context */ + base = omap_port->ssr_base; + writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); + writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); + writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); + + return 0; +} + +static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) +{ + u32 mode; + + writel_relaxed(omap_port->sst.mode, + omap_port->sst_base + SSI_SST_MODE_REG); + writel_relaxed(omap_port->ssr.mode, + omap_port->ssr_base + SSI_SSR_MODE_REG); + /* OCP barrier */ + mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); + + return 0; +} + +static int ssi_restore_divisor(struct omap_ssi_port *omap_port) +{ + writel_relaxed(omap_port->sst.divisor, + omap_port->sst_base + SSI_SST_DIVISOR_REG); + + return 0; +} + +static int omap_ssi_port_runtime_suspend(struct device *dev) +{ + struct hsi_port *port = dev_get_drvdata(dev); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "port runtime suspend!\n"); + + ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); + if (omap_ssi->get_loss) + omap_port->loss_count = + omap_ssi->get_loss(ssi->device.parent); + ssi_save_port_ctx(omap_port); + + return 0; +} + +static int omap_ssi_port_runtime_resume(struct device *dev) +{ + struct hsi_port *port = dev_get_drvdata(dev); + struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + dev_dbg(dev, "port runtime resume!\n"); + + if ((omap_ssi->get_loss) && (omap_port->loss_count == + omap_ssi->get_loss(ssi->device.parent))) + goto mode; /* We always need to restore the mode & TX divisor */ + + ssi_restore_port_ctx(omap_port); + +mode: + ssi_restore_divisor(omap_port); + ssi_restore_port_mode(omap_port); + + return 0; +} + +static const struct dev_pm_ops omap_ssi_port_pm_ops = { + SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend, + omap_ssi_port_runtime_resume, NULL) +}; + +#define DEV_PM_OPS (&omap_ssi_port_pm_ops) +#else +#define DEV_PM_OPS NULL +#endif + + +#ifdef CONFIG_OF +static const struct of_device_id omap_ssi_port_of_match[] = { + { .compatible = "ti,omap3-ssi-port", }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match); +#else +#define omap_ssi_port_of_match NULL +#endif + +static struct platform_driver ssi_port_pdriver = { + .remove = __exit_p(ssi_port_remove), + .driver = { + .name = "omap_ssi_port", + .owner = THIS_MODULE, + .of_match_table = 
omap_ssi_port_of_match, + .pm = DEV_PM_OPS, + }, +}; + +module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe); + +MODULE_ALIAS("platform:omap_ssi_port"); +MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); +MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>"); +MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hsi/controllers/omap_ssi_regs.h b/drivers/hsi/controllers/omap_ssi_regs.h new file mode 100644 index 00000000000..08f98dd1d01 --- /dev/null +++ b/drivers/hsi/controllers/omap_ssi_regs.h @@ -0,0 +1,171 @@ +/* Hardware definitions for SSI. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea <carlos.chinea@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#ifndef __OMAP_SSI_REGS_H__ +#define __OMAP_SSI_REGS_H__ + +/* + * SSI SYS registers + */ +#define SSI_REVISION_REG 0 +# define SSI_REV_MAJOR 0xf0 +# define SSI_REV_MINOR 0xf +#define SSI_SYSCONFIG_REG 0x10 +# define SSI_AUTOIDLE (1 << 0) +# define SSI_SOFTRESET (1 << 1) +# define SSI_SIDLEMODE_FORCE 0 +# define SSI_SIDLEMODE_NO (1 << 3) +# define SSI_SIDLEMODE_SMART (1 << 4) +# define SSI_SIDLEMODE_MASK 0x18 +# define SSI_MIDLEMODE_FORCE 0 +# define SSI_MIDLEMODE_NO (1 << 12) +# define SSI_MIDLEMODE_SMART (1 << 13) +# define SSI_MIDLEMODE_MASK 0x3000 +#define SSI_SYSSTATUS_REG 0x14 +# define SSI_RESETDONE 1 +#define SSI_MPU_STATUS_REG(port, irq) (0x808 + ((port) * 0x10) + ((irq) * 2)) +#define SSI_MPU_ENABLE_REG(port, irq) (0x80c + ((port) * 0x10) + ((irq) * 8)) +# define SSI_DATAACCEPT(channel) (1 << (channel)) +# define SSI_DATAAVAILABLE(channel) (1 << ((channel) + 8)) +# define SSI_DATAOVERRUN(channel) (1 << ((channel) + 16)) +# define SSI_ERROROCCURED (1 << 24) +# define SSI_BREAKDETECTED (1 << 25) +#define SSI_GDD_MPU_IRQ_STATUS_REG 0x0800 +#define SSI_GDD_MPU_IRQ_ENABLE_REG 0x0804 +# define SSI_GDD_LCH(channel) (1 << (channel)) +#define SSI_WAKE_REG(port) (0xc00 + ((port) * 0x10)) +#define SSI_CLEAR_WAKE_REG(port) (0xc04 + ((port) * 0x10)) +#define SSI_SET_WAKE_REG(port) (0xc08 + ((port) * 0x10)) +# define SSI_WAKE(channel) (1 << (channel)) +# define SSI_WAKE_MASK 0xff + +/* + * SSI SST registers + */ +#define SSI_SST_ID_REG 0 +#define SSI_SST_MODE_REG 4 +# define SSI_MODE_VAL_MASK 3 +# define SSI_MODE_SLEEP 0 +# define SSI_MODE_STREAM 1 +# define SSI_MODE_FRAME 2 +# define SSI_MODE_MULTIPOINTS 3 +#define SSI_SST_FRAMESIZE_REG 8 +# define SSI_FRAMESIZE_DEFAULT 31 +#define SSI_SST_TXSTATE_REG 0xc +# define SSI_TXSTATE_IDLE 0 +#define SSI_SST_BUFSTATE_REG 0x10 +# define SSI_FULL(channel) (1 << (channel)) +#define SSI_SST_DIVISOR_REG 0x18 +# define SSI_MAX_DIVISOR 127 +#define SSI_SST_BREAK_REG 0x20 +#define SSI_SST_CHANNELS_REG 0x24 +# define SSI_CHANNELS_DEFAULT 4 +#define SSI_SST_ARBMODE_REG 0x28 +# define SSI_ARBMODE_ROUNDROBIN 0 +# define SSI_ARBMODE_PRIORITY 1 +#define 
SSI_SST_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SST_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI SSR registers + */ +#define SSI_SSR_ID_REG 0 +#define SSI_SSR_MODE_REG 4 +#define SSI_SSR_FRAMESIZE_REG 8 +#define SSI_SSR_RXSTATE_REG 0xc +#define SSI_SSR_BUFSTATE_REG 0x10 +# define SSI_NOTEMPTY(channel) (1 << (channel)) +#define SSI_SSR_BREAK_REG 0x1c +#define SSI_SSR_ERROR_REG 0x20 +#define SSI_SSR_ERRORACK_REG 0x24 +#define SSI_SSR_OVERRUN_REG 0x2c +#define SSI_SSR_OVERRUNACK_REG 0x30 +#define SSI_SSR_TIMEOUT_REG 0x34 +# define SSI_TIMEOUT_DEFAULT 0 +#define SSI_SSR_CHANNELS_REG 0x28 +#define SSI_SSR_BUFFER_CH_REG(channel) (0x80 + ((channel) * 4)) +#define SSI_SSR_SWAPBUF_CH_REG(channel) (0xc0 + ((channel) * 4)) + +/* + * SSI GDD registers + */ +#define SSI_GDD_HW_ID_REG 0 +#define SSI_GDD_PPORT_ID_REG 0x10 +#define SSI_GDD_MPORT_ID_REG 0x14 +#define SSI_GDD_PPORT_SR_REG 0x20 +#define SSI_GDD_MPORT_SR_REG 0x24 +# define SSI_ACTIVE_LCH_NUM_MASK 0xff +#define SSI_GDD_TEST_REG 0x40 +# define SSI_TEST 1 +#define SSI_GDD_GCR_REG 0x100 +# define SSI_CLK_AUTOGATING_ON (1 << 3) +# define SSI_FREE (1 << 2) +# define SSI_SWITCH_OFF (1 << 0) +#define SSI_GDD_GRST_REG 0x200 +# define SSI_SWRESET 1 +#define SSI_GDD_CSDP_REG(channel) (0x800 + ((channel) * 0x40)) +# define SSI_DST_BURST_EN_MASK 0xc000 +# define SSI_DST_SINGLE_ACCESS0 0 +# define SSI_DST_SINGLE_ACCESS (1 << 14) +# define SSI_DST_BURST_4x32_BIT (2 << 14) +# define SSI_DST_BURST_8x32_BIT (3 << 14) +# define SSI_DST_MASK 0x1e00 +# define SSI_DST_MEMORY_PORT (8 << 9) +# define SSI_DST_PERIPHERAL_PORT (9 << 9) +# define SSI_SRC_BURST_EN_MASK 0x180 +# define SSI_SRC_SINGLE_ACCESS0 0 +# define SSI_SRC_SINGLE_ACCESS (1 << 7) +# define SSI_SRC_BURST_4x32_BIT (2 << 7) +# define SSI_SRC_BURST_8x32_BIT (3 << 7) +# define SSI_SRC_MASK 0x3c +# define SSI_SRC_MEMORY_PORT (8 << 2) +# define SSI_SRC_PERIPHERAL_PORT (9 << 2) +# define SSI_DATA_TYPE_MASK 3 +# define SSI_DATA_TYPE_S32 2 +#define SSI_GDD_CCR_REG(channel) (0x802 + ((channel) * 0x40)) +# define SSI_DST_AMODE_MASK (3 << 14) +# define SSI_DST_AMODE_CONST 0 +# define SSI_DST_AMODE_POSTINC (1 << 12) +# define SSI_SRC_AMODE_MASK (3 << 12) +# define SSI_SRC_AMODE_CONST 0 +# define SSI_SRC_AMODE_POSTINC (1 << 12) +# define SSI_CCR_ENABLE (1 << 7) +# define SSI_CCR_SYNC_MASK 0x1f +#define SSI_GDD_CICR_REG(channel) (0x804 + ((channel) * 0x40)) +# define SSI_BLOCK_IE (1 << 5) +# define SSI_HALF_IE (1 << 2) +# define SSI_TOUT_IE (1 << 0) +#define SSI_GDD_CSR_REG(channel) (0x806 + ((channel) * 0x40)) +# define SSI_CSR_SYNC (1 << 6) +# define SSI_CSR_BLOCK (1 << 5) +# define SSI_CSR_HALF (1 << 2) +# define SSI_CSR_TOUR (1 << 0) +#define SSI_GDD_CSSA_REG(channel) (0x808 + ((channel) * 0x40)) +#define SSI_GDD_CDSA_REG(channel) (0x80c + ((channel) * 0x40)) +#define SSI_GDD_CEN_REG(channel) (0x810 + ((channel) * 0x40)) +#define SSI_GDD_CSAC_REG(channel) (0x818 + ((channel) * 0x40)) +#define SSI_GDD_CDAC_REG(channel) (0x81a + ((channel) * 0x40)) +#define SSI_GDD_CLNK_CTRL_REG(channel) (0x828 + ((channel) * 0x40)) +# define SSI_ENABLE_LNK (1 << 15) +# define SSI_STOP_LNK (1 << 14) +# define SSI_NEXT_CH_ID_MASK 0xf + +#endif /* __OMAP_SSI_REGS_H__ */ diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index 749f7b5c817..fe9371271ce 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -26,6 +26,8 @@ #include <linux/slab.h> #include <linux/string.h> #include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_device.h> #include "hsi_core.h" static ssize_t 
modalias_show(struct device *dev, @@ -50,7 +52,13 @@ static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) static int hsi_bus_match(struct device *dev, struct device_driver *driver) { - return strcmp(dev_name(dev), driver->name) == 0; + if (of_driver_match_device(dev, driver)) + return true; + + if (strcmp(dev_name(dev), driver->name) == 0) + return true; + + return false; } static struct bus_type hsi_bus_type = { @@ -62,18 +70,37 @@ static struct bus_type hsi_bus_type = { static void hsi_client_release(struct device *dev) { - kfree(to_hsi_client(dev)); + struct hsi_client *cl = to_hsi_client(dev); + + kfree(cl->tx_cfg.channels); + kfree(cl->rx_cfg.channels); + kfree(cl); } -static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) +struct hsi_client *hsi_new_client(struct hsi_port *port, + struct hsi_board_info *info) { struct hsi_client *cl; + size_t size; cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (!cl) - return; + return NULL; + cl->tx_cfg = info->tx_cfg; + if (cl->tx_cfg.channels) { + size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels); + cl->tx_cfg.channels = kzalloc(size , GFP_KERNEL); + memcpy(cl->tx_cfg.channels, info->tx_cfg.channels, size); + } + cl->rx_cfg = info->rx_cfg; + if (cl->rx_cfg.channels) { + size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels); + cl->rx_cfg.channels = kzalloc(size , GFP_KERNEL); + memcpy(cl->rx_cfg.channels, info->rx_cfg.channels, size); + } + cl->device.bus = &hsi_bus_type; cl->device.parent = &port->device; cl->device.release = hsi_client_release; @@ -85,7 +112,10 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) pr_err("hsi: failed to register client: %s\n", info->name); put_device(&cl->device); } + + return cl; } +EXPORT_SYMBOL_GPL(hsi_new_client); static void hsi_scan_board_info(struct hsi_controller *hsi) { @@ -101,12 +131,209 @@ static void hsi_scan_board_info(struct hsi_controller *hsi) } } -static int hsi_remove_client(struct device *dev, void *data __maybe_unused) +#ifdef CONFIG_OF +static struct hsi_board_info hsi_char_dev_info = { + .name = "hsi_char", +}; + +static int hsi_of_property_parse_mode(struct device_node *client, char *name, + unsigned int *result) +{ + const char *mode; + int err; + + err = of_property_read_string(client, name, &mode); + if (err < 0) + return err; + + if (strcmp(mode, "stream") == 0) + *result = HSI_MODE_STREAM; + else if (strcmp(mode, "frame") == 0) + *result = HSI_MODE_FRAME; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_flow(struct device_node *client, char *name, + unsigned int *result) +{ + const char *flow; + int err; + + err = of_property_read_string(client, name, &flow); + if (err < 0) + return err; + + if (strcmp(flow, "synchronized") == 0) + *result = HSI_FLOW_SYNC; + else if (strcmp(flow, "pipeline") == 0) + *result = HSI_FLOW_PIPE; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_arb_mode(struct device_node *client, + char *name, unsigned int *result) +{ + const char *arb_mode; + int err; + + err = of_property_read_string(client, name, &arb_mode); + if (err < 0) + return err; + + if (strcmp(arb_mode, "round-robin") == 0) + *result = HSI_ARB_RR; + else if (strcmp(arb_mode, "priority") == 0) + *result = HSI_ARB_PRIO; + else + return -EINVAL; + + return 0; +} + +static void hsi_add_client_from_dt(struct hsi_port *port, + struct device_node *client) +{ + struct hsi_client *cl; + struct hsi_channel channel; + struct property *prop; + char name[32]; + 
int length, cells, err, i, max_chan, mode; + + cl = kzalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return; + + err = of_modalias_node(client, name, sizeof(name)); + if (err) + goto err; + + dev_set_name(&cl->device, "%s", name); + + err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); + if (err) { + err = hsi_of_property_parse_mode(client, "hsi-rx-mode", + &cl->rx_cfg.mode); + if (err) + goto err; + + err = hsi_of_property_parse_mode(client, "hsi-tx-mode", + &cl->tx_cfg.mode); + if (err) + goto err; + } else { + cl->rx_cfg.mode = mode; + cl->tx_cfg.mode = mode; + } + + err = of_property_read_u32(client, "hsi-speed-kbps", + &cl->tx_cfg.speed); + if (err) + goto err; + cl->rx_cfg.speed = cl->tx_cfg.speed; + + err = hsi_of_property_parse_flow(client, "hsi-flow", + &cl->rx_cfg.flow); + if (err) + goto err; + + err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode", + &cl->rx_cfg.arb_mode); + if (err) + goto err; + + prop = of_find_property(client, "hsi-channel-ids", &length); + if (!prop) { + err = -EINVAL; + goto err; + } + + cells = length / sizeof(u32); + + cl->rx_cfg.num_channels = cells; + cl->tx_cfg.num_channels = cells; + + cl->rx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->rx_cfg.channels) { + err = -ENOMEM; + goto err; + } + + cl->tx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->tx_cfg.channels) { + err = -ENOMEM; + goto err2; + } + + max_chan = 0; + for (i = 0; i < cells; i++) { + err = of_property_read_u32_index(client, "hsi-channel-ids", i, + &channel.id); + if (err) + goto err3; + + err = of_property_read_string_index(client, "hsi-channel-names", + i, &channel.name); + if (err) + channel.name = NULL; + + if (channel.id > max_chan) + max_chan = channel.id; + + cl->rx_cfg.channels[i] = channel; + cl->tx_cfg.channels[i] = channel; + } + + cl->rx_cfg.num_hw_channels = max_chan + 1; + cl->tx_cfg.num_hw_channels = max_chan + 1; + + cl->device.bus = &hsi_bus_type; + cl->device.parent = &port->device; + cl->device.release = hsi_client_release; + cl->device.of_node = client; + + if (device_register(&cl->device) < 0) { + pr_err("hsi: failed to register client: %s\n", name); + put_device(&cl->device); + goto err3; + } + + return; + +err3: + kfree(cl->tx_cfg.channels); +err2: + kfree(cl->rx_cfg.channels); +err: + kfree(cl); + pr_err("hsi client: missing or incorrect of property: err=%d\n", err); +} + +void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients) +{ + struct device_node *child; + + /* register hsi-char device */ + hsi_new_client(port, &hsi_char_dev_info); + + for_each_available_child_of_node(clients, child) + hsi_add_client_from_dt(port, child); +} +EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt); +#endif + +int hsi_remove_client(struct device *dev, void *data __maybe_unused) { device_unregister(dev); return 0; } +EXPORT_SYMBOL_GPL(hsi_remove_client); static int hsi_remove_port(struct device *dev, void *data __maybe_unused) { @@ -130,6 +357,16 @@ static void hsi_port_release(struct device *dev) } /** + * hsi_unregister_port - Unregister an HSI port + * @port: The HSI port to unregister + */ +void hsi_port_unregister_clients(struct hsi_port *port) +{ + device_for_each_child(&port->device, NULL, hsi_remove_client); +} +EXPORT_SYMBOL_GPL(hsi_port_unregister_clients); + +/** * hsi_unregister_controller - Unregister an HSI controller * @hsi: The HSI controller to register */ @@ -472,7 +709,7 @@ int hsi_unregister_port_event(struct hsi_client *cl) EXPORT_SYMBOL_GPL(hsi_unregister_port_event); /** - * 
hsi_event -Notifies clients about port events + * hsi_event - Notifies clients about port events * @port: Port where the event occurred * @event: The event type * @@ -492,6 +729,32 @@ int hsi_event(struct hsi_port *port, unsigned long event) } EXPORT_SYMBOL_GPL(hsi_event); +/** + * hsi_get_channel_id_by_name - acquire channel id by channel name + * @cl: HSI client, which uses the channel + * @name: name the channel is known under + * + * Clients can call this function to get the hsi channel ids similar to + * requesting IRQs or GPIOs by name. This function assumes the same + * channel configuration is used for RX and TX. + * + * Returns -errno on error or channel id on success. + */ +int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name) +{ + int i; + + if (!cl->rx_cfg.channels) + return -ENOENT; + + for (i = 0; i < cl->rx_cfg.num_channels; i++) + if (!strcmp(cl->rx_cfg.channels[i].name, name)) + return cl->rx_cfg.channels[i].id; + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name); + static int __init hsi_init(void) { return bus_register(&hsi_bus_type); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index bc196f49ec5..4af0da96c2e 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1053,7 +1053,7 @@ config SENSORS_PC87427 config SENSORS_NTC_THERMISTOR tristate "NTC thermistor support" - depends on (!OF && !IIO) || (OF && IIO) + depends on !OF || IIO=n || IIO help This driver supports NTC thermistors sensor reading and its interpretation. The driver can also monitor the temperature and diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 6d02e3b0637..d76f0b70c6e 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) if (cpu_has_tjmax(c)) dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); } else { - val = (eax >> 16) & 0x7f; + val = (eax >> 16) & 0xff; /* * If the TjMax is not plausible, an assumption * will be used */ - if (val >= 85) { + if (val) { dev_dbg(dev, "TjMax is %d degrees C\n", val); return val * 1000; } diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c index 90ec1173b8a..01723f04fe4 100644 --- a/drivers/hwmon/emc1403.c +++ b/drivers/hwmon/emc1403.c @@ -163,7 +163,7 @@ static ssize_t store_hyst(struct device *dev, if (retval < 0) goto fail; - hyst = val - retval * 1000; + hyst = retval * 1000 - val; hyst = DIV_ROUND_CLOSEST(hyst, 1000); if (hyst < 0 || hyst > 255) { retval = -ERANGE; @@ -330,7 +330,7 @@ static int emc1403_detect(struct i2c_client *client, } id = i2c_smbus_read_byte_data(client, THERMAL_REVISION_REG); - if (id != 0x01) + if (id < 0x01 || id > 0x04) return -ENODEV; return 0; @@ -355,9 +355,9 @@ static int emc1403_probe(struct i2c_client *client, if (id->driver_data) data->groups[1] = &emc1404_group; - hwmon_dev = hwmon_device_register_with_groups(&client->dev, - client->name, data, - data->groups); + hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev, + client->name, data, + data->groups); if (IS_ERR(hwmon_dev)) return PTR_ERR(hwmon_dev); diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 8a17f01e867..e76feb86a1d 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -44,6 +44,7 @@ struct ntc_compensation { unsigned int ohm; }; +/* Order matters, ntc_match references the entries by index */ static const struct platform_device_id ntc_thermistor_id[] = { { "ncp15wb473", TYPE_NCPXXWB473 }, { 
"ncp18wb473", TYPE_NCPXXWB473 }, @@ -141,7 +142,7 @@ struct ntc_data { char name[PLATFORM_NAME_SIZE]; }; -#ifdef CONFIG_OF +#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO) static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) { struct iio_channel *channel = pdata->chan; @@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) static const struct of_device_id ntc_match[] = { { .compatible = "ntc,ncp15wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[0] }, { .compatible = "ntc,ncp18wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[1] }, { .compatible = "ntc,ncp21wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[2] }, { .compatible = "ntc,ncp03wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[3] }, { .compatible = "ntc,ncp15wl333", - .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, + .data = &ntc_thermistor_id[4] }, { }, }; MODULE_DEVICE_TABLE(of, ntc_match); @@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) return NULL; } +#define ntc_match NULL + static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) { } #endif diff --git a/drivers/hwmon/vexpress.c b/drivers/hwmon/vexpress.c index 8242b75d96c..611f34c7333 100644 --- a/drivers/hwmon/vexpress.c +++ b/drivers/hwmon/vexpress.c @@ -26,7 +26,7 @@ struct vexpress_hwmon_data { struct device *hwmon_dev; - struct vexpress_config_func *func; + struct regmap *reg; const char *name; }; @@ -53,7 +53,7 @@ static ssize_t vexpress_hwmon_u32_show(struct device *dev, int err; u32 value; - err = vexpress_config_read(data->func, 0, &value); + err = regmap_read(data->reg, 0, &value); if (err) return err; @@ -68,11 +68,11 @@ static ssize_t vexpress_hwmon_u64_show(struct device *dev, int err; u32 value_hi, value_lo; - err = vexpress_config_read(data->func, 0, &value_lo); + err = regmap_read(data->reg, 0, &value_lo); if (err) return err; - err = vexpress_config_read(data->func, 1, &value_hi); + err = regmap_read(data->reg, 1, &value_hi); if (err) return err; @@ -234,9 +234,9 @@ static int vexpress_hwmon_probe(struct platform_device *pdev) type = match->data; data->name = type->name; - data->func = vexpress_config_func_get_by_dev(&pdev->dev); - if (!data->func) - return -ENODEV; + data->reg = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(data->reg)) + return PTR_ERR(data->reg); err = sysfs_create_groups(&pdev->dev.kobj, type->attr_groups); if (err) @@ -252,7 +252,6 @@ static int vexpress_hwmon_probe(struct platform_device *pdev) error: sysfs_remove_group(&pdev->dev.kobj, match->data); - vexpress_config_func_put(data->func); return err; } @@ -266,8 +265,6 @@ static int vexpress_hwmon_remove(struct platform_device *pdev) match = of_match_device(vexpress_hwmon_of_match, &pdev->dev); sysfs_remove_group(&pdev->dev.kobj, match->data); - vexpress_config_func_put(data->func); - return 0; } diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 22e92c3d3d0..3c20e4bd6dd 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -422,6 +422,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) */ dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); + /* enforce disabled interrupts (due to HW issues) */ + i2c_dw_disable_int(dev); + /* Enable the adapter */ __i2c_dw_enable(dev, true); diff --git 
a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 28cbe1b2a2e..32c85e9ecda 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -999,7 +999,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, resource_size(&adev->res)); - if (IS_ERR(dev->virtbase)) { + if (!dev->virtbase) { ret = -ENOMEM; goto err_no_mem; } diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index 1b4cf14f110..2a5efb5b487 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -479,7 +479,7 @@ static int qup_i2c_xfer(struct i2c_adapter *adap, int ret, idx; ret = pm_runtime_get_sync(qup->dev); - if (ret) + if (ret < 0) goto out; writel(1, qup->base + QUP_SW_RESET); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index d4fa8eba6e9..06d47aafbb7 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -561,6 +561,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, ret = -EINVAL; for (i = 0; i < num; i++) { + /* This HW can't send STOP after address phase */ + if (msgs[i].len == 0) { + ret = -EOPNOTSUPP; + break; + } + /*-------------- spin lock -----------------*/ spin_lock_irqsave(&priv->lock, flags); @@ -625,7 +631,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, static u32 rcar_i2c_func(struct i2c_adapter *adap) { - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + /* This HW can't do SMBUS_QUICK and NOSTART */ + return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } static const struct i2c_algorithm rcar_i2c_algo = { diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c index ae4491062e4..bb3a9964f7e 100644 --- a/drivers/i2c/busses/i2c-s3c2410.c +++ b/drivers/i2c/busses/i2c-s3c2410.c @@ -1276,10 +1276,10 @@ static int s3c24xx_i2c_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev); - i2c->suspended = 0; clk_prepare_enable(i2c->clk); s3c24xx_i2c_init(i2c); clk_disable_unprepare(i2c->clk); + i2c->suspended = 0; return 0; } diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 16f69be820c..ee880382e3b 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -188,10 +188,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, ledtrig_ide_activity(); - pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n", + pr_debug("%s: %sing: block=%llu, sectors=%u\n", drive->name, rq_data_dir(rq) == READ ? "read" : "writ", - (unsigned long long)block, blk_rq_sectors(rq), - (unsigned long)rq->buffer); + (unsigned long long)block, blk_rq_sectors(rq)); if (hwif->rw_disk) hwif->rw_disk(drive, rq); diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index d86196cfe4b..24c28e3f93a 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -106,7 +106,7 @@ config AT91_ADC Say yes here to build support for Atmel AT91 ADC. config EXYNOS_ADC - bool "Exynos ADC driver support" + tristate "Exynos ADC driver support" depends on OF help Core support for the ADC block found in the Samsung EXYNOS series @@ -114,7 +114,7 @@ config EXYNOS_ADC this resource. config LP8788_ADC - bool "LP8788 ADC driver" + tristate "LP8788 ADC driver" depends on MFD_LP8788 help Say yes here to build support for TI LP8788 ADC. 
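The i2c-nomadik hunk above is worth pausing on: devm_ioremap() reports failure by returning NULL, not an ERR_PTR(), so guarding it with IS_ERR() makes the error path dead code. Below is a minimal sketch of the two idioms side by side; example_probe() is a hypothetical driver, not part of this series, and devm_ioremap_resource() is the ERR_PTR-returning variant:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* devm_ioremap() signals failure with NULL ... */
	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)		/* not IS_ERR(base), which would never trigger */
		return -ENOMEM;

	/* ... while devm_ioremap_resource() signals failure with ERR_PTR() */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}

The i2c-qup hunk above fixes the mirror-image mistake: pm_runtime_get_sync() may legitimately return a positive value, so only ret < 0 indicates an error.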
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c index 89777ed9abd..3b5bacd4d8d 100644 --- a/drivers/iio/adc/at91_adc.c +++ b/drivers/iio/adc/at91_adc.c @@ -31,7 +31,108 @@ #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> -#include <mach/at91_adc.h> +/* Registers */ +#define AT91_ADC_CR 0x00 /* Control Register */ +#define AT91_ADC_SWRST (1 << 0) /* Software Reset */ +#define AT91_ADC_START (1 << 1) /* Start Conversion */ + +#define AT91_ADC_MR 0x04 /* Mode Register */ +#define AT91_ADC_TSAMOD (3 << 0) /* ADC mode */ +#define AT91_ADC_TSAMOD_ADC_ONLY_MODE (0 << 0) /* ADC Mode */ +#define AT91_ADC_TSAMOD_TS_ONLY_MODE (1 << 0) /* Touch Screen Only Mode */ +#define AT91_ADC_TRGEN (1 << 0) /* Trigger Enable */ +#define AT91_ADC_TRGSEL (7 << 1) /* Trigger Selection */ +#define AT91_ADC_TRGSEL_TC0 (0 << 1) +#define AT91_ADC_TRGSEL_TC1 (1 << 1) +#define AT91_ADC_TRGSEL_TC2 (2 << 1) +#define AT91_ADC_TRGSEL_EXTERNAL (6 << 1) +#define AT91_ADC_LOWRES (1 << 4) /* Low Resolution */ +#define AT91_ADC_SLEEP (1 << 5) /* Sleep Mode */ +#define AT91_ADC_PENDET (1 << 6) /* Pen contact detection enable */ +#define AT91_ADC_PRESCAL_9260 (0x3f << 8) /* Prescaler Rate Selection */ +#define AT91_ADC_PRESCAL_9G45 (0xff << 8) +#define AT91_ADC_PRESCAL_(x) ((x) << 8) +#define AT91_ADC_STARTUP_9260 (0x1f << 16) /* Startup Time */ +#define AT91_ADC_STARTUP_9G45 (0x7f << 16) +#define AT91_ADC_STARTUP_9X5 (0xf << 16) +#define AT91_ADC_STARTUP_(x) ((x) << 16) +#define AT91_ADC_SHTIM (0xf << 24) /* Sample & Hold Time */ +#define AT91_ADC_SHTIM_(x) ((x) << 24) +#define AT91_ADC_PENDBC (0x0f << 28) /* Pen Debounce time */ +#define AT91_ADC_PENDBC_(x) ((x) << 28) + +#define AT91_ADC_TSR 0x0C +#define AT91_ADC_TSR_SHTIM (0xf << 24) /* Sample & Hold Time */ +#define AT91_ADC_TSR_SHTIM_(x) ((x) << 24) + +#define AT91_ADC_CHER 0x10 /* Channel Enable Register */ +#define AT91_ADC_CHDR 0x14 /* Channel Disable Register */ +#define AT91_ADC_CHSR 0x18 /* Channel Status Register */ +#define AT91_ADC_CH(n) (1 << (n)) /* Channel Number */ + +#define AT91_ADC_SR 0x1C /* Status Register */ +#define AT91_ADC_EOC(n) (1 << (n)) /* End of Conversion on Channel N */ +#define AT91_ADC_OVRE(n) (1 << ((n) + 8))/* Overrun Error on Channel N */ +#define AT91_ADC_DRDY (1 << 16) /* Data Ready */ +#define AT91_ADC_GOVRE (1 << 17) /* General Overrun Error */ +#define AT91_ADC_ENDRX (1 << 18) /* End of RX Buffer */ +#define AT91_ADC_RXFUFF (1 << 19) /* RX Buffer Full */ + +#define AT91_ADC_SR_9X5 0x30 /* Status Register for 9x5 */ +#define AT91_ADC_SR_DRDY_9X5 (1 << 24) /* Data Ready */ + +#define AT91_ADC_LCDR 0x20 /* Last Converted Data Register */ +#define AT91_ADC_LDATA (0x3ff) + +#define AT91_ADC_IER 0x24 /* Interrupt Enable Register */ +#define AT91_ADC_IDR 0x28 /* Interrupt Disable Register */ +#define AT91_ADC_IMR 0x2C /* Interrupt Mask Register */ +#define AT91RL_ADC_IER_PEN (1 << 20) +#define AT91RL_ADC_IER_NOPEN (1 << 21) +#define AT91_ADC_IER_PEN (1 << 29) +#define AT91_ADC_IER_NOPEN (1 << 30) +#define AT91_ADC_IER_XRDY (1 << 20) +#define AT91_ADC_IER_YRDY (1 << 21) +#define AT91_ADC_IER_PRDY (1 << 22) +#define AT91_ADC_ISR_PENS (1 << 31) + +#define AT91_ADC_CHR(n) (0x30 + ((n) * 4)) /* Channel Data Register N */ +#define AT91_ADC_DATA (0x3ff) + +#define AT91_ADC_CDR0_9X5 (0x50) /* Channel Data Register 0 for 9X5 */ + +#define AT91_ADC_ACR 0x94 /* Analog Control Register */ +#define AT91_ADC_ACR_PENDETSENS (0x3 << 0) /* pull-up resistor */ + +#define AT91_ADC_TSMR 0xB0 +#define
AT91_ADC_TSMR_TSMODE (3 << 0) /* Touch Screen Mode */ +#define AT91_ADC_TSMR_TSMODE_NONE (0 << 0) +#define AT91_ADC_TSMR_TSMODE_4WIRE_NO_PRESS (1 << 0) +#define AT91_ADC_TSMR_TSMODE_4WIRE_PRESS (2 << 0) +#define AT91_ADC_TSMR_TSMODE_5WIRE (3 << 0) +#define AT91_ADC_TSMR_TSAV (3 << 4) /* Averages samples */ +#define AT91_ADC_TSMR_TSAV_(x) ((x) << 4) +#define AT91_ADC_TSMR_SCTIM (0x0f << 16) /* Switch closure time */ +#define AT91_ADC_TSMR_PENDBC (0x0f << 28) /* Pen Debounce time */ +#define AT91_ADC_TSMR_PENDBC_(x) ((x) << 28) +#define AT91_ADC_TSMR_NOTSDMA (1 << 22) /* No Touchscreen DMA */ +#define AT91_ADC_TSMR_PENDET_DIS (0 << 24) /* Pen contact detection disable */ +#define AT91_ADC_TSMR_PENDET_ENA (1 << 24) /* Pen contact detection enable */ + +#define AT91_ADC_TSXPOSR 0xB4 +#define AT91_ADC_TSYPOSR 0xB8 +#define AT91_ADC_TSPRESSR 0xBC + +#define AT91_ADC_TRGR_9260 AT91_ADC_MR +#define AT91_ADC_TRGR_9G45 0x08 +#define AT91_ADC_TRGR_9X5 0xC0 + +/* Trigger Register bit field */ +#define AT91_ADC_TRGR_TRGPER (0xffff << 16) +#define AT91_ADC_TRGR_TRGPER_(x) ((x) << 16) +#define AT91_ADC_TRGR_TRGMOD (0x7 << 0) +#define AT91_ADC_TRGR_NONE (0 << 0) +#define AT91_ADC_TRGR_MOD_PERIOD_TRIG (5 << 0) #define AT91_ADC_CHAN(st, ch) \ (st->registers->channel_base + (ch * 4)) @@ -46,6 +147,29 @@ #define TOUCH_SAMPLE_PERIOD_US 2000 /* 2ms */ #define TOUCH_PEN_DETECT_DEBOUNCE_US 200 +#define MAX_RLPOS_BITS 10 +#define TOUCH_SAMPLE_PERIOD_US_RL 10000 /* 10ms, the SoC can't keep up with 2ms */ +#define TOUCH_SHTIM 0xa + +/** + * struct at91_adc_reg_desc - Various information about the registers + * @channel_base: Base offset for the channel data registers + * @drdy_mask: Mask of the DRDY field in the relevant registers + * (interrupt registers, mostly) + * @status_register: Offset of the Interrupt Status Register + * @trigger_register: Offset of the Trigger setup register + * @mr_prescal_mask: Mask of the PRESCAL field in the adc MR register + * @mr_startup_mask: Mask of the STARTUP field in the adc MR register + */ +struct at91_adc_reg_desc { + u8 channel_base; + u32 drdy_mask; + u8 status_register; + u8 trigger_register; + u32 mr_prescal_mask; + u32 mr_startup_mask; +}; + struct at91_adc_caps { bool has_ts; /* Support touch screen */ bool has_tsmr; /* only at91sam9x5, sama5d3 have TSMR reg */ @@ -64,12 +188,6 @@ struct at91_adc_caps { struct at91_adc_reg_desc registers; }; -enum atmel_adc_ts_type { - ATMEL_ADC_TOUCHSCREEN_NONE = 0, - ATMEL_ADC_TOUCHSCREEN_4WIRE = 4, - ATMEL_ADC_TOUCHSCREEN_5WIRE = 5, -}; - struct at91_adc_state { struct clk *adc_clk; u16 *buffer; @@ -114,6 +232,11 @@ struct at91_adc_state { u16 ts_sample_period_val; u32 ts_pressure_threshold; + u16 ts_pendbc; + + bool ts_bufferedmeasure; + u32 ts_prev_absx; + u32 ts_prev_absy; }; static irqreturn_t at91_adc_trigger_handler(int irq, void *p) @@ -220,7 +343,72 @@ static int at91_ts_sample(struct at91_adc_state *st) return 0; } -static irqreturn_t at91_adc_interrupt(int irq, void *private) +static irqreturn_t at91_adc_rl_interrupt(int irq, void *private) +{ + struct iio_dev *idev = private; + struct at91_adc_state *st = iio_priv(idev); + u32 status = at91_adc_readl(st, st->registers->status_register); + unsigned int reg; + + status &= at91_adc_readl(st, AT91_ADC_IMR); + if (status & st->registers->drdy_mask) + handle_adc_eoc_trigger(irq, idev); + + if (status & AT91RL_ADC_IER_PEN) { + /* Disabling pen debounce is required to get a NOPEN irq */ + reg = at91_adc_readl(st, AT91_ADC_MR); + reg &= ~AT91_ADC_PENDBC; + at91_adc_writel(st, 
AT91_ADC_MR, reg); + + at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN); + at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_NOPEN + | AT91_ADC_EOC(3)); + /* Set up period trigger for sampling */ + at91_adc_writel(st, st->registers->trigger_register, + AT91_ADC_TRGR_MOD_PERIOD_TRIG | + AT91_ADC_TRGR_TRGPER_(st->ts_sample_period_val)); + } else if (status & AT91RL_ADC_IER_NOPEN) { + reg = at91_adc_readl(st, AT91_ADC_MR); + reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC; + at91_adc_writel(st, AT91_ADC_MR, reg); + at91_adc_writel(st, st->registers->trigger_register, + AT91_ADC_TRGR_NONE); + + at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_NOPEN + | AT91_ADC_EOC(3)); + at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN); + st->ts_bufferedmeasure = false; + input_report_key(st->ts_input, BTN_TOUCH, 0); + input_sync(st->ts_input); + } else if (status & AT91_ADC_EOC(3)) { + /* Conversion finished */ + if (st->ts_bufferedmeasure) { + /* + * Last measurement is always discarded, since it can + * be erroneous. + * Always report previous measurement + */ + input_report_abs(st->ts_input, ABS_X, st->ts_prev_absx); + input_report_abs(st->ts_input, ABS_Y, st->ts_prev_absy); + input_report_key(st->ts_input, BTN_TOUCH, 1); + input_sync(st->ts_input); + } else + st->ts_bufferedmeasure = true; + + /* Now make new measurement */ + st->ts_prev_absx = at91_adc_readl(st, AT91_ADC_CHAN(st, 3)) + << MAX_RLPOS_BITS; + st->ts_prev_absx /= at91_adc_readl(st, AT91_ADC_CHAN(st, 2)); + + st->ts_prev_absy = at91_adc_readl(st, AT91_ADC_CHAN(st, 1)) + << MAX_RLPOS_BITS; + st->ts_prev_absy /= at91_adc_readl(st, AT91_ADC_CHAN(st, 0)); + } + + return IRQ_HANDLED; +} + +static irqreturn_t at91_adc_9x5_interrupt(int irq, void *private) { struct iio_dev *idev = private; struct at91_adc_state *st = iio_priv(idev); @@ -653,6 +841,8 @@ static int at91_adc_probe_dt_ts(struct device_node *node, return -EINVAL; } + if (!st->caps->has_tsmr) + return 0; prop = 0; of_property_read_u32(node, "atmel,adc-ts-pressure-threshold", &prop); st->ts_pressure_threshold = prop; @@ -776,6 +966,7 @@ static int at91_adc_probe_pdata(struct at91_adc_state *st, st->trigger_number = pdata->trigger_number; st->trigger_list = pdata->trigger_list; st->registers = &st->caps->registers; + st->touchscreen_type = pdata->touchscreen_type; return 0; } @@ -790,7 +981,10 @@ static int atmel_ts_open(struct input_dev *dev) { struct at91_adc_state *st = input_get_drvdata(dev); - at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN); + if (st->caps->has_tsmr) + at91_adc_writel(st, AT91_ADC_IER, AT91_ADC_IER_PEN); + else + at91_adc_writel(st, AT91_ADC_IER, AT91RL_ADC_IER_PEN); return 0; } @@ -798,45 +992,61 @@ static void atmel_ts_close(struct input_dev *dev) { struct at91_adc_state *st = input_get_drvdata(dev); - at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN); + if (st->caps->has_tsmr) + at91_adc_writel(st, AT91_ADC_IDR, AT91_ADC_IER_PEN); + else + at91_adc_writel(st, AT91_ADC_IDR, AT91RL_ADC_IER_PEN); } static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz) { - u32 reg = 0, pendbc; + u32 reg = 0; int i = 0; - if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE) - reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS; - else - reg = AT91_ADC_TSMR_TSMODE_5WIRE; - /* a Pen Detect Debounce Time is necessary for the ADC Touch to avoid * pen detect noise. 
* The formula is : Pen Detect Debounce Time = (2 ^ pendbc) / ADCClock */ - pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz / 1000, 1); + st->ts_pendbc = round_up(TOUCH_PEN_DETECT_DEBOUNCE_US * adc_clk_khz / + 1000, 1); - while (pendbc >> ++i) + while (st->ts_pendbc >> ++i) ; /* Empty! Find the shift offset */ - if (abs(pendbc - (1 << i)) < abs(pendbc - (1 << (i - 1)))) - pendbc = i; + if (abs(st->ts_pendbc - (1 << i)) < abs(st->ts_pendbc - (1 << (i - 1)))) + st->ts_pendbc = i; else - pendbc = i - 1; + st->ts_pendbc = i - 1; - if (st->caps->has_tsmr) { - reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average) - & AT91_ADC_TSMR_TSAV; - reg |= AT91_ADC_TSMR_PENDBC_(pendbc) & AT91_ADC_TSMR_PENDBC; - reg |= AT91_ADC_TSMR_NOTSDMA; - reg |= AT91_ADC_TSMR_PENDET_ENA; - reg |= 0x03 << 8; /* TSFREQ, need bigger than TSAV */ - - at91_adc_writel(st, AT91_ADC_TSMR, reg); - } else { - /* TODO: for 9g45 which has no TSMR */ + if (!st->caps->has_tsmr) { + reg = at91_adc_readl(st, AT91_ADC_MR); + reg |= AT91_ADC_TSAMOD_TS_ONLY_MODE | AT91_ADC_PENDET; + + reg |= AT91_ADC_PENDBC_(st->ts_pendbc) & AT91_ADC_PENDBC; + at91_adc_writel(st, AT91_ADC_MR, reg); + + reg = AT91_ADC_TSR_SHTIM_(TOUCH_SHTIM) & AT91_ADC_TSR_SHTIM; + at91_adc_writel(st, AT91_ADC_TSR, reg); + + st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US_RL * + adc_clk_khz / 1000) - 1, 1); + + return 0; } + if (st->touchscreen_type == ATMEL_ADC_TOUCHSCREEN_4WIRE) + reg = AT91_ADC_TSMR_TSMODE_4WIRE_PRESS; + else + reg = AT91_ADC_TSMR_TSMODE_5WIRE; + + reg |= AT91_ADC_TSMR_TSAV_(st->caps->ts_filter_average) + & AT91_ADC_TSMR_TSAV; + reg |= AT91_ADC_TSMR_PENDBC_(st->ts_pendbc) & AT91_ADC_TSMR_PENDBC; + reg |= AT91_ADC_TSMR_NOTSDMA; + reg |= AT91_ADC_TSMR_PENDET_ENA; + reg |= 0x03 << 8; /* TSFREQ, needs to be bigger than TSAV */ + + at91_adc_writel(st, AT91_ADC_TSMR, reg); + /* Change adc internal resistor value for better pen detection, * default value is 100 kOhm. 
* 0 = 200 kOhm, 1 = 150 kOhm, 2 = 100 kOhm, 3 = 50 kOhm @@ -845,7 +1055,7 @@ static int at91_ts_hw_init(struct at91_adc_state *st, u32 adc_clk_khz) at91_adc_writel(st, AT91_ADC_ACR, st->caps->ts_pen_detect_sensitivity & AT91_ADC_ACR_PENDETSENS); - /* Sample Peroid Time = (TRGPER + 1) / ADCClock */ + /* Sample Period Time = (TRGPER + 1) / ADCClock */ st->ts_sample_period_val = round_up((TOUCH_SAMPLE_PERIOD_US * adc_clk_khz / 1000) - 1, 1); @@ -874,18 +1084,38 @@ static int at91_ts_register(struct at91_adc_state *st, __set_bit(EV_ABS, input->evbit); __set_bit(EV_KEY, input->evbit); __set_bit(BTN_TOUCH, input->keybit); - input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1, 0, 0); - input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1, 0, 0); - input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0); + if (st->caps->has_tsmr) { + input_set_abs_params(input, ABS_X, 0, (1 << MAX_POS_BITS) - 1, + 0, 0); + input_set_abs_params(input, ABS_Y, 0, (1 << MAX_POS_BITS) - 1, + 0, 0); + input_set_abs_params(input, ABS_PRESSURE, 0, 0xffffff, 0, 0); + } else { + if (st->touchscreen_type != ATMEL_ADC_TOUCHSCREEN_4WIRE) { + dev_err(&pdev->dev, + "This touchscreen controller only supports 4 wires\n"); + ret = -EINVAL; + goto err; + } + + input_set_abs_params(input, ABS_X, 0, (1 << MAX_RLPOS_BITS) - 1, + 0, 0); + input_set_abs_params(input, ABS_Y, 0, (1 << MAX_RLPOS_BITS) - 1, + 0, 0); + } st->ts_input = input; input_set_drvdata(input, st); ret = input_register_device(input); if (ret) - input_free_device(st->ts_input); + goto err; return ret; + +err: + input_free_device(st->ts_input); + return ret; } static void at91_ts_unregister(struct at91_adc_state *st) @@ -943,11 +1173,13 @@ static int at91_adc_probe(struct platform_device *pdev) */ at91_adc_writel(st, AT91_ADC_CR, AT91_ADC_SWRST); at91_adc_writel(st, AT91_ADC_IDR, 0xFFFFFFFF); - ret = request_irq(st->irq, - at91_adc_interrupt, - 0, - pdev->dev.driver->name, - idev); + + if (st->caps->has_tsmr) + ret = request_irq(st->irq, at91_adc_9x5_interrupt, 0, + pdev->dev.driver->name, idev); + else + ret = request_irq(st->irq, at91_adc_rl_interrupt, 0, + pdev->dev.driver->name, idev); if (ret) { dev_err(&pdev->dev, "Failed to allocate IRQ.\n"); return ret; @@ -1051,12 +1283,6 @@ static int at91_adc_probe(struct platform_device *pdev) goto error_disable_adc_clk; } } else { - if (!st->caps->has_tsmr) { - dev_err(&pdev->dev, "We don't support non-TSMR adc\n"); - ret = -ENODEV; - goto error_disable_adc_clk; - } - ret = at91_ts_register(st, pdev); if (ret) goto error_disable_adc_clk; @@ -1120,6 +1346,20 @@ static struct at91_adc_caps at91sam9260_caps = { }, }; +static struct at91_adc_caps at91sam9rl_caps = { + .has_ts = true, + .calc_startup_ticks = calc_startup_ticks_9260, /* same as 9260 */ + .num_channels = 6, + .registers = { + .channel_base = AT91_ADC_CHR(0), + .drdy_mask = AT91_ADC_DRDY, + .status_register = AT91_ADC_SR, + .trigger_register = AT91_ADC_TRGR_9G45, + .mr_prescal_mask = AT91_ADC_PRESCAL_9260, + .mr_startup_mask = AT91_ADC_STARTUP_9G45, + }, +}; + static struct at91_adc_caps at91sam9g45_caps = { .has_ts = true, .calc_startup_ticks = calc_startup_ticks_9260, /* same as 9260 */ @@ -1154,6 +1394,7 @@ static struct at91_adc_caps at91sam9x5_caps = { static const struct of_device_id at91_adc_dt_ids[] = { { .compatible = "atmel,at91sam9260-adc", .data = &at91sam9260_caps }, + { .compatible = "atmel,at91sam9rl-adc", .data = &at91sam9rl_caps }, { .compatible = "atmel,at91sam9g45-adc", .data = &at91sam9g45_caps }, { .compatible = 
"atmel,at91sam9x5-adc", .data = &at91sam9x5_caps }, {}, @@ -1165,6 +1406,9 @@ static const struct platform_device_id at91_adc_ids[] = { .name = "at91sam9260-adc", .driver_data = (unsigned long)&at91sam9260_caps, }, { + .name = "at91sam9rl-adc", + .driver_data = (unsigned long)&at91sam9rl_caps, + }, { .name = "at91sam9g45-adc", .driver_data = (unsigned long)&at91sam9g45_caps, }, { diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c index d25b262193a..affa93f5178 100644 --- a/drivers/iio/adc/exynos_adc.c +++ b/drivers/iio/adc/exynos_adc.c @@ -344,7 +344,7 @@ static int exynos_adc_probe(struct platform_device *pdev) exynos_adc_hw_init(info); - ret = of_platform_populate(np, exynos_adc_match, NULL, &pdev->dev); + ret = of_platform_populate(np, exynos_adc_match, NULL, &indio_dev->dev); if (ret < 0) { dev_err(&pdev->dev, "failed adding child nodes\n"); goto err_of_populate; @@ -353,7 +353,7 @@ static int exynos_adc_probe(struct platform_device *pdev) return 0; err_of_populate: - device_for_each_child(&pdev->dev, NULL, + device_for_each_child(&indio_dev->dev, NULL, exynos_adc_remove_devices); regulator_disable(info->vdd); clk_disable_unprepare(info->clk); @@ -369,7 +369,7 @@ static int exynos_adc_remove(struct platform_device *pdev) struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct exynos_adc *info = iio_priv(indio_dev); - device_for_each_child(&pdev->dev, NULL, + device_for_each_child(&indio_dev->dev, NULL, exynos_adc_remove_devices); regulator_disable(info->vdd); clk_disable_unprepare(info->clk); diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index cb9f96b446a..d8ad606c7cd 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -660,6 +660,7 @@ static int inv_mpu_probe(struct i2c_client *client, { struct inv_mpu6050_state *st; struct iio_dev *indio_dev; + struct inv_mpu6050_platform_data *pdata; int result; if (!i2c_check_functionality(client->adapter, @@ -672,8 +673,10 @@ static int inv_mpu_probe(struct i2c_client *client, st = iio_priv(indio_dev); st->client = client; - st->plat_data = *(struct inv_mpu6050_platform_data - *)dev_get_platdata(&client->dev); + pdata = (struct inv_mpu6050_platform_data + *)dev_get_platdata(&client->dev); + if (pdata) + st->plat_data = *pdata; /* power is turned on inside check chip type*/ result = inv_check_and_setup_chip(st, id); if (result) diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig index d4e8983fba5..23f38cf2c5c 100644 --- a/drivers/infiniband/hw/cxgb4/Kconfig +++ b/drivers/infiniband/hw/cxgb4/Kconfig @@ -1,10 +1,10 @@ config INFINIBAND_CXGB4 - tristate "Chelsio T4 RDMA Driver" + tristate "Chelsio T4/T5 RDMA Driver" depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n) select GENERIC_ALLOCATOR ---help--- - This is an iWARP/RDMA driver for the Chelsio T4 1GbE and - 10GbE adapters. + This is an iWARP/RDMA driver for the Chelsio T4 and T5 + 1GbE, 10GbE adapters and T5 40GbE adapter. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. 
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 185452abf32..1f863a96a48 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep) opt2 |= SACK_EN(1); if (wscale && enable_tcp_window_scaling) opt2 |= WND_SCALE_EN(1); + if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { + opt2 |= T5_OPT_2_VALID; + opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); + } t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { @@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status) static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp) { PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); - state_set(&ep->com, ABORTING); + __state_set(&ep->com, ABORTING); set_bit(ABORT_CONN, &ep->com.history); return send_abort(ep, skb, gfp); } @@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) return credits; } -static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) +static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) { struct mpa_message *mpa; struct mpa_v2_conn_params *mpa_v2_params; @@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) struct c4iw_qp_attributes attrs; enum c4iw_qp_attr_mask mask; int err; + int disconnect = 0; PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); @@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) * will abort the connection. */ if (stop_ep_timer(ep)) - return; + return 0; /* * If we get more than the supported amount of private data @@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) * if we don't even have the mpa message, then bail. */ if (ep->mpa_pkt_len < sizeof(*mpa)) - return; + return 0; mpa = (struct mpa_message *) ep->mpa_pkt; /* Validate MPA header. */ @@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) * We'll continue processing when more data arrives. 
*/ if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) - return; + return 0; if (mpa->flags & MPA_REJECT) { err = -ECONNREFUSED; @@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_NOMATCH_RTR; attrs.next_state = C4IW_QP_STATE_TERMINATE; + attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; + disconnect = 1; goto out; } @@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) attrs.layer_etype = LAYER_MPA | DDP_LLP; attrs.ecode = MPA_INSUFF_IRD; attrs.next_state = C4IW_QP_STATE_TERMINATE; + attrs.send_term = 1; err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); err = -ENOMEM; + disconnect = 1; goto out; } goto out; @@ -1366,7 +1375,7 @@ err: send_abort(ep, skb, GFP_KERNEL); out: connect_reply_upcall(ep, err); - return; + return disconnect; } static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) @@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) unsigned int tid = GET_TID(hdr); struct tid_info *t = dev->rdev.lldi.tids; __u8 status = hdr->status; + int disconnect = 0; ep = lookup_tid(t, tid); if (!ep) @@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) switch (ep->com.state) { case MPA_REQ_SENT: ep->rcv_seq += dlen; - process_mpa_reply(ep, skb); + disconnect = process_mpa_reply(ep, skb); break; case MPA_REQ_WAIT: ep->rcv_seq += dlen; @@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) ep->com.state, ep->hwtid, status); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); + disconnect = 1; break; } default: break; } mutex_unlock(&ep->com.mutex); + if (disconnect) + c4iw_ep_disconnect(ep, 0, GFP_KERNEL); return 0; } @@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, if (tcph->ece && tcph->cwr) opt2 |= CCTRL_ECN(1); } + if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) { + opt2 |= T5_OPT_2_VALID; + opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); + } rpl = cplhdr(skb); INIT_TP_WR(rpl, ep->hwtid); @@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep) __func__, ep, ep->hwtid, ep->com.state); abort = 0; } - mutex_unlock(&ep->com.mutex); if (abort) abort_connection(ep, NULL, GFP_KERNEL); + mutex_unlock(&ep->com.mutex); c4iw_put_ep(&ep->com); } diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7b8c5806a09..7474b490760 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -435,6 +435,7 @@ struct c4iw_qp_attributes { u8 ecode; u16 sq_db_inc; u16 rq_db_inc; + u8 send_term; }; struct c4iw_qp { diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 7b5114cb486..086f62f5dc9 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, qhp->attr.layer_etype = attrs->layer_etype; qhp->attr.ecode = attrs->ecode; ep = qhp->ep; - disconnect = 1; - c4iw_get_ep(&qhp->ep->com); - if (!internal) + if (!internal) { + c4iw_get_ep(&qhp->ep->com); terminate = 1; - else { + disconnect = 1; + } else { + 
terminate = qhp->attr.send_term; ret = rdma_fini(rhp, qhp, ep); if (ret) goto err; @@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, /* * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for * ringing the queue db when we're in DB_FULL mode. + * Only allow this on T4 devices. */ attrs.sq_db_inc = attr->sq_psn; attrs.rq_db_inc = attr->rq_psn; mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0; mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0; + if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && + (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB))) + return -EINVAL; return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); } diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index dc193c29267..6121ca08fe5 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -836,4 +836,18 @@ struct ulptx_idata { #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE) #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U) +enum { /* TCP congestion control algorithms */ + CONG_ALG_RENO, + CONG_ALG_TAHOE, + CONG_ALG_NEWRENO, + CONG_ALG_HIGHSPEED +}; + +#define S_CONG_CNTRL 14 +#define M_CONG_CNTRL 0x3 +#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL) +#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) + +#define T5_OPT_2_VALID (1 << 31) + #endif /* _T4FW_RI_API_H_ */ diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1b6dbe156a3..199c7896f08 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -48,6 +48,7 @@ #include <linux/mlx4/driver.h> #include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include "user.h" @@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct notifier_block *this, unsigned long event, } #endif +#define MLX4_IB_INVALID_MAC ((u64)-1) +static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + int port) +{ + u64 new_smac = 0; + u64 release_mac = MLX4_IB_INVALID_MAC; + struct mlx4_ib_qp *qp; + + read_lock(&dev_base_lock); + new_smac = mlx4_mac_to_u64(dev->dev_addr); + read_unlock(&dev_base_lock); + + mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); + qp = ibdev->qp1_proxy[port - 1]; + if (qp) { + int new_smac_index; + u64 old_smac = qp->pri.smac; + struct mlx4_update_qp_params update_params; + + if (new_smac == old_smac) + goto unlock; + + new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); + + if (new_smac_index < 0) + goto unlock; + + update_params.smac_index = new_smac_index; + if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, + &update_params)) { + release_mac = new_smac; + goto unlock; + } + + qp->pri.smac = new_smac; + qp->pri.smac_index = new_smac_index; + + release_mac = old_smac; + } + +unlock: + mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); + if (release_mac != MLX4_IB_INVALID_MAC) + mlx4_unregister_mac(ibdev->dev, port, release_mac); +} + static void mlx4_ib_get_dev_addr(struct net_device *dev, struct mlx4_ib_dev *ibdev, u8 port) { @@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) return 0; } -static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) +static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + unsigned long event) + { struct mlx4_ib_iboe *iboe; + int update_qps_port = -1; int port; iboe = &ibdev->iboe; @@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) } curr_master 
= iboe->masters[port - 1]; + if (dev == iboe->netdevs[port - 1] && + (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || + event == NETDEV_UP || event == NETDEV_CHANGE)) + update_qps_port = port; + if (curr_netdev) { port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? IB_PORT_ACTIVE : IB_PORT_DOWN; @@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) } spin_unlock(&iboe->lock); + + if (update_qps_port > 0) + mlx4_ib_update_qps(ibdev, dev, update_qps_port); } static int mlx4_ib_netdev_event(struct notifier_block *this, @@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, return NOTIFY_DONE; ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); - mlx4_ib_scan_netdevs(ibdev); + mlx4_ib_scan_netdevs(ibdev, dev, event); return NOTIFY_DONE; } @@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) goto err_map; for (i = 0; i < ibdev->num_ports; ++i) { + mutex_init(&ibdev->qp1_proxy_lock[i]); if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == IB_LINK_LAYER_ETHERNET) { err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); @@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) for (i = 1 ; i <= ibdev->num_ports ; ++i) reset_gid_table(ibdev, i); rtnl_lock(); - mlx4_ib_scan_netdevs(ibdev); + mlx4_ib_scan_netdevs(ibdev, NULL, 0); rtnl_unlock(); mlx4_ib_init_gid_table(ibdev); } diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index f589522fddf..66b0b7dbd9f 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -522,6 +522,9 @@ struct mlx4_ib_dev { int steer_qpn_count; int steer_qpn_base; int steering_support; + struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; + /* lock when destroying qp1_proxy and getting netdev events */ + struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; }; struct ib_event_work { diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 41308af4163..dc57482ae7a 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) if (is_qp0(dev, mqp)) mlx4_CLOSE_PORT(dev->dev, mqp->port); + if (dev->qp1_proxy[mqp->port - 1] == mqp) { + mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); + dev->qp1_proxy[mqp->port - 1] = NULL; + mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); + } + pd = get_pd(mqp); destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); @@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); if (err) return -EINVAL; + if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) + dev->qp1_proxy[qp->port - 1] = qp; } } } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index c98fdb18593..a1710465faa 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -28,6 +28,7 @@ #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/iscsi/iscsi_transport.h> +#include <linux/semaphore.h> #include "isert_proto.h" #include "ib_isert.h" @@ -561,7 +562,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) struct isert_device *device; struct ib_device *ib_dev = cma_id->device; int ret = 0; - u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; + u8 pi_support; + + spin_lock_bh(&np->np_thread_lock); + if (!np->enabled) { + spin_unlock_bh(&np->np_thread_lock); + 
pr_debug("iscsi_np is not enabled, reject connect request\n"); + return rdma_reject(cma_id, NULL, 0); + } + spin_unlock_bh(&np->np_thread_lock); pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n", cma_id, cma_id->context); @@ -652,6 +661,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_mr; } + pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi; if (pi_support && !device->pi_capable) { pr_err("Protection information requested but not supported\n"); ret = -EINVAL; @@ -663,11 +673,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) goto out_conn_dev; mutex_lock(&isert_np->np_accept_mutex); - list_add_tail(&isert_np->np_accept_list, &isert_conn->conn_accept_node); + list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list); mutex_unlock(&isert_np->np_accept_mutex); - pr_debug("isert_connect_request() waking up np_accept_wq: %p\n", np); - wake_up(&isert_np->np_accept_wq); + pr_debug("isert_connect_request() up np_sem np: %p\n", np); + up(&isert_np->np_sem); return 0; out_conn_dev: @@ -2999,7 +3009,7 @@ isert_setup_np(struct iscsi_np *np, pr_err("Unable to allocate struct isert_np\n"); return -ENOMEM; } - init_waitqueue_head(&isert_np->np_accept_wq); + sema_init(&isert_np->np_sem, 0); mutex_init(&isert_np->np_accept_mutex); INIT_LIST_HEAD(&isert_np->np_accept_list); init_completion(&isert_np->np_login_comp); @@ -3048,18 +3058,6 @@ out: } static int -isert_check_accept_queue(struct isert_np *isert_np) -{ - int empty; - - mutex_lock(&isert_np->np_accept_mutex); - empty = list_empty(&isert_np->np_accept_list); - mutex_unlock(&isert_np->np_accept_mutex); - - return empty; -} - -static int isert_rdma_accept(struct isert_conn *isert_conn) { struct rdma_cm_id *cm_id = isert_conn->conn_cm_id; @@ -3151,16 +3149,14 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn) int max_accept = 0, ret; accept_wait: - ret = wait_event_interruptible(isert_np->np_accept_wq, - !isert_check_accept_queue(isert_np) || - np->np_thread_state == ISCSI_NP_THREAD_RESET); + ret = down_interruptible(&isert_np->np_sem); if (max_accept > 5) return -ENODEV; spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { spin_unlock_bh(&np->np_thread_lock); - pr_err("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); + pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n"); return -ENODEV; } spin_unlock_bh(&np->np_thread_lock); diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 4c072ae34c0..da6612e6800 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -182,7 +182,7 @@ struct isert_device { }; struct isert_np { - wait_queue_head_t np_accept_wq; + struct semaphore np_sem; struct rdma_cm_id *np_cm_id; struct mutex np_accept_mutex; struct list_head np_accept_list; diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 76842d7dc2e..ffc7ad3a2c8 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -71,7 +71,7 @@ config KEYBOARD_ATKBD default y select SERIO select SERIO_LIBPS2 - select SERIO_I8042 if X86 + select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO select SERIO_GSCPS2 if GSC help Say Y here if you want to use a standard AT or PS/2 keyboard. 
Usually diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 2626773ff29..2dd1d0dd4f7 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -243,6 +243,12 @@ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data); static void *atkbd_platform_fixup_data; static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int); +/* + * Certain keyboards do not like ATKBD_CMD_RESET_DIS and stop responding + * to many commands until full reset (ATKBD_CMD_RESET_BAT) is performed. + */ +static bool atkbd_skip_deactivate; + static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, ssize_t (*handler)(struct atkbd *, char *)); static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count, @@ -768,7 +774,8 @@ static int atkbd_probe(struct atkbd *atkbd) * Make sure nothing is coming from the keyboard and disturbs our * internal state. */ - atkbd_deactivate(atkbd); + if (!atkbd_skip_deactivate) + atkbd_deactivate(atkbd); return 0; } @@ -1638,6 +1645,12 @@ static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id) return 1; } +static int __init atkbd_deactivate_fixup(const struct dmi_system_id *id) +{ + atkbd_skip_deactivate = true; + return 1; +} + static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { { .matches = { @@ -1775,6 +1788,20 @@ static const struct dmi_system_id atkbd_dmi_quirk_table[] __initconst = { .callback = atkbd_setup_scancode_fixup, .driver_data = atkbd_oqo_01plus_scancode_fixup, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_PRODUCT_NAME, "LW25-B7HV"), + }, + .callback = atkbd_deactivate_fixup, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"), + DMI_MATCH(DMI_PRODUCT_NAME, "P1-J273B"), + }, + .callback = atkbd_deactivate_fixup, + }, { } }; diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index d8241ba0afa..a15063bea70 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -111,6 +111,8 @@ struct pxa27x_keypad { unsigned short keycodes[MAX_KEYPAD_KEYS]; int rotary_rel_code[2]; + unsigned int row_shift; + /* state row bits of each column scan */ uint32_t matrix_key_state[MAX_MATRIX_KEY_COLS]; uint32_t direct_key_state; @@ -467,7 +469,8 @@ scan: if ((bits_changed & (1 << row)) == 0) continue; - code = MATRIX_SCAN_CODE(row, col, MATRIX_ROW_SHIFT); + code = MATRIX_SCAN_CODE(row, col, keypad->row_shift); + input_event(input_dev, EV_MSC, MSC_SCAN, code); input_report_key(input_dev, keypad->keycodes[code], new_state[col] & (1 << row)); @@ -802,6 +805,8 @@ static int pxa27x_keypad_probe(struct platform_device *pdev) goto failed_put_clk; } + keypad->row_shift = get_count_order(pdata->matrix_key_cols); + if ((pdata->enable_rotary0 && keypad->rotary_rel_code[0] != -1) || (pdata->enable_rotary1 && keypad->rotary_rel_code[1] != -1)) { input_dev->evbit[0] |= BIT_MASK(EV_REL); diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c index 55c15304ddb..4e491c1762c 100644 --- a/drivers/input/keyboard/tca8418_keypad.c +++ b/drivers/input/keyboard/tca8418_keypad.c @@ -392,6 +392,13 @@ static const struct of_device_id tca8418_dt_ids[] = { { } }; MODULE_DEVICE_TABLE(of, tca8418_dt_ids); + +/* + * The device tree based i2c loader looks for + * "i2c:" + second_component_of(property("compatible")) + * and therefore we need an alias to be found.
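The pxa27x_keypad hunks above stop hard-coding MATRIX_ROW_SHIFT and instead derive the shift from the board's actual column count with get_count_order(), so that MATRIX_SCAN_CODE(), which expands to ((row << row_shift) + col), indexes the keycode table consistently with the reported MSC_SCAN value. A small stand-alone sketch of the arithmetic; count_order() is an assumed user-space stand-in for the kernel's get_count_order():

#include <stdio.h>

/* ceil(log2(n)): the smallest shift wide enough to hold a column index */
static int count_order(unsigned int n)
{
    int order = 0;

    while ((1u << order) < n)
        order++;
    return order;
}

int main(void)
{
    unsigned int cols = 5;            /* e.g. an 8x5 matrix keypad */
    int shift = count_order(cols);    /* 3: each row gets 8 slots */
    unsigned int row = 2, col = 4;
    unsigned int code = (row << shift) + col;   /* MATRIX_SCAN_CODE(2, 4, 3) */

    printf("row %u col %u -> scan code %u\n", row, col, code);  /* 20 */
    return 0;
}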
+ */ +MODULE_ALIAS("i2c:tca8418"); #endif static struct i2c_driver tca8418_keypad_driver = { diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c index 52d3a9b28f0..b36831c828d 100644 --- a/drivers/input/misc/bma150.c +++ b/drivers/input/misc/bma150.c @@ -70,6 +70,7 @@ #define BMA150_CFG_5_REG 0x11 #define BMA150_CHIP_ID 2 +#define BMA180_CHIP_ID 3 #define BMA150_CHIP_ID_REG BMA150_DATA_0_REG #define BMA150_ACC_X_LSB_REG BMA150_DATA_2_REG @@ -539,7 +540,7 @@ static int bma150_probe(struct i2c_client *client, } chip_id = i2c_smbus_read_byte_data(client, BMA150_CHIP_ID_REG); - if (chip_id != BMA150_CHIP_ID) { + if (chip_id != BMA150_CHIP_ID && chip_id != BMA180_CHIP_ID) { dev_err(&client->dev, "BMA150 chip id error: %d\n", chip_id); return -EINVAL; } @@ -643,6 +644,7 @@ static UNIVERSAL_DEV_PM_OPS(bma150_pm, bma150_suspend, bma150_resume, NULL); static const struct i2c_device_id bma150_id[] = { { "bma150", 0 }, + { "bma180", 0 }, { "smb380", 0 }, { "bma023", 0 }, { } diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index effa9c5f2c5..6b8441f7bc3 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig @@ -17,7 +17,7 @@ config MOUSE_PS2 default y select SERIO select SERIO_LIBPS2 - select SERIO_I8042 if X86 + select SERIO_I8042 if ARCH_MIGHT_HAVE_PC_SERIO select SERIO_GSCPS2 if GSC help Say Y here if you have a PS/2 mouse connected to your system. This diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 088d3541c7d..b96e978a37b 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -11,6 +11,7 @@ */ #include <linux/delay.h> +#include <linux/dmi.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/input.h> @@ -831,7 +832,11 @@ static int elantech_set_absolute_mode(struct psmouse *psmouse) break; case 3: - etd->reg_10 = 0x0b; + if (etd->set_hw_resolution) + etd->reg_10 = 0x0b; + else + etd->reg_10 = 0x03; + if (elantech_write_reg(psmouse, 0x10, etd->reg_10)) rc = -1; @@ -1331,6 +1336,22 @@ static int elantech_reconnect(struct psmouse *psmouse) } /* + * Some hw_version 3 models go into error state when we try to set bit 3 of r10 + */ +static const struct dmi_system_id no_hw_res_dmi_table[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + /* Gigabyte U2442 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "U2442"), + }, + }, +#endif + { } +}; + +/* * determine hardware version and set some properties according to it. */ static int elantech_set_properties(struct elantech_data *etd) @@ -1390,6 +1411,9 @@ static int elantech_set_properties(struct elantech_data *etd) */ etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); + /* Enable real hardware resolution on hw_version 3 ? 
*/ + etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table); + return 0; } diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 036a04abaef..9e0e2a1f340 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h @@ -130,6 +130,7 @@ struct elantech_data { bool jumpy_cursor; bool reports_pressure; bool crc_enabled; + bool set_hw_resolution; unsigned char hw_version; unsigned int fw_version; unsigned int single_finger_reports; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index ef9f4913450..c5ec703c727 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -117,6 +117,31 @@ void synaptics_reset(struct psmouse *psmouse) } #ifdef CONFIG_MOUSE_PS2_SYNAPTICS +struct min_max_quirk { + const char * const *pnp_ids; + int x_min, x_max, y_min, y_max; +}; + +static const struct min_max_quirk min_max_pnpid_table[] = { + { + (const char * const []){"LEN0033", NULL}, + 1024, 5052, 2258, 4832 + }, + { + (const char * const []){"LEN0035", "LEN0042", NULL}, + 1232, 5710, 1156, 4696 + }, + { + (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL}, + 1024, 5112, 2024, 4832 + }, + { + (const char * const []){"LEN2001", NULL}, + 1024, 5022, 2508, 4832 + }, + { } +}; + /* This list has been kindly provided by Synaptics. */ static const char * const topbuttonpad_pnp_ids[] = { "LEN0017", @@ -129,7 +154,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN002D", "LEN002E", "LEN0033", /* Helix */ - "LEN0034", /* T431s, T540, X1 Carbon 2nd */ + "LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */ "LEN0035", /* X240 */ "LEN0036", /* T440 */ "LEN0037", @@ -142,7 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = { "LEN0048", "LEN0049", "LEN2000", - "LEN2001", + "LEN2001", /* Edge E431 */ "LEN2002", "LEN2003", "LEN2004", /* L440 */ @@ -156,6 +181,18 @@ static const char * const topbuttonpad_pnp_ids[] = { NULL }; +static bool matches_pnp_id(struct psmouse *psmouse, const char * const ids[]) +{ + int i; + + if (!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) + for (i = 0; ids[i]; i++) + if (strstr(psmouse->ps2dev.serio->firmware_id, ids[i])) + return true; + + return false; +} + /***************************************************************************** * Synaptics communications functions ****************************************************************************/ @@ -304,20 +341,20 @@ static int synaptics_identify(struct psmouse *psmouse) * Resolution is left zero if touchpad does not support the query */ -static const int *quirk_min_max; - static int synaptics_resolution(struct psmouse *psmouse) { struct synaptics_data *priv = psmouse->private; unsigned char resp[3]; + int i; - if (quirk_min_max) { - priv->x_min = quirk_min_max[0]; - priv->x_max = quirk_min_max[1]; - priv->y_min = quirk_min_max[2]; - priv->y_max = quirk_min_max[3]; - return 0; - } + for (i = 0; min_max_pnpid_table[i].pnp_ids; i++) + if (matches_pnp_id(psmouse, min_max_pnpid_table[i].pnp_ids)) { + priv->x_min = min_max_pnpid_table[i].x_min; + priv->x_max = min_max_pnpid_table[i].x_max; + priv->y_min = min_max_pnpid_table[i].y_min; + priv->y_max = min_max_pnpid_table[i].y_max; + return 0; + } if (SYN_ID_MAJOR(priv->identity) < 4) return 0; @@ -1365,17 +1402,8 @@ static void set_input_params(struct psmouse *psmouse, if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { __set_bit(INPUT_PROP_BUTTONPAD, dev->propbit); - /* See if this buttonpad has a top button area */ - if 
(!strncmp(psmouse->ps2dev.serio->firmware_id, "PNP:", 4)) { - for (i = 0; topbuttonpad_pnp_ids[i]; i++) { - if (strstr(psmouse->ps2dev.serio->firmware_id, - topbuttonpad_pnp_ids[i])) { - __set_bit(INPUT_PROP_TOPBUTTONPAD, - dev->propbit); - break; - } - } - } + if (matches_pnp_id(psmouse, topbuttonpad_pnp_ids)) + __set_bit(INPUT_PROP_TOPBUTTONPAD, dev->propbit); /* Clickpads report only left button */ __clear_bit(BTN_RIGHT, dev->keybit); __clear_bit(BTN_MIDDLE, dev->keybit); @@ -1547,96 +1575,10 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = { { } }; -static const struct dmi_system_id min_max_dmi_table[] __initconst = { -#if defined(CONFIG_DMI) - { - /* Lenovo ThinkPad Helix */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"), - }, - .driver_data = (int []){1024, 5052, 2258, 4832}, - }, - { - /* Lenovo ThinkPad X240 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"), - }, - .driver_data = (int []){1232, 5710, 1156, 4696}, - }, - { - /* Lenovo ThinkPad T431s */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T431"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo ThinkPad T440s */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo ThinkPad L440 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L440"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo ThinkPad T540p */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"), - }, - .driver_data = (int []){1024, 5056, 2058, 4832}, - }, - { - /* Lenovo ThinkPad L540 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L540"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, - { - /* Lenovo Yoga S1 */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, - "ThinkPad S1 Yoga"), - }, - .driver_data = (int []){1232, 5710, 1156, 4696}, - }, - { - /* Lenovo ThinkPad X1 Carbon Haswell (3rd generation) */ - .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, - "ThinkPad X1 Carbon 2nd"), - }, - .driver_data = (int []){1024, 5112, 2024, 4832}, - }, -#endif - { } -}; - void __init synaptics_module_init(void) { - const struct dmi_system_id *min_max_dmi; - impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table); broken_olpc_ec = dmi_check_system(olpc_dmi_table); - - min_max_dmi = dmi_first_match(min_max_dmi_table); - if (min_max_dmi) - quirk_min_max = min_max_dmi->driver_data; } static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode) diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c index 762b08432de..8b748d99b93 100644 --- a/drivers/input/serio/ambakmi.c +++ b/drivers/input/serio/ambakmi.c @@ -79,7 +79,8 @@ static int amba_kmi_open(struct serio *io) writeb(divisor, KMICLKDIV); writeb(KMICR_EN, KMICR); - ret = request_irq(kmi->irq, amba_kmi_int, 0, "kmi-pl050", kmi); + ret = request_irq(kmi->irq, amba_kmi_int, IRQF_SHARED, "kmi-pl050", + kmi); if (ret) { printk(KERN_ERR "kmi: failed to claim IRQ%d\n", kmi->irq); writeb(0, KMICR); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 
68edc9db2c6..d4e5ab57909 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -550,18 +550,6 @@ config TOUCHSCREEN_TI_AM335X_TSC To compile this driver as a module, choose M here: the module will be called ti_am335x_tsc. -config TOUCHSCREEN_ATMEL_TSADCC - tristate "Atmel Touchscreen Interface" - depends on ARCH_AT91 - help - Say Y here if you have a 4-wire touchscreen connected to the - ADC Controller on your Atmel SoC. - - If unsure, say N. - - To compile this driver as a module, choose M here: the - module will be called atmel_tsadcc. - config TOUCHSCREEN_UCB1400 tristate "Philips UCB1400 touchscreen" depends on AC97_BUS @@ -640,7 +628,7 @@ config TOUCHSCREEN_WM9713 config TOUCHSCREEN_WM97XX_ATMEL tristate "WM97xx Atmel accelerated touch" - depends on TOUCHSCREEN_WM97XX && (AVR32 || ARCH_AT91) + depends on TOUCHSCREEN_WM97XX && AVR32 help Say Y here for support for streaming mode with WM97xx touchscreens on Atmel AT91 or AVR32 systems with an AC97C module. diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index 4bc954b7c7c..03f12a1f221 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -13,7 +13,6 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_I2C) += ad7879-i2c.o obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o -obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c deleted file mode 100644 index a7c9d6967d1..00000000000 --- a/drivers/input/touchscreen/atmel_tsadcc.c +++ /dev/null @@ -1,358 +0,0 @@ -/* - * Atmel Touch Screen Driver - * - * Copyright (c) 2008 ATMEL - * Copyright (c) 2008 Dan Liang - * Copyright (c) 2008 TimeSys Corporation - * Copyright (c) 2008 Justin Waters - * - * Based on touchscreen code from Atmel Corporation. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include <linux/err.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/input.h> -#include <linux/slab.h> -#include <linux/interrupt.h> -#include <linux/clk.h> -#include <linux/platform_device.h> -#include <linux/io.h> -#include <linux/platform_data/atmel.h> -#include <mach/cpu.h> - -/* Register definitions based on AT91SAM9RL64 preliminary draft datasheet */ - -#define ATMEL_TSADCC_CR 0x00 /* Control register */ -#define ATMEL_TSADCC_SWRST (1 << 0) /* Software Reset*/ -#define ATMEL_TSADCC_START (1 << 1) /* Start conversion */ - -#define ATMEL_TSADCC_MR 0x04 /* Mode register */ -#define ATMEL_TSADCC_TSAMOD (3 << 0) /* ADC mode */ -#define ATMEL_TSADCC_TSAMOD_ADC_ONLY_MODE (0x0) /* ADC Mode */ -#define ATMEL_TSADCC_TSAMOD_TS_ONLY_MODE (0x1) /* Touch Screen Only Mode */ -#define ATMEL_TSADCC_LOWRES (1 << 4) /* Resolution selection */ -#define ATMEL_TSADCC_SLEEP (1 << 5) /* Sleep mode */ -#define ATMEL_TSADCC_PENDET (1 << 6) /* Pen Detect selection */ -#define ATMEL_TSADCC_PRES (1 << 7) /* Pressure Measurement Selection */ -#define ATMEL_TSADCC_PRESCAL (0x3f << 8) /* Prescalar Rate Selection */ -#define ATMEL_TSADCC_EPRESCAL (0xff << 8) /* Prescalar Rate Selection (Extended) */ -#define ATMEL_TSADCC_STARTUP (0x7f << 16) /* Start Up time */ -#define ATMEL_TSADCC_SHTIM (0xf << 24) /* Sample & Hold time */ -#define ATMEL_TSADCC_PENDBC (0xf << 28) /* Pen Detect debouncing time */ - -#define ATMEL_TSADCC_TRGR 0x08 /* Trigger register */ -#define ATMEL_TSADCC_TRGMOD (7 << 0) /* Trigger mode */ -#define ATMEL_TSADCC_TRGMOD_NONE (0 << 0) -#define ATMEL_TSADCC_TRGMOD_EXT_RISING (1 << 0) -#define ATMEL_TSADCC_TRGMOD_EXT_FALLING (2 << 0) -#define ATMEL_TSADCC_TRGMOD_EXT_ANY (3 << 0) -#define ATMEL_TSADCC_TRGMOD_PENDET (4 << 0) -#define ATMEL_TSADCC_TRGMOD_PERIOD (5 << 0) -#define ATMEL_TSADCC_TRGMOD_CONTINUOUS (6 << 0) -#define ATMEL_TSADCC_TRGPER (0xffff << 16) /* Trigger period */ - -#define ATMEL_TSADCC_TSR 0x0C /* Touch Screen register */ -#define ATMEL_TSADCC_TSFREQ (0xf << 0) /* TS Frequency in Interleaved mode */ -#define ATMEL_TSADCC_TSSHTIM (0xf << 24) /* Sample & Hold time */ - -#define ATMEL_TSADCC_CHER 0x10 /* Channel Enable register */ -#define ATMEL_TSADCC_CHDR 0x14 /* Channel Disable register */ -#define ATMEL_TSADCC_CHSR 0x18 /* Channel Status register */ -#define ATMEL_TSADCC_CH(n) (1 << (n)) /* Channel number */ - -#define ATMEL_TSADCC_SR 0x1C /* Status register */ -#define ATMEL_TSADCC_EOC(n) (1 << ((n)+0)) /* End of conversion for channel N */ -#define ATMEL_TSADCC_OVRE(n) (1 << ((n)+8)) /* Overrun error for channel N */ -#define ATMEL_TSADCC_DRDY (1 << 16) /* Data Ready */ -#define ATMEL_TSADCC_GOVRE (1 << 17) /* General Overrun Error */ -#define ATMEL_TSADCC_ENDRX (1 << 18) /* End of RX Buffer */ -#define ATMEL_TSADCC_RXBUFF (1 << 19) /* TX Buffer full */ -#define ATMEL_TSADCC_PENCNT (1 << 20) /* Pen contact */ -#define ATMEL_TSADCC_NOCNT (1 << 21) /* No contact */ - -#define ATMEL_TSADCC_LCDR 0x20 /* Last Converted Data register */ -#define ATMEL_TSADCC_DATA (0x3ff << 0) /* Channel data */ - -#define ATMEL_TSADCC_IER 0x24 /* Interrupt Enable register */ -#define ATMEL_TSADCC_IDR 0x28 /* Interrupt Disable register */ -#define ATMEL_TSADCC_IMR 0x2C /* Interrupt Mask register */ -#define ATMEL_TSADCC_CDR0 0x30 /* Channel Data 0 */ -#define ATMEL_TSADCC_CDR1 0x34 /* Channel Data 1 */ -#define ATMEL_TSADCC_CDR2 0x38 /* Channel Data 2 */ -#define ATMEL_TSADCC_CDR3 0x3C /* Channel Data 3 */ -#define ATMEL_TSADCC_CDR4 0x40 /* Channel Data 4 */ -#define 
ATMEL_TSADCC_CDR5 0x44 /* Channel Data 5 */ - -#define ATMEL_TSADCC_XPOS 0x50 -#define ATMEL_TSADCC_Z1DAT 0x54 -#define ATMEL_TSADCC_Z2DAT 0x58 - -#define PRESCALER_VAL(x) ((x) >> 8) - -#define ADC_DEFAULT_CLOCK 100000 - -struct atmel_tsadcc { - struct input_dev *input; - char phys[32]; - struct clk *clk; - int irq; - unsigned int prev_absx; - unsigned int prev_absy; - unsigned char bufferedmeasure; -}; - -static void __iomem *tsc_base; - -#define atmel_tsadcc_read(reg) __raw_readl(tsc_base + (reg)) -#define atmel_tsadcc_write(reg, val) __raw_writel((val), tsc_base + (reg)) - -static irqreturn_t atmel_tsadcc_interrupt(int irq, void *dev) -{ - struct atmel_tsadcc *ts_dev = (struct atmel_tsadcc *)dev; - struct input_dev *input_dev = ts_dev->input; - - unsigned int status; - unsigned int reg; - - status = atmel_tsadcc_read(ATMEL_TSADCC_SR); - status &= atmel_tsadcc_read(ATMEL_TSADCC_IMR); - - if (status & ATMEL_TSADCC_NOCNT) { - /* Contact lost */ - reg = atmel_tsadcc_read(ATMEL_TSADCC_MR) | ATMEL_TSADCC_PENDBC; - - atmel_tsadcc_write(ATMEL_TSADCC_MR, reg); - atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_NONE); - atmel_tsadcc_write(ATMEL_TSADCC_IDR, - ATMEL_TSADCC_EOC(3) | ATMEL_TSADCC_NOCNT); - atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_PENCNT); - - input_report_key(input_dev, BTN_TOUCH, 0); - ts_dev->bufferedmeasure = 0; - input_sync(input_dev); - - } else if (status & ATMEL_TSADCC_PENCNT) { - /* Pen detected */ - reg = atmel_tsadcc_read(ATMEL_TSADCC_MR); - reg &= ~ATMEL_TSADCC_PENDBC; - - atmel_tsadcc_write(ATMEL_TSADCC_IDR, ATMEL_TSADCC_PENCNT); - atmel_tsadcc_write(ATMEL_TSADCC_MR, reg); - atmel_tsadcc_write(ATMEL_TSADCC_IER, - ATMEL_TSADCC_EOC(3) | ATMEL_TSADCC_NOCNT); - atmel_tsadcc_write(ATMEL_TSADCC_TRGR, - ATMEL_TSADCC_TRGMOD_PERIOD | (0x0FFF << 16)); - - } else if (status & ATMEL_TSADCC_EOC(3)) { - /* Conversion finished */ - - if (ts_dev->bufferedmeasure) { - /* Last measurement is always discarded, since it can - * be erroneous. - * Always report previous measurement */ - input_report_abs(input_dev, ABS_X, ts_dev->prev_absx); - input_report_abs(input_dev, ABS_Y, ts_dev->prev_absy); - input_report_key(input_dev, BTN_TOUCH, 1); - input_sync(input_dev); - } else - ts_dev->bufferedmeasure = 1; - - /* Now make new measurement */ - ts_dev->prev_absx = atmel_tsadcc_read(ATMEL_TSADCC_CDR3) << 10; - ts_dev->prev_absx /= atmel_tsadcc_read(ATMEL_TSADCC_CDR2); - - ts_dev->prev_absy = atmel_tsadcc_read(ATMEL_TSADCC_CDR1) << 10; - ts_dev->prev_absy /= atmel_tsadcc_read(ATMEL_TSADCC_CDR0); - } - - return IRQ_HANDLED; -} - -/* - * The functions for inserting/removing us as a module. 
- */ - -static int atmel_tsadcc_probe(struct platform_device *pdev) -{ - struct atmel_tsadcc *ts_dev; - struct input_dev *input_dev; - struct resource *res; - struct at91_tsadcc_data *pdata = dev_get_platdata(&pdev->dev); - int err; - unsigned int prsc; - unsigned int reg; - - if (!pdata) - return -EINVAL; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "no mmio resource defined.\n"); - return -ENXIO; - } - - /* Allocate memory for device */ - ts_dev = kzalloc(sizeof(struct atmel_tsadcc), GFP_KERNEL); - if (!ts_dev) { - dev_err(&pdev->dev, "failed to allocate memory.\n"); - return -ENOMEM; - } - platform_set_drvdata(pdev, ts_dev); - - input_dev = input_allocate_device(); - if (!input_dev) { - dev_err(&pdev->dev, "failed to allocate input device.\n"); - err = -EBUSY; - goto err_free_mem; - } - - ts_dev->irq = platform_get_irq(pdev, 0); - if (ts_dev->irq < 0) { - dev_err(&pdev->dev, "no irq ID is designated.\n"); - err = -ENODEV; - goto err_free_dev; - } - - if (!request_mem_region(res->start, resource_size(res), - "atmel tsadcc regs")) { - dev_err(&pdev->dev, "resources is unavailable.\n"); - err = -EBUSY; - goto err_free_dev; - } - - tsc_base = ioremap(res->start, resource_size(res)); - if (!tsc_base) { - dev_err(&pdev->dev, "failed to map registers.\n"); - err = -ENOMEM; - goto err_release_mem; - } - - err = request_irq(ts_dev->irq, atmel_tsadcc_interrupt, 0, - pdev->dev.driver->name, ts_dev); - if (err) { - dev_err(&pdev->dev, "failed to allocate irq.\n"); - goto err_unmap_regs; - } - - ts_dev->clk = clk_get(&pdev->dev, "tsc_clk"); - if (IS_ERR(ts_dev->clk)) { - dev_err(&pdev->dev, "failed to get ts_clk\n"); - err = PTR_ERR(ts_dev->clk); - goto err_free_irq; - } - - ts_dev->input = input_dev; - ts_dev->bufferedmeasure = 0; - - snprintf(ts_dev->phys, sizeof(ts_dev->phys), - "%s/input0", dev_name(&pdev->dev)); - - input_dev->name = "atmel touch screen controller"; - input_dev->phys = ts_dev->phys; - input_dev->dev.parent = &pdev->dev; - - __set_bit(EV_ABS, input_dev->evbit); - input_set_abs_params(input_dev, ABS_X, 0, 0x3FF, 0, 0); - input_set_abs_params(input_dev, ABS_Y, 0, 0x3FF, 0, 0); - - input_set_capability(input_dev, EV_KEY, BTN_TOUCH); - - /* clk_enable() always returns 0, no need to check it */ - clk_enable(ts_dev->clk); - - prsc = clk_get_rate(ts_dev->clk); - dev_info(&pdev->dev, "Master clock is set at: %d Hz\n", prsc); - - if (!pdata->adc_clock) - pdata->adc_clock = ADC_DEFAULT_CLOCK; - - prsc = (prsc / (2 * pdata->adc_clock)) - 1; - - /* saturate if this value is too high */ - if (cpu_is_at91sam9rl()) { - if (prsc > PRESCALER_VAL(ATMEL_TSADCC_PRESCAL)) - prsc = PRESCALER_VAL(ATMEL_TSADCC_PRESCAL); - } else { - if (prsc > PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL)) - prsc = PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL); - } - - dev_info(&pdev->dev, "Prescaler is set at: %d\n", prsc); - - reg = ATMEL_TSADCC_TSAMOD_TS_ONLY_MODE | - ((0x00 << 5) & ATMEL_TSADCC_SLEEP) | /* Normal Mode */ - ((0x01 << 6) & ATMEL_TSADCC_PENDET) | /* Enable Pen Detect */ - (prsc << 8) | - ((0x26 << 16) & ATMEL_TSADCC_STARTUP) | - ((pdata->pendet_debounce << 28) & ATMEL_TSADCC_PENDBC); - - atmel_tsadcc_write(ATMEL_TSADCC_CR, ATMEL_TSADCC_SWRST); - atmel_tsadcc_write(ATMEL_TSADCC_MR, reg); - atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_NONE); - atmel_tsadcc_write(ATMEL_TSADCC_TSR, - (pdata->ts_sample_hold_time << 24) & ATMEL_TSADCC_TSSHTIM); - - atmel_tsadcc_read(ATMEL_TSADCC_SR); - atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_PENCNT); - - /* All went ok, 
so register to the input system */ - err = input_register_device(input_dev); - if (err) - goto err_fail; - - return 0; - -err_fail: - clk_disable(ts_dev->clk); - clk_put(ts_dev->clk); -err_free_irq: - free_irq(ts_dev->irq, ts_dev); -err_unmap_regs: - iounmap(tsc_base); -err_release_mem: - release_mem_region(res->start, resource_size(res)); -err_free_dev: - input_free_device(input_dev); -err_free_mem: - kfree(ts_dev); - return err; -} - -static int atmel_tsadcc_remove(struct platform_device *pdev) -{ - struct atmel_tsadcc *ts_dev = platform_get_drvdata(pdev); - struct resource *res; - - free_irq(ts_dev->irq, ts_dev); - - input_unregister_device(ts_dev->input); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - iounmap(tsc_base); - release_mem_region(res->start, resource_size(res)); - - clk_disable(ts_dev->clk); - clk_put(ts_dev->clk); - - kfree(ts_dev); - - return 0; -} - -static struct platform_driver atmel_tsadcc_driver = { - .probe = atmel_tsadcc_probe, - .remove = atmel_tsadcc_remove, - .driver = { - .name = "atmel_tsadcc", - }, -}; -module_platform_driver(atmel_tsadcc_driver); - -MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("Atmel TouchScreen Driver"); -MODULE_AUTHOR("Dan Liang <dan.liang@atmel.com>"); - diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c949520bd19..57068e8035b 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -3999,7 +3999,7 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) iommu_flush_dte(iommu, devid); if (devid != alias) { irq_lookup_table[alias] = table; - set_dte_irq_entry(devid, table); + set_dte_irq_entry(alias, table); iommu_flush_dte(iommu, alias); } diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index b76c58dbe30..0e08545d729 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -788,7 +788,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) * per device. But we can enable the exclusion range per * device. 
This is done here */ - set_dev_entry_bit(m->devid, DEV_ENTRY_EX); + set_dev_entry_bit(devid, DEV_ENTRY_EX); iommu->exclusion_start = m->range_start; iommu->exclusion_length = m->range_length; } diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c index 5208828792e..203b2e6a91c 100644 --- a/drivers/iommu/amd_iommu_v2.c +++ b/drivers/iommu/amd_iommu_v2.c @@ -504,8 +504,10 @@ static void do_fault(struct work_struct *work) write = !!(fault->flags & PPR_FAULT_WRITE); + down_read(&fault->state->mm->mmap_sem); npages = get_user_pages(fault->state->task, fault->state->mm, fault->address, 1, write, 0, &page, NULL); + up_read(&fault->state->mm->mmap_sem); if (npages == 1) { put_page(page); diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 074018979cd..2ca0744b0a4 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1011,13 +1011,13 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, } static struct iommu_ops exynos_iommu_ops = { - .domain_init = &exynos_iommu_domain_init, - .domain_destroy = &exynos_iommu_domain_destroy, - .attach_dev = &exynos_iommu_attach_device, - .detach_dev = &exynos_iommu_detach_device, - .map = &exynos_iommu_map, - .unmap = &exynos_iommu_unmap, - .iova_to_phys = &exynos_iommu_iova_to_phys, + .domain_init = exynos_iommu_domain_init, + .domain_destroy = exynos_iommu_domain_destroy, + .attach_dev = exynos_iommu_attach_device, + .detach_dev = exynos_iommu_detach_device, + .map = exynos_iommu_map, + .unmap = exynos_iommu_unmap, + .iova_to_phys = exynos_iommu_iova_to_phys, .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, }; diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 41be897df8d..c887e6eebc4 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -19,6 +19,7 @@ #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> +#include <linux/cpu.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -41,6 +42,7 @@ #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) +#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) #define ARMADA_375_PPI_CAUSE (0x10) @@ -132,8 +134,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, struct msi_desc *desc) { struct msi_msg msg; - irq_hw_number_t hwirq; - int virq; + int virq, hwirq; hwirq = armada_370_xp_alloc_msi(); if (hwirq < 0) @@ -159,8 +160,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, unsigned int irq) { struct irq_data *d = irq_get_irq_data(irq); + unsigned long hwirq = d->hwirq; + irq_dispose_mapping(irq); - armada_370_xp_free_msi(d->hwirq); + armada_370_xp_free_msi(hwirq); +} + +static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev, + int nvec, int type) +{ + /* We support MSI, but not MSI-X */ + if (type == PCI_CAP_ID_MSI) + return 0; + return -EINVAL; } static struct irq_chip armada_370_xp_msi_irq_chip = { @@ -201,6 +213,7 @@ static int armada_370_xp_msi_init(struct device_node *node, msi_chip->setup_irq = armada_370_xp_setup_msi_irq; msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; + msi_chip->check_device = armada_370_xp_check_msi_device; msi_chip->of_node = node; armada_370_xp_msi_domain = @@ -244,35 +257,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock); static 
int armada_xp_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force) { - unsigned long reg; - unsigned long new_mask = 0; - unsigned long online_mask = 0; - unsigned long count = 0; irq_hw_number_t hwirq = irqd_to_hwirq(d); + unsigned long reg, mask; int cpu; - for_each_cpu(cpu, mask_val) { - new_mask |= 1 << cpu_logical_map(cpu); - count++; - } - - /* - * Forbid mutlicore interrupt affinity - * This is required since the MPIC HW doesn't limit - * several CPUs from acknowledging the same interrupt. - */ - if (count > 1) - return -EINVAL; - - for_each_cpu(cpu, cpu_online_mask) - online_mask |= 1 << cpu_logical_map(cpu); + /* Select a single core from the affinity mask which is online */ + cpu = cpumask_any_and(mask_val, cpu_online_mask); + mask = 1UL << cpu_logical_map(cpu); raw_spin_lock(&irq_controller_lock); - reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - reg = (reg & (~online_mask)) | new_mask; + reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask; writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); - raw_spin_unlock(&irq_controller_lock); return 0; @@ -315,7 +311,8 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, } #ifdef CONFIG_SMP -void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq) +static void armada_mpic_send_doorbell(const struct cpumask *mask, + unsigned int irq) { int cpu; unsigned long map = 0; @@ -335,7 +332,7 @@ void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq) ARMADA_370_XP_SW_TRIG_INT_OFFS); } -void armada_xp_mpic_smp_cpu_init(void) +static void armada_xp_mpic_smp_cpu_init(void) { /* Clear pending IPIs */ writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); @@ -347,6 +344,20 @@ void armada_xp_mpic_smp_cpu_init(void) /* Unmask IPI interrupt */ writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); } + +static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + armada_xp_mpic_smp_cpu_init(); + return NOTIFY_OK; +} + +static struct notifier_block armada_370_xp_mpic_cpu_notifier = { + .notifier_call = armada_xp_mpic_secondary_init, + .priority = 100, +}; + #endif /* CONFIG_SMP */ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { @@ -494,15 +505,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, #ifdef CONFIG_SMP armada_xp_mpic_smp_cpu_init(); - - /* - * Set the default affinity from all CPUs to the boot cpu. - * This is required since the MPIC doesn't limit several CPUs - * from acknowledging the same interrupt. 
- */ - cpumask_clear(irq_default_affinity); - cpumask_set_cpu(smp_processor_id(), irq_default_affinity); - #endif armada_370_xp_msi_init(node, main_int_res.start); @@ -511,6 +513,10 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, if (parent_irq <= 0) { irq_set_default_host(armada_370_xp_mpic_domain); set_handle_irq(armada_370_xp_handle_irq); +#ifdef CONFIG_SMP + set_smp_cross_call(armada_mpic_send_doorbell); + register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); +#endif } else { irq_set_chained_handler(parent_irq, armada_370_xp_mpic_handle_cascade_irq); diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index fc817d28d1f..3d15d16a708 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c @@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node) int i, size, max, reserved = 0, entry; const __be32 *irqsr; - cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); + cb = kzalloc(sizeof(*cb), GFP_KERNEL); if (!cb) return -ENOMEM; diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c index e25f246cd2f..34d18b48bb7 100644 --- a/drivers/irqchip/irq-orion.c +++ b/drivers/irqchip/irq-orion.c @@ -42,7 +42,7 @@ __exception_irq_entry orion_handle_irq(struct pt_regs *regs) u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) & gc->mask_cache; while (stat) { - u32 hwirq = ffs(stat) - 1; + u32 hwirq = __fls(stat); u32 irq = irq_find_mapping(orion_irq_domain, gc->irq_base + hwirq); handle_IRQ(irq, regs); @@ -117,7 +117,7 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc) gc->mask_cache; while (stat) { - u32 hwirq = ffs(stat) - 1; + u32 hwirq = __fls(stat); generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq)); stat &= ~(1 << hwirq); diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c index 581eefe331a..5e54f6d71e7 100644 --- a/drivers/irqchip/irq-sirfsoc.c +++ b/drivers/irqchip/irq-sirfsoc.c @@ -58,7 +58,8 @@ static void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs) handle_IRQ(irqnr, regs); } -static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *parent) +static int __init sirfsoc_irq_init(struct device_node *np, + struct device_node *parent) { void __iomem *base = of_iomap(np, 0); if (!base) diff --git a/drivers/isdn/hisax/icc.c b/drivers/isdn/hisax/icc.c index 51dae916723..96d1df05044 100644 --- a/drivers/isdn/hisax/icc.c +++ b/drivers/isdn/hisax/icc.c @@ -425,7 +425,7 @@ afterXPR: if (cs->debug & L1_DEB_MONITOR) debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]); } - AfterMOX1: + AfterMOX1: ; #endif } } diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 6de9dfbf61c..39e717797cc 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -487,6 +487,14 @@ config LEDS_BLINKM This option enables support for the BlinkM RGB LED connected through I2C. Say Y to enable support for the BlinkM LED. +config LEDS_VERSATILE + tristate "LED support for the ARM Versatile and RealView" + depends on ARCH_REALVIEW || ARCH_VERSATILE + depends on LEDS_CLASS + help + This option enables support for the LEDs on the ARM Versatile + and RealView boards. Say Y to enable these.
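A detail worth pulling out of the irq-orion hunks above: replacing ffs(stat) - 1 with __fls(stat) flips the dispatch loop from servicing the lowest-numbered pending interrupt first to servicing the highest-numbered one first. A quick stand-alone illustration; fls_high() is an assumed user-space stand-in for the kernel's __fls():

#include <stdio.h>
#include <strings.h>    /* ffs() */

/* index of the most significant set bit; undefined for x == 0 */
static unsigned int fls_high(unsigned int x)
{
    return 31 - __builtin_clz(x);
}

int main(void)
{
    unsigned int stat = 0x22;   /* sources 1 and 5 pending */

    printf("ffs(stat) - 1 = %d (lowest pending source)\n", ffs(stat) - 1);
    printf("fls_high(stat) = %u (highest pending source)\n", fls_high(stat));
    return 0;
}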
+ comment "LED Triggers" source "drivers/leds/trigger/Kconfig" diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 3cd76dbd9be..8b4c956e11b 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -54,6 +54,7 @@ obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o +obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/leds-versatile.c b/drivers/leds/leds-versatile.c new file mode 100644 index 00000000000..80553022d66 --- /dev/null +++ b/drivers/leds/leds-versatile.c @@ -0,0 +1,110 @@ +/* + * Driver for the 8 user LEDs found on the RealViews and Versatiles + * Based on DaVinci's DM365 board code + * + * License terms: GNU General Public License (GPL) version 2 + * Author: Linus Walleij <triad@df.lth.se> + */ +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/leds.h> +#include <linux/platform_device.h> + +struct versatile_led { + void __iomem *base; + struct led_classdev cdev; + u8 mask; +}; + +/* + * The triggers listed below will only be used if the + * LED triggers are compiled in. + */ +static const struct { + const char *name; + const char *trigger; +} versatile_leds[] = { + { "versatile:0", "heartbeat", }, + { "versatile:1", "mmc0", }, + { "versatile:2", "cpu0" }, + { "versatile:3", "cpu1" }, + { "versatile:4", "cpu2" }, + { "versatile:5", "cpu3" }, + { "versatile:6", }, + { "versatile:7", }, +}; + +static void versatile_led_set(struct led_classdev *cdev, + enum led_brightness b) +{ + struct versatile_led *led = container_of(cdev, + struct versatile_led, cdev); + u32 reg = readl(led->base); + + if (b != LED_OFF) + reg |= led->mask; + else + reg &= ~led->mask; + writel(reg, led->base); +} + +static enum led_brightness versatile_led_get(struct led_classdev *cdev) +{ + struct versatile_led *led = container_of(cdev, + struct versatile_led, cdev); + u32 reg = readl(led->base); + + return (reg & led->mask) ?
LED_FULL : LED_OFF; +} + +static int versatile_leds_probe(struct platform_device *dev) +{ + int i; + struct resource *res; + void __iomem *base; + + res = platform_get_resource(dev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&dev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + /* All off */ + writel(0, base); + for (i = 0; i < ARRAY_SIZE(versatile_leds); i++) { + struct versatile_led *led; + + led = kzalloc(sizeof(*led), GFP_KERNEL); + if (!led) + break; + + led->base = base; + led->cdev.name = versatile_leds[i].name; + led->cdev.brightness_set = versatile_led_set; + led->cdev.brightness_get = versatile_led_get; + led->cdev.default_trigger = versatile_leds[i].trigger; + led->mask = BIT(i); + + if (led_classdev_register(NULL, &led->cdev) < 0) { + kfree(led); + break; + } + } + + return 0; +} + +static struct platform_driver versatile_leds_driver = { + .driver = { + .name = "versatile-leds", + }, + .probe = versatile_leds_probe, +}; + +module_platform_driver(versatile_leds_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("ARM Versatile LED driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1bf4a71919e..5f054c44b48 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -2178,6 +2178,8 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->num_discard_bios = 1; ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; + /* Discard bios must be split on a block boundary */ + ti->split_discard_bios = true; cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); @@ -2488,6 +2490,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio) } else { inc_hit_counter(cache, bio); + pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds); if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) && !is_dirty(cache, lookup_result.cblock)) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 784695d22fd..53b213226c0 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -19,7 +19,6 @@ #include <linux/crypto.h> #include <linux/workqueue.h> #include <linux/backing-dev.h> -#include <linux/percpu.h> #include <linux/atomic.h> #include <linux/scatterlist.h> #include <asm/page.h> @@ -43,6 +42,7 @@ struct convert_context { struct bvec_iter iter_out; sector_t cc_sector; atomic_t cc_pending; + struct ablkcipher_request *req; }; /* @@ -111,15 +111,7 @@ struct iv_tcw_private { enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; /* - * Duplicated per-CPU state for cipher. - */ -struct crypt_cpu { - struct ablkcipher_request *req; -}; - -/* - * The fields in here must be read only after initialization, - * changing state should be in crypt_cpu. + * The fields in here must be read only after initialization. */ struct crypt_config { struct dm_dev *dev; @@ -150,12 +142,6 @@ struct crypt_config { sector_t iv_offset; unsigned int iv_size; - /* - * Duplicated per cpu state. Access through - * per_cpu_ptr() only. 
- */ - struct crypt_cpu __percpu *cpu; - /* ESSIV: struct crypto_cipher *essiv_tfm */ void *iv_private; struct crypto_ablkcipher **tfms; @@ -192,11 +178,6 @@ static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); -static struct crypt_cpu *this_crypt_config(struct crypt_config *cc) -{ - return this_cpu_ptr(cc->cpu); -} - /* * Use this to access cipher attributes that are the same for each CPU. */ @@ -903,16 +884,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, static void crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); - if (!this_cc->req) - this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO); + if (!ctx->req) + ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO); - ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]); - ablkcipher_request_set_callback(this_cc->req, + ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]); + ablkcipher_request_set_callback(ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, - kcryptd_async_done, dmreq_of_req(cc, this_cc->req)); + kcryptd_async_done, dmreq_of_req(cc, ctx->req)); } /* @@ -921,7 +901,6 @@ static void crypt_alloc_req(struct crypt_config *cc, static int crypt_convert(struct crypt_config *cc, struct convert_context *ctx) { - struct crypt_cpu *this_cc = this_crypt_config(cc); int r; atomic_set(&ctx->cc_pending, 1); @@ -932,7 +911,7 @@ static int crypt_convert(struct crypt_config *cc, atomic_inc(&ctx->cc_pending); - r = crypt_convert_block(cc, ctx, this_cc->req); + r = crypt_convert_block(cc, ctx, ctx->req); switch (r) { /* async */ @@ -941,7 +920,7 @@ static int crypt_convert(struct crypt_config *cc, reinit_completion(&ctx->restart); /* fall through*/ case -EINPROGRESS: - this_cc->req = NULL; + ctx->req = NULL; ctx->cc_sector++; continue; @@ -1040,6 +1019,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc, io->sector = sector; io->error = 0; io->base_io = NULL; + io->ctx.req = NULL; atomic_set(&io->io_pending, 0); return io; @@ -1065,6 +1045,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->io_pending)) return; + if (io->ctx.req) + mempool_free(io->ctx.req, cc->req_pool); mempool_free(io, cc->io_pool); if (likely(!base_io)) @@ -1492,8 +1474,6 @@ static int crypt_wipe_key(struct crypt_config *cc) static void crypt_dtr(struct dm_target *ti) { struct crypt_config *cc = ti->private; - struct crypt_cpu *cpu_cc; - int cpu; ti->private = NULL; @@ -1505,13 +1485,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->crypt_queue) destroy_workqueue(cc->crypt_queue); - if (cc->cpu) - for_each_possible_cpu(cpu) { - cpu_cc = per_cpu_ptr(cc->cpu, cpu); - if (cpu_cc->req) - mempool_free(cpu_cc->req, cc->req_pool); - } - crypt_free_tfms(cc); if (cc->bs) @@ -1530,9 +1503,6 @@ static void crypt_dtr(struct dm_target *ti) if (cc->dev) dm_put_device(ti, cc->dev); - if (cc->cpu) - free_percpu(cc->cpu); - kzfree(cc->cipher); kzfree(cc->cipher_string); @@ -1588,13 +1558,6 @@ static int crypt_ctr_cipher(struct dm_target *ti, if (tmp) DMWARN("Ignoring unexpected additional cipher options"); - cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)), - __alignof__(struct crypt_cpu)); - if (!cc->cpu) { - ti->error = "Cannot allocate per cpu state"; - goto bad_mem; - } - /* * For compatibility with the 
original dm-crypt mapping format, if * only the cipher name is supplied, use cbc-plain. diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aa009e86587..ebfa411d1a7 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -445,11 +445,11 @@ static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path, else m->saved_queue_if_no_path = queue_if_no_path; m->queue_if_no_path = queue_if_no_path; - if (!m->queue_if_no_path) - dm_table_run_md_queue_async(m->ti->table); - spin_unlock_irqrestore(&m->lock, flags); + if (!queue_if_no_path) + dm_table_run_md_queue_async(m->ti->table); + return 0; } @@ -954,7 +954,7 @@ out: */ static int reinstate_path(struct pgpath *pgpath) { - int r = 0; + int r = 0, run_queue = 0; unsigned long flags; struct multipath *m = pgpath->pg->m; @@ -978,7 +978,7 @@ static int reinstate_path(struct pgpath *pgpath) if (!m->nr_valid_paths++) { m->current_pgpath = NULL; - dm_table_run_md_queue_async(m->ti->table); + run_queue = 1; } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) m->pg_init_in_progress++; @@ -991,6 +991,8 @@ static int reinstate_path(struct pgpath *pgpath) out: spin_unlock_irqrestore(&m->lock, flags); + if (run_queue) + dm_table_run_md_queue_async(m->ti->table); return r; } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 53728be84de..242ac2ea5f2 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -27,6 +27,9 @@ #define MAPPING_POOL_SIZE 1024 #define PRISON_CELLS 1024 #define COMMIT_PERIOD HZ +#define NO_SPACE_TIMEOUT_SECS 60 + +static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS; DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle, "A percentage of time allocated for copy on write"); @@ -175,6 +178,7 @@ struct pool { struct workqueue_struct *wq; struct work_struct worker; struct delayed_work waker; + struct delayed_work no_space_timeout; unsigned long last_commit_jiffies; unsigned ref_count; @@ -232,6 +236,13 @@ struct thin_c { struct bio_list deferred_bio_list; struct bio_list retry_on_resume_list; struct rb_root sort_bio_list; /* sorted list of deferred bios */ + + /* + * Ensures the thin is not destroyed until the worker has finished + * iterating the active_thins list. + */ + atomic_t refcount; + struct completion can_destroy; }; /*----------------------------------------------------------------*/ @@ -928,7 +939,7 @@ static int commit(struct pool *pool) { int r; - if (get_pool_mode(pool) != PM_WRITE) + if (get_pool_mode(pool) >= PM_READ_ONLY) return -EINVAL; r = dm_pool_commit_metadata(pool->pmd); @@ -1486,6 +1497,45 @@ static void process_thin_deferred_bios(struct thin_c *tc) blk_finish_plug(&plug); } +static void thin_get(struct thin_c *tc); +static void thin_put(struct thin_c *tc); + +/* + * We can't hold rcu_read_lock() around code that can block. So we + * find a thin with the rcu lock held; bump a refcount; then drop + * the lock. 
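The comment above summarizes the get_first_thin()/get_next_thin() scheme that follows: hold the RCU read lock only long enough to pin the next thin with a refcount, then drop the lock before doing anything that can block. A compilable user-space analogue of that handoff, where a plain mutex stands in for rcu_read_lock() and all names are illustrative (the driver's thin_put() additionally completes can_destroy on the final put):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
    struct node *next;
    atomic_int refcount;
    int id;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

static void node_get(struct node *n) { atomic_fetch_add(&n->refcount, 1); }
static void node_put(struct node *n) { atomic_fetch_sub(&n->refcount, 1); }

/* Hold the lock only to pin the next element, never across blocking work. */
static struct node *next_pinned(struct node *cur)
{
    struct node *next;

    pthread_mutex_lock(&list_lock);        /* stands in for rcu_read_lock() */
    next = cur ? cur->next : head;
    if (next)
        node_get(next);                    /* pin before the lock is dropped */
    pthread_mutex_unlock(&list_lock);

    if (cur)
        node_put(cur);                     /* safe: *cur is no longer touched */
    return next;
}

int main(void)
{
    struct node b = { .next = NULL, .refcount = 1, .id = 2 };
    struct node a = { .next = &b, .refcount = 1, .id = 1 };

    head = &a;
    for (struct node *n = next_pinned(NULL); n; n = next_pinned(n))
        printf("visiting thin %d; blocking work is safe here\n", n->id);
    return 0;
}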
+ */ +static struct thin_c *get_first_thin(struct pool *pool) +{ + struct thin_c *tc = NULL; + + rcu_read_lock(); + if (!list_empty(&pool->active_thins)) { + tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); + thin_get(tc); + } + rcu_read_unlock(); + + return tc; +} + +static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) +{ + struct thin_c *old_tc = tc; + + rcu_read_lock(); + list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { + thin_get(tc); + thin_put(old_tc); + rcu_read_unlock(); + return tc; + } + thin_put(old_tc); + rcu_read_unlock(); + + return NULL; +} + static void process_deferred_bios(struct pool *pool) { unsigned long flags; @@ -1493,10 +1543,11 @@ static void process_deferred_bios(struct pool *pool) struct bio_list bios; struct thin_c *tc; - rcu_read_lock(); - list_for_each_entry_rcu(tc, &pool->active_thins, list) + tc = get_first_thin(pool); + while (tc) { process_thin_deferred_bios(tc); - rcu_read_unlock(); + tc = get_next_thin(pool, tc); + } /* * If there are any deferred flush bios, we must commit @@ -1543,6 +1594,20 @@ static void do_waker(struct work_struct *ws) queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); } +/* + * We're holding onto IO to allow userland time to react. After the + * timeout either the pool will have been resized (and thus back in + * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO. + */ +static void do_no_space_timeout(struct work_struct *ws) +{ + struct pool *pool = container_of(to_delayed_work(ws), struct pool, + no_space_timeout); + + if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) + set_pool_mode(pool, PM_READ_ONLY); +} + /*----------------------------------------------------------------*/ struct noflush_work { @@ -1578,7 +1643,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) { struct noflush_work w; - INIT_WORK(&w.worker, fn); + INIT_WORK_ONSTACK(&w.worker, fn); w.tc = tc; atomic_set(&w.complete, 0); init_waitqueue_head(&w.wait); @@ -1607,6 +1672,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) struct pool_c *pt = pool->ti->private; bool needs_check = dm_pool_metadata_needs_check(pool->pmd); enum pool_mode old_mode = get_pool_mode(pool); + unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; /* * Never allow the pool to transition to PM_WRITE mode if user @@ -1668,6 +1734,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) pool->process_discard = process_discard; pool->process_prepared_mapping = process_prepared_mapping; pool->process_prepared_discard = process_prepared_discard_passdown; + + if (!pool->pf.error_if_no_space && no_space_timeout) + queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); break; case PM_WRITE: @@ -2053,6 +2122,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, INIT_WORK(&pool->worker, do_worker); INIT_DELAYED_WORK(&pool->waker, do_waker); + INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); spin_lock_init(&pool->lock); bio_list_init(&pool->deferred_flush_bios); INIT_LIST_HEAD(&pool->prepared_mappings); @@ -2615,6 +2685,7 @@ static void pool_postsuspend(struct dm_target *ti) struct pool *pool = pt->pool; cancel_delayed_work(&pool->waker); + cancel_delayed_work(&pool->no_space_timeout); flush_workqueue(pool->wq); (void) commit(pool); } @@ -3061,11 +3132,25 @@ static struct target_type pool_target = { 
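/*
 * Shape of the no-space timeout wired up above: entering
 * PM_OUT_OF_DATA_SPACE arms a delayed work item (unless the pool is
 * configured to error immediately), pool_postsuspend() cancels it,
 * and if it fires with the pool still out of data space the pool
 * drops to read-only. A condensed sketch of the arming side:
 */
static void demo_arm_no_space_timeout(struct pool *pool, unsigned secs)
{
	if (secs)	/* a timeout of 0 disables the fallback */
		queue_delayed_work(pool->wq, &pool->no_space_timeout,
				   secs * HZ);
	/* do_no_space_timeout() above handles the expiry side */
}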
/*---------------------------------------------------------------- * Thin target methods *--------------------------------------------------------------*/ +static void thin_get(struct thin_c *tc) +{ + atomic_inc(&tc->refcount); +} + +static void thin_put(struct thin_c *tc) +{ + if (atomic_dec_and_test(&tc->refcount)) + complete(&tc->can_destroy); +} + static void thin_dtr(struct dm_target *ti) { struct thin_c *tc = ti->private; unsigned long flags; + thin_put(tc); + wait_for_completion(&tc->can_destroy); + spin_lock_irqsave(&tc->pool->lock, flags); list_del_rcu(&tc->list); spin_unlock_irqrestore(&tc->pool->lock, flags); @@ -3101,6 +3186,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) struct thin_c *tc; struct dm_dev *pool_dev, *origin_dev; struct mapped_device *pool_md; + unsigned long flags; mutex_lock(&dm_thin_pool_table.mutex); @@ -3191,9 +3277,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv) mutex_unlock(&dm_thin_pool_table.mutex); - spin_lock(&tc->pool->lock); + atomic_set(&tc->refcount, 1); + init_completion(&tc->can_destroy); + + spin_lock_irqsave(&tc->pool->lock, flags); list_add_tail_rcu(&tc->list, &tc->pool->active_thins); - spin_unlock(&tc->pool->lock); + spin_unlock_irqrestore(&tc->pool->lock, flags); /* * This synchronize_rcu() call is needed here otherwise we risk a * wake_worker() call finding no bios to process (because the newly @@ -3422,6 +3511,9 @@ static void dm_thin_exit(void) module_init(dm_thin_init); module_exit(dm_thin_exit); +module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds"); + MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 796007a5e0e..7a7bab8947a 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c @@ -330,15 +330,17 @@ test_block_hash: return r; } } - todo = 1 << v->data_dev_block_bits; - while (io->iter.bi_size) { + do { u8 *page; + unsigned len; struct bio_vec bv = bio_iter_iovec(bio, io->iter); page = kmap_atomic(bv.bv_page); - r = crypto_shash_update(desc, page + bv.bv_offset, - bv.bv_len); + len = bv.bv_len; + if (likely(len >= todo)) + len = todo; + r = crypto_shash_update(desc, page + bv.bv_offset, len); kunmap_atomic(page); if (r < 0) { @@ -346,8 +348,9 @@ test_block_hash: return r; } - bio_advance_iter(bio, &io->iter, bv.bv_len); - } + bio_advance_iter(bio, &io->iter, len); + todo -= len; + } while (todo); if (!v->version) { r = crypto_shash_update(desc, v->salt, v->salt_size); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 455e6491649..6a71bc7c913 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1544,7 +1544,6 @@ static int setup_clone(struct request *clone, struct request *rq, clone->cmd = rq->cmd; clone->cmd_len = rq->cmd_len; clone->sense = rq->sense; - clone->buffer = rq->buffer; clone->end_io = end_clone_request; clone->end_io_data = tio; diff --git a/drivers/md/md.c b/drivers/md/md.c index 8fda38d23e3..237b7e0ddc7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8516,7 +8516,8 @@ static int md_notify_reboot(struct notifier_block *this, if (mddev_trylock(mddev)) { if (mddev->pers) __md_stop_writes(mddev); - mddev->safemode = 2; + if (mddev->persistent) + mddev->safemode = 2; mddev_unlock(mddev); } need_delay = 1; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 
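/*
 * thin_get()/thin_put()/thin_dtr() above implement a reusable
 * teardown idiom: the object starts with one reference owned by its
 * creator; destruction drops that reference and then waits until
 * every transient borrower (here, the pool worker walking
 * active_thins) has dropped theirs. Generic form, with demo_obj
 * standing in for struct thin_c:
 */
struct demo_obj {
	atomic_t refcount;
	struct completion can_destroy;
};

static void demo_obj_init(struct demo_obj *o)
{
	atomic_set(&o->refcount, 1);	/* the owner's reference */
	init_completion(&o->can_destroy);
}

static void demo_obj_put(struct demo_obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		complete(&o->can_destroy);
}

static void demo_obj_destroy(struct demo_obj *o)
{
	demo_obj_put(o);			/* drop the owner's ref */
	wait_for_completion(&o->can_destroy);	/* wait out borrowers */
	/* now safe to unlink and free */
}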
33fc408e5ea..cb882aae9e2 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1172,6 +1172,13 @@ static void __make_request(struct mddev *mddev, struct bio *bio) int max_sectors; int sectors; + /* + * Register the new request and wait if the reconstruction + * thread has put up a bar for new requests. + * Continue immediately if no resync is active currently. + */ + wait_barrier(conf); + sectors = bio_sectors(bio); while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && bio->bi_iter.bi_sector < conf->reshape_progress && @@ -1552,12 +1559,6 @@ static void make_request(struct mddev *mddev, struct bio *bio) md_write_start(mddev, bio); - /* - * Register the new request and wait if the reconstruction - * thread has put up a bar for new requests. - * Continue immediately if no resync is active currently. - */ - wait_barrier(conf); do { diff --git a/drivers/media/i2c/ov7670.c b/drivers/media/i2c/ov7670.c index e8a1ce20403..cdd7c1b7259 100644 --- a/drivers/media/i2c/ov7670.c +++ b/drivers/media/i2c/ov7670.c @@ -1109,7 +1109,7 @@ static int ov7670_enum_framesizes(struct v4l2_subdev *sd, * windows that fall outside that. */ for (i = 0; i < n_win_sizes; i++) { - struct ov7670_win_size *win = &info->devtype->win_sizes[index]; + struct ov7670_win_size *win = &info->devtype->win_sizes[i]; if (info->min_width && win->width < info->min_width) continue; if (info->min_height && win->height < info->min_height) diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c index a4459301b5f..ee0f57e01b5 100644 --- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c +++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c @@ -1616,7 +1616,7 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state) if (ret < 0) return -EINVAL; - node_ep = v4l2_of_get_next_endpoint(node, NULL); + node_ep = of_graph_get_next_endpoint(node, NULL); if (!node_ep) { dev_warn(dev, "no endpoint defined for node: %s\n", node->full_name); diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c index d5a7a135f75..703560fa5e7 100644 --- a/drivers/media/media-device.c +++ b/drivers/media/media-device.c @@ -93,6 +93,7 @@ static long media_device_enum_entities(struct media_device *mdev, struct media_entity *ent; struct media_entity_desc u_ent; + memset(&u_ent, 0, sizeof(u_ent)); if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id))) return -EFAULT; diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c index b4f12d00be0..65670825296 100644 --- a/drivers/media/platform/davinci/vpbe_display.c +++ b/drivers/media/platform/davinci/vpbe_display.c @@ -372,18 +372,32 @@ static int vpbe_stop_streaming(struct vb2_queue *vq) { struct vpbe_fh *fh = vb2_get_drv_priv(vq); struct vpbe_layer *layer = fh->layer; + struct vpbe_display *disp = fh->disp_dev; + unsigned long flags; if (!vb2_is_streaming(vq)) return 0; /* release all active buffers */ + spin_lock_irqsave(&disp->dma_queue_lock, flags); + if (layer->cur_frm == layer->next_frm) { + vb2_buffer_done(&layer->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (layer->cur_frm != NULL) + vb2_buffer_done(&layer->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (layer->next_frm != NULL) + vb2_buffer_done(&layer->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&layer->dma_queue)) { layer->next_frm = list_entry(layer->dma_queue.next, struct vpbe_disp_buffer, list); list_del(&layer->next_frm->list); vb2_buffer_done(&layer->next_frm->vb, VB2_BUF_STATE_ERROR); } - + 
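/*
 * The media-device hunk above is a classic infoleak fix: any struct
 * later copied back to user space must be zeroed first, or padding
 * and members the handler never writes leak kernel stack contents.
 * Minimal shape of the safe pattern (demo_desc is a hypothetical
 * stand-in for struct media_entity_desc):
 */
struct demo_desc {
	u32 id;
	char name[32];
};

static long demo_enum(struct demo_desc __user *uent)
{
	struct demo_desc u_ent;

	memset(&u_ent, 0, sizeof(u_ent));	/* no stale stack bytes */
	if (copy_from_user(&u_ent.id, &uent->id, sizeof(u_ent.id)))
		return -EFAULT;
	/* ...look up the entity, fill in only the supported fields... */
	if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
		return -EFAULT;
	return 0;
}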
spin_unlock_irqrestore(&disp->dma_queue_lock, flags); return 0; } diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c index d762246eabf..0379cb9f9a9 100644 --- a/drivers/media/platform/davinci/vpfe_capture.c +++ b/drivers/media/platform/davinci/vpfe_capture.c @@ -734,6 +734,8 @@ static int vpfe_release(struct file *file) } vpfe_dev->io_usrs = 0; vpfe_dev->numbuffers = config_params.numbuffers; + videobuf_stop(&vpfe_dev->buffer_queue); + videobuf_mmap_free(&vpfe_dev->buffer_queue); } /* Decrement device usrs counter */ diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c index 756da78bac2..8dea0b84a3a 100644 --- a/drivers/media/platform/davinci/vpif_capture.c +++ b/drivers/media/platform/davinci/vpif_capture.c @@ -358,8 +358,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq) common = &ch->common[VPIF_VIDEO_INDEX]; + /* Disable channel as per its device type and channel id */ + if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { + enable_channel0(0); + channel0_intr_enable(0); + } + if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || + (2 == common->started)) { + enable_channel1(0); + channel1_intr_enable(0); + } + common->started = 0; + /* release all active buffers */ spin_lock_irqsave(&common->irqlock, flags); + if (common->cur_frm == common->next_frm) { + vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (common->cur_frm != NULL) + vb2_buffer_done(&common->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (common->next_frm != NULL) + vb2_buffer_done(&common->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_cap_buffer, list); @@ -933,17 +956,6 @@ static int vpif_release(struct file *filep) if (fh->io_allowed[VPIF_VIDEO_INDEX]) { /* Reset io_usrs member of channel object */ common->io_usrs = 0; - /* Disable channel as per its device type and channel id */ - if (VPIF_CHANNEL0_VIDEO == ch->channel_id) { - enable_channel0(0); - channel0_intr_enable(0); - } - if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) || - (2 == common->started)) { - enable_channel1(0); - channel1_intr_enable(0); - } - common->started = 0; /* Free buffers allocated */ vb2_queue_release(&common->buffer_queue); vb2_dma_contig_cleanup_ctx(common->alloc_ctx); diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c index 0ac841e35aa..aed41edd050 100644 --- a/drivers/media/platform/davinci/vpif_display.c +++ b/drivers/media/platform/davinci/vpif_display.c @@ -320,8 +320,31 @@ static int vpif_stop_streaming(struct vb2_queue *vq) common = &ch->common[VPIF_VIDEO_INDEX]; + /* Disable channel */ + if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { + enable_channel2(0); + channel2_intr_enable(0); + } + if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || + (2 == common->started)) { + enable_channel3(0); + channel3_intr_enable(0); + } + common->started = 0; + /* release all active buffers */ spin_lock_irqsave(&common->irqlock, flags); + if (common->cur_frm == common->next_frm) { + vb2_buffer_done(&common->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (common->cur_frm != NULL) + vb2_buffer_done(&common->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (common->next_frm != NULL) + vb2_buffer_done(&common->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&common->dma_queue)) { common->next_frm = list_entry(common->dma_queue.next, struct vpif_disp_buffer, list); @@ 
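/*
 * The vpbe/vpif stop_streaming() hunks above (and the matching
 * removals from the release paths) converge on one drain pattern:
 * disable the channel when streaming stops, then, under the
 * DMA-queue lock, complete the in-flight buffer(s) with
 * VB2_BUF_STATE_ERROR -- once when cur == next, else each separately
 * -- and flush whatever is still queued. Condensed sketch with an
 * illustrative demo_buf type mirroring the drivers' buffer structs;
 * like the drivers, it assumes cur is non-NULL while streaming:
 */
struct demo_buf {
	struct vb2_buffer vb;
	struct list_head list;
};

static void demo_drain(struct demo_buf **cur, struct demo_buf **next,
		       struct list_head *queue, spinlock_t *lock)
{
	unsigned long flags;
	struct demo_buf *b;

	spin_lock_irqsave(lock, flags);
	if (*cur == *next) {			/* same buffer: done once */
		vb2_buffer_done(&(*cur)->vb, VB2_BUF_STATE_ERROR);
	} else {
		if (*cur)
			vb2_buffer_done(&(*cur)->vb, VB2_BUF_STATE_ERROR);
		if (*next)
			vb2_buffer_done(&(*next)->vb, VB2_BUF_STATE_ERROR);
	}
	while (!list_empty(queue)) {		/* flush pending buffers */
		b = list_entry(queue->next, struct demo_buf, list);
		list_del(&b->list);
		vb2_buffer_done(&b->vb, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(lock, flags);
}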
-773,18 +796,6 @@ static int vpif_release(struct file *filep) if (fh->io_allowed[VPIF_VIDEO_INDEX]) { /* Reset io_usrs member of channel object */ common->io_usrs = 0; - /* Disable channel */ - if (VPIF_CHANNEL2_VIDEO == ch->channel_id) { - enable_channel2(0); - channel2_intr_enable(0); - } - if ((VPIF_CHANNEL3_VIDEO == ch->channel_id) || - (2 == common->started)) { - enable_channel3(0); - channel3_intr_enable(0); - } - common->started = 0; - /* Free buffers allocated */ vb2_queue_release(&common->buffer_queue); vb2_dma_contig_cleanup_ctx(common->alloc_ctx); diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c index da2fc86cc52..25dbf5b05a9 100644 --- a/drivers/media/platform/exynos4-is/fimc-core.c +++ b/drivers/media/platform/exynos4-is/fimc-core.c @@ -122,7 +122,7 @@ static struct fimc_fmt fimc_formats[] = { }, { .name = "YUV 4:2:2 planar, Y/Cb/Cr", .fourcc = V4L2_PIX_FMT_YUV422P, - .depth = { 12 }, + .depth = { 16 }, .color = FIMC_FMT_YCBYCR422, .memplanes = 1, .colplanes = 3, diff --git a/drivers/media/tuners/fc2580.c b/drivers/media/tuners/fc2580.c index 3aecaf46509..f0c9c42867d 100644 --- a/drivers/media/tuners/fc2580.c +++ b/drivers/media/tuners/fc2580.c @@ -195,7 +195,7 @@ static int fc2580_set_params(struct dvb_frontend *fe) f_ref = 2UL * priv->cfg->clock / r_val; n_val = div_u64_rem(f_vco, f_ref, &k_val); - k_val_reg = 1UL * k_val * (1 << 20) / f_ref; + k_val_reg = div_u64(1ULL * k_val * (1 << 20), f_ref); ret = fc2580_wr_reg(priv, 0x18, r18_val | ((k_val_reg >> 16) & 0xff)); if (ret < 0) @@ -348,8 +348,8 @@ static int fc2580_set_params(struct dvb_frontend *fe) if (ret < 0) goto err; - ret = fc2580_wr_reg(priv, 0x37, 1UL * priv->cfg->clock * \ - fc2580_if_filter_lut[i].mul / 1000000000); + ret = fc2580_wr_reg(priv, 0x37, div_u64(1ULL * priv->cfg->clock * + fc2580_if_filter_lut[i].mul, 1000000000)); if (ret < 0) goto err; diff --git a/drivers/media/tuners/fc2580_priv.h b/drivers/media/tuners/fc2580_priv.h index be38a9e637e..646c9945213 100644 --- a/drivers/media/tuners/fc2580_priv.h +++ b/drivers/media/tuners/fc2580_priv.h @@ -22,6 +22,7 @@ #define FC2580_PRIV_H #include "fc2580.h" +#include <linux/math64.h> struct fc2580_reg_val { u8 reg; diff --git a/drivers/media/usb/dvb-usb-v2/Makefile b/drivers/media/usb/dvb-usb-v2/Makefile index 7407b8338cc..bc38f03394c 100644 --- a/drivers/media/usb/dvb-usb-v2/Makefile +++ b/drivers/media/usb/dvb-usb-v2/Makefile @@ -41,4 +41,3 @@ ccflags-y += -I$(srctree)/drivers/media/dvb-core ccflags-y += -I$(srctree)/drivers/media/dvb-frontends ccflags-y += -I$(srctree)/drivers/media/tuners ccflags-y += -I$(srctree)/drivers/media/common -ccflags-y += -I$(srctree)/drivers/staging/media/rtl2832u_sdr diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c index 61d196e8b3a..dcbd392e6ef 100644 --- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c +++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c @@ -24,7 +24,6 @@ #include "rtl2830.h" #include "rtl2832.h" -#include "rtl2832_sdr.h" #include "qt1010.h" #include "mt2060.h" @@ -36,6 +35,45 @@ #include "tua9001.h" #include "r820t.h" +/* + * RTL2832_SDR module is in staging. That logic is added in order to avoid any + * hard dependency to drivers/staging/ directory as we want compile mainline + * driver even whole staging directory is missing. 
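/*
 * The fc2580 hunks above swap open-coded divisions for div_u64():
 * on 32-bit kernels a plain '/' on a 64-bit dividend compiles to a
 * libgcc helper (__udivdi3 / __aeabi_uldivmod) that the kernel does
 * not provide, so 64-by-32 divisions must go through
 * <linux/math64.h>. Standalone form of the first fix:
 */
#include <linux/math64.h>

static u32 demo_k_val_reg(u32 k_val, u32 f_ref)
{
	/* NOT: return (1ULL * k_val * (1 << 20)) / f_ref;  (32-bit link error) */
	return div_u64(1ULL * k_val * (1 << 20), f_ref);
}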
+ */ +#include <media/v4l2-subdev.h> + +#if IS_ENABLED(CONFIG_DVB_RTL2832_SDR) +struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, + struct i2c_adapter *i2c, const struct rtl2832_config *cfg, + struct v4l2_subdev *sd); +#else +static inline struct dvb_frontend *rtl2832_sdr_attach(struct dvb_frontend *fe, + struct i2c_adapter *i2c, const struct rtl2832_config *cfg, + struct v4l2_subdev *sd) +{ + return NULL; +} +#endif + +#ifdef CONFIG_MEDIA_ATTACH +#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \ + void *__r = NULL; \ + typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ + if (__a) { \ + __r = (void *) __a(ARGS); \ + if (__r == NULL) \ + symbol_put(FUNCTION); \ + } \ + __r; \ +}) + +#else +#define dvb_attach_sdr(FUNCTION, ARGS...) ({ \ + FUNCTION(ARGS); \ +}) + +#endif + static int rtl28xxu_disable_rc; module_param_named(disable_rc, rtl28xxu_disable_rc, int, 0644); MODULE_PARM_DESC(disable_rc, "disable RTL2832U remote controller"); @@ -908,7 +946,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.tuner_ops.get_rf_strength; /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, &rtl28xxu_rtl2832_fc0012_config, NULL); break; case TUNER_RTL2832_FC0013: @@ -920,7 +958,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.tuner_ops.get_rf_strength; /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, &rtl28xxu_rtl2832_fc0013_config, NULL); break; case TUNER_RTL2832_E4000: { @@ -951,7 +989,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) i2c_set_adapdata(i2c_adap_internal, d); /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], i2c_adap_internal, &rtl28xxu_rtl2832_e4000_config, sd); } @@ -982,7 +1020,7 @@ static int rtl2832u_tuner_attach(struct dvb_usb_adapter *adap) adap->fe[0]->ops.tuner_ops.get_rf_strength; /* attach SDR */ - dvb_attach(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, + dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], &d->i2c_adap, &rtl28xxu_rtl2832_r820t_config, NULL); break; case TUNER_RTL2832_R828D: diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c index 7277dbd2afc..ecbcb39feb7 100644 --- a/drivers/media/usb/gspca/sonixb.c +++ b/drivers/media/usb/gspca/sonixb.c @@ -1430,10 +1430,8 @@ static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0c45, 0x600d), SB(PAS106, 101)}, {USB_DEVICE(0x0c45, 0x6011), SB(OV6650, 101)}, {USB_DEVICE(0x0c45, 0x6019), SB(OV7630, 101)}, -#if !IS_ENABLED(CONFIG_USB_SN9C102) {USB_DEVICE(0x0c45, 0x6024), SB(TAS5130CXX, 102)}, {USB_DEVICE(0x0c45, 0x6025), SB(TAS5130CXX, 102)}, -#endif {USB_DEVICE(0x0c45, 0x6027), SB(OV7630, 101)}, /* Genius Eye 310 */ {USB_DEVICE(0x0c45, 0x6028), SB(PAS202, 102)}, {USB_DEVICE(0x0c45, 0x6029), SB(PAS106, 102)}, diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 04b2daf567b..7e2411c3641 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -178,6 +178,9 @@ struct v4l2_create_buffers32 { static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { + if (get_user(kp->type, &up->type)) + return -EFAULT; + switch (kp->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE: case V4L2_BUF_TYPE_VIDEO_OUTPUT: @@ -204,17 +207,16 
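/*
 * Usage shape for the dvb_attach_sdr() glue defined above: with
 * CONFIG_MEDIA_ATTACH it binds rtl2832_sdr_attach() at run time via
 * symbol_request(), releasing the module reference again when the
 * attach returns NULL; otherwise it calls the inline stub directly.
 * Either way a NULL result is non-fatal -- the DVB side keeps
 * working without the staging SDR frontend. Illustrative caller:
 */
static void demo_try_sdr(struct dvb_usb_adapter *adap,
			 struct i2c_adapter *i2c,
			 const struct rtl2832_config *cfg)
{
	if (!dvb_attach_sdr(rtl2832_sdr_attach, adap->fe[0], i2c, cfg, NULL))
		pr_debug("SDR frontend not attached (staging module absent?)\n");
}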
@@ static int __get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __us static int get_v4l2_format32(struct v4l2_format *kp, struct v4l2_format32 __user *up) { - if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32)) || - get_user(kp->type, &up->type)) - return -EFAULT; + if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_format32))) + return -EFAULT; return __get_v4l2_format32(kp, up); } static int get_v4l2_create32(struct v4l2_create_buffers *kp, struct v4l2_create_buffers32 __user *up) { if (!access_ok(VERIFY_READ, up, sizeof(struct v4l2_create_buffers32)) || - copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format.fmt))) - return -EFAULT; + copy_from_user(kp, up, offsetof(struct v4l2_create_buffers32, format))) + return -EFAULT; return __get_v4l2_format32(&kp->format, &up->format); } diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c index 110c0362705..ff7138fd66d 100644 --- a/drivers/memory/mvebu-devbus.c +++ b/drivers/memory/mvebu-devbus.c @@ -2,7 +2,7 @@ * Marvell EBU SoC Device Bus Controller * (memory controller for NOR/NAND/SRAM/FPGA devices) * - * Copyright (C) 2013 Marvell + * Copyright (C) 2013-2014 Marvell * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,19 +30,47 @@ #include <linux/platform_device.h> /* Register definitions */ -#define DEV_WIDTH_BIT 30 -#define BADR_SKEW_BIT 28 -#define RD_HOLD_BIT 23 -#define ACC_NEXT_BIT 17 -#define RD_SETUP_BIT 12 -#define ACC_FIRST_BIT 6 - -#define SYNC_ENABLE_BIT 24 -#define WR_HIGH_BIT 16 -#define WR_LOW_BIT 8 - -#define READ_PARAM_OFFSET 0x0 -#define WRITE_PARAM_OFFSET 0x4 +#define ARMADA_DEV_WIDTH_SHIFT 30 +#define ARMADA_BADR_SKEW_SHIFT 28 +#define ARMADA_RD_HOLD_SHIFT 23 +#define ARMADA_ACC_NEXT_SHIFT 17 +#define ARMADA_RD_SETUP_SHIFT 12 +#define ARMADA_ACC_FIRST_SHIFT 6 + +#define ARMADA_SYNC_ENABLE_SHIFT 24 +#define ARMADA_WR_HIGH_SHIFT 16 +#define ARMADA_WR_LOW_SHIFT 8 + +#define ARMADA_READ_PARAM_OFFSET 0x0 +#define ARMADA_WRITE_PARAM_OFFSET 0x4 + +#define ORION_RESERVED (0x2 << 30) +#define ORION_BADR_SKEW_SHIFT 28 +#define ORION_WR_HIGH_EXT_BIT BIT(27) +#define ORION_WR_HIGH_EXT_MASK 0x8 +#define ORION_WR_LOW_EXT_BIT BIT(26) +#define ORION_WR_LOW_EXT_MASK 0x8 +#define ORION_ALE_WR_EXT_BIT BIT(25) +#define ORION_ALE_WR_EXT_MASK 0x8 +#define ORION_ACC_NEXT_EXT_BIT BIT(24) +#define ORION_ACC_NEXT_EXT_MASK 0x10 +#define ORION_ACC_FIRST_EXT_BIT BIT(23) +#define ORION_ACC_FIRST_EXT_MASK 0x10 +#define ORION_TURN_OFF_EXT_BIT BIT(22) +#define ORION_TURN_OFF_EXT_MASK 0x8 +#define ORION_DEV_WIDTH_SHIFT 20 +#define ORION_WR_HIGH_SHIFT 17 +#define ORION_WR_HIGH_MASK 0x7 +#define ORION_WR_LOW_SHIFT 14 +#define ORION_WR_LOW_MASK 0x7 +#define ORION_ALE_WR_SHIFT 11 +#define ORION_ALE_WR_MASK 0x7 +#define ORION_ACC_NEXT_SHIFT 7 +#define ORION_ACC_NEXT_MASK 0xF +#define ORION_ACC_FIRST_SHIFT 3 +#define ORION_ACC_FIRST_MASK 0xF +#define ORION_TURN_OFF_SHIFT 0 +#define ORION_TURN_OFF_MASK 0x7 struct devbus_read_params { u32 bus_width; @@ -89,117 +117,167 @@ static int get_timing_param_ps(struct devbus *devbus, return 0; } -static int devbus_set_timing_params(struct devbus *devbus, - struct device_node *node) +static int devbus_get_timing_params(struct devbus *devbus, + struct device_node *node, + struct devbus_read_params *r, + struct devbus_write_params *w) { - struct devbus_read_params r; - struct devbus_write_params w; - u32 value; int err; - dev_dbg(devbus->dev, "Setting timing 
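/*
 * The two compat fixes above reduce to one rule: fetch the
 * discriminating ->type with get_user() before switching on it, and
 * when bulk-copying the head of a compat struct, stop at the first
 * member whose 32-bit layout differs (offsetof(..., format), not
 * format.fmt) and convert that member explicitly. Condensed mirror
 * of the fixed format helper:
 */
static int demo_get_format32(struct v4l2_format *kp,
			     struct v4l2_format32 __user *up)
{
	if (get_user(kp->type, &up->type))	/* read before dispatch */
		return -EFAULT;

	switch (kp->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		/* ...convert the union member selected by ->type... */
		break;
	default:
		return -EINVAL;
	}
	return 0;
}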
parameter, tick is %lu ps\n", - devbus->tick_ps); - - /* Get read timings */ - err = of_property_read_u32(node, "devbus,bus-width", &r.bus_width); + err = of_property_read_u32(node, "devbus,bus-width", &r->bus_width); if (err < 0) { dev_err(devbus->dev, "%s has no 'devbus,bus-width' property\n", node->full_name); return err; } - /* Convert bit width to byte width */ - r.bus_width /= 8; + + /* + * The bus width is encoded into the register as 0 for 8 bits, + * and 1 for 16 bits, so we do the necessary conversion here. + */ + if (r->bus_width == 8) + r->bus_width = 0; + else if (r->bus_width == 16) + r->bus_width = 1; + else { + dev_err(devbus->dev, "invalid bus width %d\n", r->bus_width); + return -EINVAL; + } err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps", - &r.badr_skew); + &r->badr_skew); if (err < 0) return err; err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps", - &r.turn_off); + &r->turn_off); if (err < 0) return err; err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps", - &r.acc_first); + &r->acc_first); if (err < 0) return err; err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps", - &r.acc_next); + &r->acc_next); if (err < 0) return err; - err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps", - &r.rd_setup); - if (err < 0) - return err; - - err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps", - &r.rd_hold); - if (err < 0) - return err; - - /* Get write timings */ - err = of_property_read_u32(node, "devbus,sync-enable", - &w.sync_enable); - if (err < 0) { - dev_err(devbus->dev, - "%s has no 'devbus,sync-enable' property\n", - node->full_name); - return err; + if (of_device_is_compatible(devbus->dev->of_node, "marvell,mvebu-devbus")) { + err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps", + &r->rd_setup); + if (err < 0) + return err; + + err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps", + &r->rd_hold); + if (err < 0) + return err; + + err = of_property_read_u32(node, "devbus,sync-enable", + &w->sync_enable); + if (err < 0) { + dev_err(devbus->dev, + "%s has no 'devbus,sync-enable' property\n", + node->full_name); + return err; + } } err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps", - &w.ale_wr); + &w->ale_wr); if (err < 0) return err; err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps", - &w.wr_low); + &w->wr_low); if (err < 0) return err; err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps", - &w.wr_high); + &w->wr_high); if (err < 0) return err; + return 0; +} + +static void devbus_orion_set_timing_params(struct devbus *devbus, + struct device_node *node, + struct devbus_read_params *r, + struct devbus_write_params *w) +{ + u32 value; + + /* + * The hardware designers found it would be a good idea to + * split most of the values in the register into two fields: + * one containing all the low-order bits, and another one + * containing just the high-order bit. For all of those + * fields, we have to split the value into these two parts. + */ + value = (r->turn_off & ORION_TURN_OFF_MASK) << ORION_TURN_OFF_SHIFT | + (r->acc_first & ORION_ACC_FIRST_MASK) << ORION_ACC_FIRST_SHIFT | + (r->acc_next & ORION_ACC_NEXT_MASK) << ORION_ACC_NEXT_SHIFT | + (w->ale_wr & ORION_ALE_WR_MASK) << ORION_ALE_WR_SHIFT | + (w->wr_low & ORION_WR_LOW_MASK) << ORION_WR_LOW_SHIFT | + (w->wr_high & ORION_WR_HIGH_MASK) << ORION_WR_HIGH_SHIFT | + r->bus_width << ORION_DEV_WIDTH_SHIFT | + ((r->turn_off & ORION_TURN_OFF_EXT_MASK) ? ORION_TURN_OFF_EXT_BIT : 0) | + ((r->acc_first & ORION_ACC_FIRST_EXT_MASK) ? 
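/*
 * The width handling above converts the DT "devbus,bus-width" value
 * into the hardware encoding -- 0 means an 8-bit bus, 1 a 16-bit
 * bus, anything else is rejected -- instead of the old divide-by-8.
 * As a standalone helper the conversion would look like:
 */
static int demo_encode_bus_width(struct device *dev, u32 *width)
{
	switch (*width) {
	case 8:
		*width = 0;
		return 0;
	case 16:
		*width = 1;
		return 0;
	default:
		dev_err(dev, "invalid bus width %d\n", *width);
		return -EINVAL;
	}
}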
ORION_ACC_FIRST_EXT_BIT : 0) | + ((r->acc_next & ORION_ACC_NEXT_EXT_MASK) ? ORION_ACC_NEXT_EXT_BIT : 0) | + ((w->ale_wr & ORION_ALE_WR_EXT_MASK) ? ORION_ALE_WR_EXT_BIT : 0) | + ((w->wr_low & ORION_WR_LOW_EXT_MASK) ? ORION_WR_LOW_EXT_BIT : 0) | + ((w->wr_high & ORION_WR_HIGH_EXT_MASK) ? ORION_WR_HIGH_EXT_BIT : 0) | + (r->badr_skew << ORION_BADR_SKEW_SHIFT) | + ORION_RESERVED; + + writel(value, devbus->base); +} + +static void devbus_armada_set_timing_params(struct devbus *devbus, + struct device_node *node, + struct devbus_read_params *r, + struct devbus_write_params *w) +{ + u32 value; + /* Set read timings */ - value = r.bus_width << DEV_WIDTH_BIT | - r.badr_skew << BADR_SKEW_BIT | - r.rd_hold << RD_HOLD_BIT | - r.acc_next << ACC_NEXT_BIT | - r.rd_setup << RD_SETUP_BIT | - r.acc_first << ACC_FIRST_BIT | - r.turn_off; + value = r->bus_width << ARMADA_DEV_WIDTH_SHIFT | + r->badr_skew << ARMADA_BADR_SKEW_SHIFT | + r->rd_hold << ARMADA_RD_HOLD_SHIFT | + r->acc_next << ARMADA_ACC_NEXT_SHIFT | + r->rd_setup << ARMADA_RD_SETUP_SHIFT | + r->acc_first << ARMADA_ACC_FIRST_SHIFT | + r->turn_off; dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n", - devbus->base + READ_PARAM_OFFSET, + devbus->base + ARMADA_READ_PARAM_OFFSET, value); - writel(value, devbus->base + READ_PARAM_OFFSET); + writel(value, devbus->base + ARMADA_READ_PARAM_OFFSET); /* Set write timings */ - value = w.sync_enable << SYNC_ENABLE_BIT | - w.wr_low << WR_LOW_BIT | - w.wr_high << WR_HIGH_BIT | - w.ale_wr; + value = w->sync_enable << ARMADA_SYNC_ENABLE_SHIFT | + w->wr_low << ARMADA_WR_LOW_SHIFT | + w->wr_high << ARMADA_WR_HIGH_SHIFT | + w->ale_wr; dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n", - devbus->base + WRITE_PARAM_OFFSET, + devbus->base + ARMADA_WRITE_PARAM_OFFSET, value); - writel(value, devbus->base + WRITE_PARAM_OFFSET); - - return 0; + writel(value, devbus->base + ARMADA_WRITE_PARAM_OFFSET); } static int mvebu_devbus_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *node = pdev->dev.of_node; + struct devbus_read_params r; + struct devbus_write_params w; struct devbus *devbus; struct resource *res; struct clk *clk; @@ -229,10 +307,21 @@ static int mvebu_devbus_probe(struct platform_device *pdev) rate = clk_get_rate(clk) / 1000; devbus->tick_ps = 1000000000 / rate; - /* Read the device tree node and set the new timing parameters */ - err = devbus_set_timing_params(devbus, node); - if (err < 0) - return err; + dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n", + devbus->tick_ps); + + if (!of_property_read_bool(node, "devbus,keep-config")) { + /* Read the Device Tree node */ + err = devbus_get_timing_params(devbus, node, &r, &w); + if (err < 0) + return err; + + /* Set the new timing parameters */ + if (of_device_is_compatible(node, "marvell,orion-devbus")) + devbus_orion_set_timing_params(devbus, node, &r, &w); + else + devbus_armada_set_timing_params(devbus, node, &r, &w); + } /* * We need to create a child device explicitly from here to @@ -248,6 +337,7 @@ static int mvebu_devbus_probe(struct platform_device *pdev) static const struct of_device_id mvebu_devbus_of_match[] = { { .compatible = "marvell,mvebu-devbus" }, + { .compatible = "marvell,orion-devbus" }, {}, }; MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 5bdefe72625..6deb8a11c12 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1227,12 +1227,17 @@ config MCP_UCB1200_TS endmenu -config VEXPRESS_CONFIG - 
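/*
 * The Orion encoding above deals with fields the hardware split in
 * two: the low-order bits sit in a normal shifted field while the
 * highest bit gets a separate "extension" register bit. Generic
 * form of the packing for one such field (numbers taken from
 * ORION_WR_LOW: a 4-bit value, bits 2:0 at shift 14, bit 3 mapped
 * to register bit 26):
 */
static u32 demo_pack_split_field(u32 v)
{
	return ((v & 0x7) << 14) |		/* low three bits */
	       ((v & 0x8) ? BIT(26) : 0);	/* high bit, relocated */
}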
bool "ARM Versatile Express platform infrastructure" - depends on ARM || ARM64 +config MFD_VEXPRESS_SYSREG + bool "Versatile Express System Registers" + depends on VEXPRESS_CONFIG && GPIOLIB + default y + select CLKSRC_MMIO + select GPIO_GENERIC_PLATFORM + select MFD_CORE + select MFD_SYSCON help - Platform configuration infrastructure for the ARM Ltd. - Versatile Express. + System Registers are the platform configuration block + on the ARM Ltd. Versatile Express board. endmenu endif diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 2851275e265..cec3487b539 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -161,7 +161,7 @@ obj-$(CONFIG_MFD_RC5T583) += rc5t583.o rc5t583-irq.o obj-$(CONFIG_MFD_SEC_CORE) += sec-core.o sec-irq.o obj-$(CONFIG_MFD_SYSCON) += syscon.o obj-$(CONFIG_MFD_LM3533) += lm3533-core.o lm3533-ctrlbank.o -obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o vexpress-sysreg.o +obj-$(CONFIG_MFD_VEXPRESS_SYSREG) += vexpress-sysreg.o obj-$(CONFIG_MFD_RETU) += retu-mfd.o obj-$(CONFIG_MFD_AS3711) += as3711.o obj-$(CONFIG_MFD_AS3722) += as3722.o diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c index c9de3d598ea..1d15735f9ef 100644 --- a/drivers/mfd/rtsx_pcr.c +++ b/drivers/mfd/rtsx_pcr.c @@ -338,28 +338,58 @@ int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist, int num_sg, bool read, int timeout) { struct completion trans_done; - int err = 0, count; + u8 dir; + int err = 0, i, count; long timeleft; unsigned long flags; + struct scatterlist *sg; + enum dma_data_direction dma_dir; + u32 val; + dma_addr_t addr; + unsigned int len; + + dev_dbg(&(pcr->pci->dev), "--> %s: num_sg = %d\n", __func__, num_sg); + + /* don't transfer data during abort processing */ + if (pcr->remove_pci) + return -EINVAL; + + if ((sglist == NULL) || (num_sg <= 0)) + return -EINVAL; - count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read); + if (read) { + dir = DEVICE_TO_HOST; + dma_dir = DMA_FROM_DEVICE; + } else { + dir = HOST_TO_DEVICE; + dma_dir = DMA_TO_DEVICE; + } + + count = dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); if (count < 1) { dev_err(&(pcr->pci->dev), "scatterlist map failed\n"); return -EINVAL; } dev_dbg(&(pcr->pci->dev), "DMA mapping count: %d\n", count); + val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; + pcr->sgi = 0; + for_each_sg(sglist, sg, count, i) { + addr = sg_dma_address(sg); + len = sg_dma_len(sg); + rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1); + } spin_lock_irqsave(&pcr->lock, flags); pcr->done = &trans_done; pcr->trans_result = TRANS_NOT_READY; init_completion(&trans_done); + rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); spin_unlock_irqrestore(&pcr->lock, flags); - rtsx_pci_dma_transfer(pcr, sglist, count, read); - timeleft = wait_for_completion_interruptible_timeout( &trans_done, msecs_to_jiffies(timeout)); if (timeleft <= 0) { @@ -383,7 +413,7 @@ out: pcr->done = NULL; spin_unlock_irqrestore(&pcr->lock, flags); - rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read); + dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dma_dir); if ((err < 0) && (err != -ENODEV)) rtsx_pci_stop_cmd(pcr); @@ -395,73 +425,6 @@ out: } EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data); -int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, - int num_sg, bool read) -{ - enum dma_data_direction dir = read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; - - if (pcr->remove_pci) - return -EINVAL; - - if ((sglist == NULL) || num_sg < 1) - return -EINVAL; - - return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir); -} -EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg); - -int rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist, - int num_sg, bool read) -{ - enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - - if (pcr->remove_pci) - return -EINVAL; - - if (sglist == NULL || num_sg < 1) - return -EINVAL; - - dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir); - return num_sg; -} -EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg); - -int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist, - int sg_count, bool read) -{ - struct scatterlist *sg; - dma_addr_t addr; - unsigned int len; - int i; - u32 val; - u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE; - unsigned long flags; - - if (pcr->remove_pci) - return -EINVAL; - - if ((sglist == NULL) || (sg_count < 1)) - return -EINVAL; - - val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE; - pcr->sgi = 0; - for_each_sg(sglist, sg, sg_count, i) { - addr = sg_dma_address(sg); - len = sg_dma_len(sg); - rtsx_pci_add_sg_tbl(pcr, addr, len, i == sg_count - 1); - } - - spin_lock_irqsave(&pcr->lock, flags); - - rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr); - rtsx_pci_writel(pcr, RTSX_HDBCTLR, val); - - spin_unlock_irqrestore(&pcr->lock, flags); - - return 0; -} -EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer); - int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len) { int err; @@ -873,8 +836,6 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) int_reg = rtsx_pci_readl(pcr, RTSX_BIPR); /* Clear interrupt flag */ rtsx_pci_writel(pcr, RTSX_BIPR, int_reg); - dev_dbg(&pcr->pci->dev, "=========== BIPR 0x%8x ==========\n", int_reg); - if ((int_reg & pcr->bier) == 0) { spin_unlock(&pcr->lock); return IRQ_NONE; @@ -905,28 +866,17 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) } if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) { - if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) + if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) { pcr->trans_result = TRANS_RESULT_FAIL; - else if (int_reg & TRANS_OK_INT) + if (pcr->done) + complete(pcr->done); + } else if (int_reg & TRANS_OK_INT) { pcr->trans_result = TRANS_RESULT_OK; - - if (pcr->done) - complete(pcr->done); - - if (int_reg & SD_EXIST) { - struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD]; - if (slot && slot->done_transfer) - slot->done_transfer(slot->p_dev); - } - - if (int_reg & MS_EXIST) { - struct rtsx_slot *slot = &pcr->slots[RTSX_SD_CARD]; - if (slot && slot->done_transfer) - slot->done_transfer(slot->p_dev); + if (pcr->done) + complete(pcr->done); } } - if (pcr->card_inserted || pcr->card_removed) schedule_delayed_work(&pcr->carddet_work, msecs_to_jiffies(200)); diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index dbea55de439..e2a04bb8bc1 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> +#include <linux/platform_data/syscon.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/mfd/syscon.h> @@ -119,6 +120,7 @@ static struct regmap_config syscon_regmap_config = { static int syscon_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct syscon_platform_data *pdata = dev_get_platdata(dev); struct syscon *syscon; struct resource *res; void __iomem *base; @@ -136,6 +138,8 @@ static int 
syscon_probe(struct platform_device *pdev) return -ENOMEM; syscon_regmap_config.max_register = res->end - res->start - 3; + if (pdata) + syscon_regmap_config.name = pdata->label; syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_regmap_config); if (IS_ERR(syscon->regmap)) { diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index e87140bef66..db11b4f4061 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -98,7 +98,11 @@ #define TWL4030_BASEADD_BACKUP 0x0014 #define TWL4030_BASEADD_INT 0x002E #define TWL4030_BASEADD_PM_MASTER 0x0036 + #define TWL4030_BASEADD_PM_RECEIVER 0x005B +#define TWL4030_DCDC_GLOBAL_CFG 0x06 +#define SMARTREFLEX_ENABLE BIT(3) + #define TWL4030_BASEADD_RTC 0x001C #define TWL4030_BASEADD_SECURED_REG 0x0000 @@ -1204,6 +1208,11 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) * Disable TWL4030/TWL5030 I2C Pull-up on I2C1 and I2C4(SR) interface. * Program I2C_SCL_CTRL_PU(bit 0)=0, I2C_SDA_CTRL_PU (bit 2)=0, * SR_I2C_SCL_CTRL_PU(bit 4)=0 and SR_I2C_SDA_CTRL_PU(bit 6)=0. + * + * Also, always enable SmartReflex bit as that's needed for omaps to + * to do anything over I2C4 for voltage scaling even if SmartReflex + * is disabled. Without the SmartReflex bit omap sys_clkreq idle + * signal will never trigger for retention idle. */ if (twl_class_is_4030()) { u8 temp; @@ -1212,6 +1221,12 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) temp &= ~(SR_I2C_SDA_CTRL_PU | SR_I2C_SCL_CTRL_PU | \ I2C_SDA_CTRL_PU | I2C_SCL_CTRL_PU); twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1); + + twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &temp, + TWL4030_DCDC_GLOBAL_CFG); + temp |= SMARTREFLEX_ENABLE; + twl_i2c_write_u8(TWL_MODULE_PM_RECEIVER, temp, + TWL4030_DCDC_GLOBAL_CFG); } if (node) { diff --git a/drivers/mfd/vexpress-config.c b/drivers/mfd/vexpress-config.c deleted file mode 100644 index d0db89d13e0..00000000000 --- a/drivers/mfd/vexpress-config.c +++ /dev/null @@ -1,287 +0,0 @@ -/* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
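/*
 * How the new syscon label plumbing above is meant to be fed: a
 * parent MFD driver attaches syscon_platform_data to each "syscon"
 * cell, and syscon_probe() copies the label into the regmap config
 * so multiple instances stay distinguishable (e.g. in debugfs).
 * Assumed usage, mirroring the vexpress-sysreg cells later in this
 * patch:
 */
static struct syscon_platform_data demo_syscon_pdata = {
	.label = "sys_misc",		/* becomes the regmap's name */
};

static struct mfd_cell demo_syscon_cell = {
	.name = "syscon",
	.platform_data = &demo_syscon_pdata,
	.pdata_size = sizeof(demo_syscon_pdata),
};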
- * - * Copyright (C) 2012 ARM Limited - */ - -#define pr_fmt(fmt) "vexpress-config: " fmt - -#include <linux/bitops.h> -#include <linux/completion.h> -#include <linux/export.h> -#include <linux/list.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include <linux/slab.h> -#include <linux/string.h> -#include <linux/vexpress.h> - - -#define VEXPRESS_CONFIG_MAX_BRIDGES 2 - -static struct vexpress_config_bridge { - struct device_node *node; - struct vexpress_config_bridge_info *info; - struct list_head transactions; - spinlock_t transactions_lock; -} vexpress_config_bridges[VEXPRESS_CONFIG_MAX_BRIDGES]; - -static DECLARE_BITMAP(vexpress_config_bridges_map, - ARRAY_SIZE(vexpress_config_bridges)); -static DEFINE_MUTEX(vexpress_config_bridges_mutex); - -struct vexpress_config_bridge *vexpress_config_bridge_register( - struct device_node *node, - struct vexpress_config_bridge_info *info) -{ - struct vexpress_config_bridge *bridge; - int i; - - pr_debug("Registering bridge '%s'\n", info->name); - - mutex_lock(&vexpress_config_bridges_mutex); - i = find_first_zero_bit(vexpress_config_bridges_map, - ARRAY_SIZE(vexpress_config_bridges)); - if (i >= ARRAY_SIZE(vexpress_config_bridges)) { - pr_err("Can't register more bridges!\n"); - mutex_unlock(&vexpress_config_bridges_mutex); - return NULL; - } - __set_bit(i, vexpress_config_bridges_map); - bridge = &vexpress_config_bridges[i]; - - bridge->node = node; - bridge->info = info; - INIT_LIST_HEAD(&bridge->transactions); - spin_lock_init(&bridge->transactions_lock); - - mutex_unlock(&vexpress_config_bridges_mutex); - - return bridge; -} -EXPORT_SYMBOL(vexpress_config_bridge_register); - -void vexpress_config_bridge_unregister(struct vexpress_config_bridge *bridge) -{ - struct vexpress_config_bridge __bridge = *bridge; - int i; - - mutex_lock(&vexpress_config_bridges_mutex); - for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++) - if (&vexpress_config_bridges[i] == bridge) - __clear_bit(i, vexpress_config_bridges_map); - mutex_unlock(&vexpress_config_bridges_mutex); - - WARN_ON(!list_empty(&__bridge.transactions)); - while (!list_empty(&__bridge.transactions)) - cpu_relax(); -} -EXPORT_SYMBOL(vexpress_config_bridge_unregister); - - -struct vexpress_config_func { - struct vexpress_config_bridge *bridge; - void *func; -}; - -struct vexpress_config_func *__vexpress_config_func_get(struct device *dev, - struct device_node *node) -{ - struct device_node *bridge_node; - struct vexpress_config_func *func; - int i; - - if (WARN_ON(dev && node && dev->of_node != node)) - return NULL; - if (dev && !node) - node = dev->of_node; - - func = kzalloc(sizeof(*func), GFP_KERNEL); - if (!func) - return NULL; - - bridge_node = of_node_get(node); - while (bridge_node) { - const __be32 *prop = of_get_property(bridge_node, - "arm,vexpress,config-bridge", NULL); - - if (prop) { - bridge_node = of_find_node_by_phandle( - be32_to_cpup(prop)); - break; - } - - bridge_node = of_get_next_parent(bridge_node); - } - - mutex_lock(&vexpress_config_bridges_mutex); - for (i = 0; i < ARRAY_SIZE(vexpress_config_bridges); i++) { - struct vexpress_config_bridge *bridge = - &vexpress_config_bridges[i]; - - if (test_bit(i, vexpress_config_bridges_map) && - bridge->node == bridge_node) { - func->bridge = bridge; - func->func = bridge->info->func_get(dev, node); - break; - } - } - mutex_unlock(&vexpress_config_bridges_mutex); - - if (!func->func) { - of_node_put(node); - kfree(func); - return NULL; - } - - return func; -} -EXPORT_SYMBOL(__vexpress_config_func_get); - -void 
vexpress_config_func_put(struct vexpress_config_func *func) -{ - func->bridge->info->func_put(func->func); - of_node_put(func->bridge->node); - kfree(func); -} -EXPORT_SYMBOL(vexpress_config_func_put); - -struct vexpress_config_trans { - struct vexpress_config_func *func; - int offset; - bool write; - u32 *data; - int status; - struct completion completion; - struct list_head list; -}; - -static void vexpress_config_dump_trans(const char *what, - struct vexpress_config_trans *trans) -{ - pr_debug("%s %s trans %p func 0x%p offset %d data 0x%x status %d\n", - what, trans->write ? "write" : "read", trans, - trans->func->func, trans->offset, - trans->data ? *trans->data : 0, trans->status); -} - -static int vexpress_config_schedule(struct vexpress_config_trans *trans) -{ - int status; - struct vexpress_config_bridge *bridge = trans->func->bridge; - unsigned long flags; - - init_completion(&trans->completion); - trans->status = -EFAULT; - - spin_lock_irqsave(&bridge->transactions_lock, flags); - - if (list_empty(&bridge->transactions)) { - vexpress_config_dump_trans("Executing", trans); - status = bridge->info->func_exec(trans->func->func, - trans->offset, trans->write, trans->data); - } else { - vexpress_config_dump_trans("Queuing", trans); - status = VEXPRESS_CONFIG_STATUS_WAIT; - } - - switch (status) { - case VEXPRESS_CONFIG_STATUS_DONE: - vexpress_config_dump_trans("Finished", trans); - trans->status = status; - break; - case VEXPRESS_CONFIG_STATUS_WAIT: - list_add_tail(&trans->list, &bridge->transactions); - break; - } - - spin_unlock_irqrestore(&bridge->transactions_lock, flags); - - return status; -} - -void vexpress_config_complete(struct vexpress_config_bridge *bridge, - int status) -{ - struct vexpress_config_trans *trans; - unsigned long flags; - const char *message = "Completed"; - - spin_lock_irqsave(&bridge->transactions_lock, flags); - - trans = list_first_entry(&bridge->transactions, - struct vexpress_config_trans, list); - trans->status = status; - - do { - vexpress_config_dump_trans(message, trans); - list_del(&trans->list); - complete(&trans->completion); - - if (list_empty(&bridge->transactions)) - break; - - trans = list_first_entry(&bridge->transactions, - struct vexpress_config_trans, list); - vexpress_config_dump_trans("Executing pending", trans); - trans->status = bridge->info->func_exec(trans->func->func, - trans->offset, trans->write, trans->data); - message = "Finished pending"; - } while (trans->status == VEXPRESS_CONFIG_STATUS_DONE); - - spin_unlock_irqrestore(&bridge->transactions_lock, flags); -} -EXPORT_SYMBOL(vexpress_config_complete); - -int vexpress_config_wait(struct vexpress_config_trans *trans) -{ - wait_for_completion(&trans->completion); - - return trans->status; -} -EXPORT_SYMBOL(vexpress_config_wait); - -int vexpress_config_read(struct vexpress_config_func *func, int offset, - u32 *data) -{ - struct vexpress_config_trans trans = { - .func = func, - .offset = offset, - .write = false, - .data = data, - .status = 0, - }; - int status = vexpress_config_schedule(&trans); - - if (status == VEXPRESS_CONFIG_STATUS_WAIT) - status = vexpress_config_wait(&trans); - - return status; -} -EXPORT_SYMBOL(vexpress_config_read); - -int vexpress_config_write(struct vexpress_config_func *func, int offset, - u32 data) -{ - struct vexpress_config_trans trans = { - .func = func, - .offset = offset, - .write = true, - .data = &data, - .status = 0, - }; - int status = vexpress_config_schedule(&trans); - - if (status == VEXPRESS_CONFIG_STATUS_WAIT) - status = 
vexpress_config_wait(&trans); - - return status; -} -EXPORT_SYMBOL(vexpress_config_write); diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c index 35281e804e7..9e21e4fc959 100644 --- a/drivers/mfd/vexpress-sysreg.c +++ b/drivers/mfd/vexpress-sysreg.c @@ -11,23 +11,22 @@ * Copyright (C) 2012 ARM Limited */ +#include <linux/basic_mmio_gpio.h> #include <linux/err.h> -#include <linux/gpio.h> #include <linux/io.h> -#include <linux/leds.h> +#include <linux/mfd/core.h> #include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_data/syscon.h> #include <linux/platform_device.h> -#include <linux/regulator/driver.h> #include <linux/slab.h> #include <linux/stat.h> -#include <linux/timer.h> #include <linux/vexpress.h> #define SYS_ID 0x000 #define SYS_SW 0x004 #define SYS_LED 0x008 #define SYS_100HZ 0x024 -#define SYS_FLAGS 0x030 #define SYS_FLAGSSET 0x030 #define SYS_FLAGSCLR 0x034 #define SYS_NVFLAGS 0x038 @@ -46,465 +45,209 @@ #define SYS_CFGSTAT 0x0a8 #define SYS_HBI_MASK 0xfff -#define SYS_ID_HBI_SHIFT 16 #define SYS_PROCIDx_HBI_SHIFT 0 -#define SYS_LED_LED(n) (1 << (n)) - #define SYS_MCI_CARDIN (1 << 0) #define SYS_MCI_WPROT (1 << 1) -#define SYS_FLASH_WPn (1 << 0) - #define SYS_MISC_MASTERSITE (1 << 14) -#define SYS_CFGCTRL_START (1 << 31) -#define SYS_CFGCTRL_WRITE (1 << 30) -#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26) -#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) -#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16) -#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12) -#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) - -#define SYS_CFGSTAT_ERR (1 << 1) -#define SYS_CFGSTAT_COMPLETE (1 << 0) - - -static void __iomem *vexpress_sysreg_base; -static struct device *vexpress_sysreg_dev; -static int vexpress_master_site; - -void vexpress_flags_set(u32 data) -{ - writel(~0, vexpress_sysreg_base + SYS_FLAGSCLR); - writel(data, vexpress_sysreg_base + SYS_FLAGSSET); -} +static void __iomem *__vexpress_sysreg_base; -u32 vexpress_get_procid(int site) +static void __iomem *vexpress_sysreg_base(void) { - if (site == VEXPRESS_SITE_MASTER) - site = vexpress_master_site; - - return readl(vexpress_sysreg_base + (site == VEXPRESS_SITE_DB1 ? 
- SYS_PROCID0 : SYS_PROCID1)); -} + if (!__vexpress_sysreg_base) { + struct device_node *node = of_find_compatible_node(NULL, NULL, + "arm,vexpress-sysreg"); -u32 vexpress_get_hbi(int site) -{ - u32 id; - - switch (site) { - case VEXPRESS_SITE_MB: - id = readl(vexpress_sysreg_base + SYS_ID); - return (id >> SYS_ID_HBI_SHIFT) & SYS_HBI_MASK; - case VEXPRESS_SITE_MASTER: - case VEXPRESS_SITE_DB1: - case VEXPRESS_SITE_DB2: - id = vexpress_get_procid(site); - return (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; + __vexpress_sysreg_base = of_iomap(node, 0); } - return ~0; -} + WARN_ON(!__vexpress_sysreg_base); -void __iomem *vexpress_get_24mhz_clock_base(void) -{ - return vexpress_sysreg_base + SYS_24MHZ; + return __vexpress_sysreg_base; } -static void vexpress_sysreg_find_prop(struct device_node *node, - const char *name, u32 *val) +static int vexpress_sysreg_get_master(void) { - of_node_get(node); - while (node) { - if (of_property_read_u32(node, name, val) == 0) { - of_node_put(node); - return; - } - node = of_get_next_parent(node); - } -} - -unsigned __vexpress_get_site(struct device *dev, struct device_node *node) -{ - u32 site = 0; - - WARN_ON(dev && node && dev->of_node != node); - if (dev && !node) - node = dev->of_node; - - if (node) { - vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site); - } else if (dev && dev->bus == &platform_bus_type) { - struct platform_device *pdev = to_platform_device(dev); - - if (pdev->num_resources == 1 && - pdev->resource[0].flags == IORESOURCE_BUS) - site = pdev->resource[0].start; - } else if (dev && strncmp(dev_name(dev), "ct:", 3) == 0) { - site = VEXPRESS_SITE_MASTER; - } + if (readl(vexpress_sysreg_base() + SYS_MISC) & SYS_MISC_MASTERSITE) + return VEXPRESS_SITE_DB2; - if (site == VEXPRESS_SITE_MASTER) - site = vexpress_master_site; - - return site; + return VEXPRESS_SITE_DB1; } - -struct vexpress_sysreg_config_func { - u32 template; - u32 device; -}; - -static struct vexpress_config_bridge *vexpress_sysreg_config_bridge; -static struct timer_list vexpress_sysreg_config_timer; -static u32 *vexpress_sysreg_config_data; -static int vexpress_sysreg_config_tries; - -static void *vexpress_sysreg_config_func_get(struct device *dev, - struct device_node *node) +void vexpress_flags_set(u32 data) { - struct vexpress_sysreg_config_func *config_func; - u32 site = 0; - u32 position = 0; - u32 dcc = 0; - u32 func_device[2]; - int err = -EFAULT; - - if (node) { - of_node_get(node); - vexpress_sysreg_find_prop(node, "arm,vexpress,site", &site); - vexpress_sysreg_find_prop(node, "arm,vexpress,position", - &position); - vexpress_sysreg_find_prop(node, "arm,vexpress,dcc", &dcc); - err = of_property_read_u32_array(node, - "arm,vexpress-sysreg,func", func_device, - ARRAY_SIZE(func_device)); - of_node_put(node); - } else if (dev && dev->bus == &platform_bus_type) { - struct platform_device *pdev = to_platform_device(dev); - - if (pdev->num_resources == 1 && - pdev->resource[0].flags == IORESOURCE_BUS) { - site = pdev->resource[0].start; - func_device[0] = pdev->resource[0].end; - func_device[1] = pdev->id; - err = 0; - } - } - if (err) - return NULL; - - config_func = kzalloc(sizeof(*config_func), GFP_KERNEL); - if (!config_func) - return NULL; - - config_func->template = SYS_CFGCTRL_DCC(dcc); - config_func->template |= SYS_CFGCTRL_FUNC(func_device[0]); - config_func->template |= SYS_CFGCTRL_SITE(site == VEXPRESS_SITE_MASTER ? 
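/*
 * vexpress_sysreg_base() above replaces a statically assigned
 * pointer with a memoised first-use lookup. The idiom in isolation:
 * map once, warn loudly if the DT node is missing, and return the
 * cached mapping ever after (no locking, matching the early-boot,
 * effectively single-threaded callers in this file):
 */
static void __iomem *demo_lazy_base(void)
{
	static void __iomem *base;	/* cached after the first call */

	if (!base) {
		struct device_node *node = of_find_compatible_node(NULL,
					NULL, "arm,vexpress-sysreg");

		base = of_iomap(node, 0);
	}
	WARN_ON(!base);

	return base;
}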
- vexpress_master_site : site); - config_func->template |= SYS_CFGCTRL_POSITION(position); - config_func->device |= func_device[1]; - - dev_dbg(vexpress_sysreg_dev, "func 0x%p = 0x%x, %d\n", config_func, - config_func->template, config_func->device); - - return config_func; + writel(~0, vexpress_sysreg_base() + SYS_FLAGSCLR); + writel(data, vexpress_sysreg_base() + SYS_FLAGSSET); } -static void vexpress_sysreg_config_func_put(void *func) +unsigned int vexpress_get_mci_cardin(struct device *dev) { - kfree(func); + return readl(vexpress_sysreg_base() + SYS_MCI) & SYS_MCI_CARDIN; } -static int vexpress_sysreg_config_func_exec(void *func, int offset, - bool write, u32 *data) +u32 vexpress_get_procid(int site) { - int status; - struct vexpress_sysreg_config_func *config_func = func; - u32 command; - - if (WARN_ON(!vexpress_sysreg_base)) - return -ENOENT; - - command = readl(vexpress_sysreg_base + SYS_CFGCTRL); - if (WARN_ON(command & SYS_CFGCTRL_START)) - return -EBUSY; - - command = SYS_CFGCTRL_START; - command |= write ? SYS_CFGCTRL_WRITE : 0; - command |= config_func->template; - command |= SYS_CFGCTRL_DEVICE(config_func->device + offset); - - /* Use a canary for reads */ - if (!write) - *data = 0xdeadbeef; - - dev_dbg(vexpress_sysreg_dev, "command %x, data %x\n", - command, *data); - writel(*data, vexpress_sysreg_base + SYS_CFGDATA); - writel(0, vexpress_sysreg_base + SYS_CFGSTAT); - writel(command, vexpress_sysreg_base + SYS_CFGCTRL); - mb(); - - if (vexpress_sysreg_dev) { - /* Schedule completion check */ - if (!write) - vexpress_sysreg_config_data = data; - vexpress_sysreg_config_tries = 100; - mod_timer(&vexpress_sysreg_config_timer, - jiffies + usecs_to_jiffies(100)); - status = VEXPRESS_CONFIG_STATUS_WAIT; - } else { - /* Early execution, no timer available, have to spin */ - u32 cfgstat; - - do { - cpu_relax(); - cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT); - } while (!cfgstat); - - if (!write && (cfgstat & SYS_CFGSTAT_COMPLETE)) - *data = readl(vexpress_sysreg_base + SYS_CFGDATA); - status = VEXPRESS_CONFIG_STATUS_DONE; - - if (cfgstat & SYS_CFGSTAT_ERR) - status = -EINVAL; - } + if (site == VEXPRESS_SITE_MASTER) + site = vexpress_sysreg_get_master(); - return status; + return readl(vexpress_sysreg_base() + (site == VEXPRESS_SITE_DB1 ? 
+ SYS_PROCID0 : SYS_PROCID1)); } -struct vexpress_config_bridge_info vexpress_sysreg_config_bridge_info = { - .name = "vexpress-sysreg", - .func_get = vexpress_sysreg_config_func_get, - .func_put = vexpress_sysreg_config_func_put, - .func_exec = vexpress_sysreg_config_func_exec, -}; - -static void vexpress_sysreg_config_complete(unsigned long data) +void __iomem *vexpress_get_24mhz_clock_base(void) { - int status = VEXPRESS_CONFIG_STATUS_DONE; - u32 cfgstat = readl(vexpress_sysreg_base + SYS_CFGSTAT); - - if (cfgstat & SYS_CFGSTAT_ERR) - status = -EINVAL; - if (!vexpress_sysreg_config_tries--) - status = -ETIMEDOUT; - - if (status < 0) { - dev_err(vexpress_sysreg_dev, "error %d\n", status); - } else if (!(cfgstat & SYS_CFGSTAT_COMPLETE)) { - mod_timer(&vexpress_sysreg_config_timer, - jiffies + usecs_to_jiffies(50)); - return; - } - - if (vexpress_sysreg_config_data) { - *vexpress_sysreg_config_data = readl(vexpress_sysreg_base + - SYS_CFGDATA); - dev_dbg(vexpress_sysreg_dev, "read data %x\n", - *vexpress_sysreg_config_data); - vexpress_sysreg_config_data = NULL; - } - - vexpress_config_complete(vexpress_sysreg_config_bridge, status); + return vexpress_sysreg_base() + SYS_24MHZ; } -void vexpress_sysreg_setup(struct device_node *node) -{ - if (WARN_ON(!vexpress_sysreg_base)) - return; - - if (readl(vexpress_sysreg_base + SYS_MISC) & SYS_MISC_MASTERSITE) - vexpress_master_site = VEXPRESS_SITE_DB2; - else - vexpress_master_site = VEXPRESS_SITE_DB1; - - vexpress_sysreg_config_bridge = vexpress_config_bridge_register( - node, &vexpress_sysreg_config_bridge_info); - WARN_ON(!vexpress_sysreg_config_bridge); -} - void __init vexpress_sysreg_early_init(void __iomem *base) { - vexpress_sysreg_base = base; - vexpress_sysreg_setup(NULL); -} - -void __init vexpress_sysreg_of_early_init(void) -{ - struct device_node *node; - - if (vexpress_sysreg_base) - return; + __vexpress_sysreg_base = base; - node = of_find_compatible_node(NULL, NULL, "arm,vexpress-sysreg"); - if (node) { - vexpress_sysreg_base = of_iomap(node, 0); - vexpress_sysreg_setup(node); - } + vexpress_config_set_master(vexpress_sysreg_get_master()); } -#ifdef CONFIG_GPIOLIB - -#define VEXPRESS_SYSREG_GPIO(_name, _reg, _value) \ - [VEXPRESS_GPIO_##_name] = { \ - .reg = _reg, \ - .value = _reg##_##_value, \ - } +/* The sysreg block is just a random collection of various functions... 
*/ -static struct vexpress_sysreg_gpio { - unsigned long reg; - u32 value; -} vexpress_sysreg_gpios[] = { - VEXPRESS_SYSREG_GPIO(MMC_CARDIN, SYS_MCI, CARDIN), - VEXPRESS_SYSREG_GPIO(MMC_WPROT, SYS_MCI, WPROT), - VEXPRESS_SYSREG_GPIO(FLASH_WPn, SYS_FLASH, WPn), - VEXPRESS_SYSREG_GPIO(LED0, SYS_LED, LED(0)), - VEXPRESS_SYSREG_GPIO(LED1, SYS_LED, LED(1)), - VEXPRESS_SYSREG_GPIO(LED2, SYS_LED, LED(2)), - VEXPRESS_SYSREG_GPIO(LED3, SYS_LED, LED(3)), - VEXPRESS_SYSREG_GPIO(LED4, SYS_LED, LED(4)), - VEXPRESS_SYSREG_GPIO(LED5, SYS_LED, LED(5)), - VEXPRESS_SYSREG_GPIO(LED6, SYS_LED, LED(6)), - VEXPRESS_SYSREG_GPIO(LED7, SYS_LED, LED(7)), +static struct syscon_platform_data vexpress_sysreg_sys_id_pdata = { + .label = "sys_id", }; -static int vexpress_sysreg_gpio_direction_input(struct gpio_chip *chip, - unsigned offset) -{ - return 0; -} - -static int vexpress_sysreg_gpio_get(struct gpio_chip *chip, - unsigned offset) -{ - struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset]; - u32 reg_value = readl(vexpress_sysreg_base + gpio->reg); - - return !!(reg_value & gpio->value); -} - -static void vexpress_sysreg_gpio_set(struct gpio_chip *chip, - unsigned offset, int value) -{ - struct vexpress_sysreg_gpio *gpio = &vexpress_sysreg_gpios[offset]; - u32 reg_value = readl(vexpress_sysreg_base + gpio->reg); - - if (value) - reg_value |= gpio->value; - else - reg_value &= ~gpio->value; - - writel(reg_value, vexpress_sysreg_base + gpio->reg); -} - -static int vexpress_sysreg_gpio_direction_output(struct gpio_chip *chip, - unsigned offset, int value) -{ - vexpress_sysreg_gpio_set(chip, offset, value); - - return 0; -} - -static struct gpio_chip vexpress_sysreg_gpio_chip = { - .label = "vexpress-sysreg", - .direction_input = vexpress_sysreg_gpio_direction_input, - .direction_output = vexpress_sysreg_gpio_direction_output, - .get = vexpress_sysreg_gpio_get, - .set = vexpress_sysreg_gpio_set, - .ngpio = ARRAY_SIZE(vexpress_sysreg_gpios), - .base = 0, +static struct bgpio_pdata vexpress_sysreg_sys_led_pdata = { + .label = "sys_led", + .base = -1, + .ngpio = 8, }; - -#define VEXPRESS_SYSREG_GREEN_LED(_name, _default_trigger, _gpio) \ - { \ - .name = "v2m:green:"_name, \ - .default_trigger = _default_trigger, \ - .gpio = VEXPRESS_GPIO_##_gpio, \ - } - -struct gpio_led vexpress_sysreg_leds[] = { - VEXPRESS_SYSREG_GREEN_LED("user1", "heartbeat", LED0), - VEXPRESS_SYSREG_GREEN_LED("user2", "mmc0", LED1), - VEXPRESS_SYSREG_GREEN_LED("user3", "cpu0", LED2), - VEXPRESS_SYSREG_GREEN_LED("user4", "cpu1", LED3), - VEXPRESS_SYSREG_GREEN_LED("user5", "cpu2", LED4), - VEXPRESS_SYSREG_GREEN_LED("user6", "cpu3", LED5), - VEXPRESS_SYSREG_GREEN_LED("user7", "cpu4", LED6), - VEXPRESS_SYSREG_GREEN_LED("user8", "cpu5", LED7), +static struct bgpio_pdata vexpress_sysreg_sys_mci_pdata = { + .label = "sys_mci", + .base = -1, + .ngpio = 2, }; -struct gpio_led_platform_data vexpress_sysreg_leds_pdata = { - .num_leds = ARRAY_SIZE(vexpress_sysreg_leds), - .leds = vexpress_sysreg_leds, +static struct bgpio_pdata vexpress_sysreg_sys_flash_pdata = { + .label = "sys_flash", + .base = -1, + .ngpio = 1, }; -#endif - +static struct syscon_platform_data vexpress_sysreg_sys_misc_pdata = { + .label = "sys_misc", +}; -static ssize_t vexpress_sysreg_sys_id_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "0x%08x\n", readl(vexpress_sysreg_base + SYS_ID)); -} +static struct syscon_platform_data vexpress_sysreg_sys_procid_pdata = { + .label = "sys_procid", +}; -DEVICE_ATTR(sys_id, S_IRUGO, 
vexpress_sysreg_sys_id_show, NULL); +static struct mfd_cell vexpress_sysreg_cells[] = { + { + .name = "syscon", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM(SYS_ID, 0x4), + }, + .platform_data = &vexpress_sysreg_sys_id_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_id_pdata), + }, { + .name = "basic-mmio-gpio", + .of_compatible = "arm,vexpress-sysreg,sys_led", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM_NAMED(SYS_LED, 0x4, "dat"), + }, + .platform_data = &vexpress_sysreg_sys_led_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_led_pdata), + }, { + .name = "basic-mmio-gpio", + .of_compatible = "arm,vexpress-sysreg,sys_mci", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM_NAMED(SYS_MCI, 0x4, "dat"), + }, + .platform_data = &vexpress_sysreg_sys_mci_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_mci_pdata), + }, { + .name = "basic-mmio-gpio", + .of_compatible = "arm,vexpress-sysreg,sys_flash", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM_NAMED(SYS_FLASH, 0x4, "dat"), + }, + .platform_data = &vexpress_sysreg_sys_flash_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_flash_pdata), + }, { + .name = "syscon", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM(SYS_MISC, 0x4), + }, + .platform_data = &vexpress_sysreg_sys_misc_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_misc_pdata), + }, { + .name = "syscon", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM(SYS_PROCID0, 0x8), + }, + .platform_data = &vexpress_sysreg_sys_procid_pdata, + .pdata_size = sizeof(vexpress_sysreg_sys_procid_pdata), + }, { + .name = "vexpress-syscfg", + .num_resources = 1, + .resources = (struct resource []) { + DEFINE_RES_MEM(SYS_CFGDATA, 0xc), + }, + } +}; static int vexpress_sysreg_probe(struct platform_device *pdev) { - int err; - struct resource *res = platform_get_resource(pdev, - IORESOURCE_MEM, 0); - - if (!devm_request_mem_region(&pdev->dev, res->start, - resource_size(res), pdev->name)) { - dev_err(&pdev->dev, "Failed to request memory region!\n"); - return -EBUSY; - } + struct resource *mem; + void __iomem *base; + struct bgpio_chip *mmc_gpio_chip; + u32 dt_hbi; - if (!vexpress_sysreg_base) { - vexpress_sysreg_base = devm_ioremap(&pdev->dev, res->start, - resource_size(res)); - vexpress_sysreg_setup(pdev->dev.of_node); - } + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; - if (!vexpress_sysreg_base) { - dev_err(&pdev->dev, "Failed to obtain base address!\n"); - return -EFAULT; - } + base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + if (!base) + return -ENOMEM; - setup_timer(&vexpress_sysreg_config_timer, - vexpress_sysreg_config_complete, 0); + vexpress_config_set_master(vexpress_sysreg_get_master()); - vexpress_sysreg_dev = &pdev->dev; + /* Confirm board type against DT property, if available */ + if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { + u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER); + u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; -#ifdef CONFIG_GPIOLIB - vexpress_sysreg_gpio_chip.dev = &pdev->dev; - err = gpiochip_add(&vexpress_sysreg_gpio_chip); - if (err) { - vexpress_config_bridge_unregister( - vexpress_sysreg_config_bridge); - dev_err(&pdev->dev, "Failed to register GPIO chip! 
(%d)\n", - err); - return err; + if (WARN_ON(dt_hbi != hbi)) + dev_warn(&pdev->dev, "DT HBI (%x) is not matching hardware (%x)!\n", + dt_hbi, hbi); } - platform_device_register_data(vexpress_sysreg_dev, "leds-gpio", - PLATFORM_DEVID_AUTO, &vexpress_sysreg_leds_pdata, - sizeof(vexpress_sysreg_leds_pdata)); -#endif - - device_create_file(vexpress_sysreg_dev, &dev_attr_sys_id); - - return 0; + /* + * Duplicated SYS_MCI pseudo-GPIO controller for compatibility with + * older trees using sysreg node for MMC control lines. + */ + mmc_gpio_chip = devm_kzalloc(&pdev->dev, sizeof(*mmc_gpio_chip), + GFP_KERNEL); + if (!mmc_gpio_chip) + return -ENOMEM; + bgpio_init(mmc_gpio_chip, &pdev->dev, 0x4, base + SYS_MCI, + NULL, NULL, NULL, NULL, 0); + mmc_gpio_chip->gc.ngpio = 2; + gpiochip_add(&mmc_gpio_chip->gc); + + return mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, + vexpress_sysreg_cells, + ARRAY_SIZE(vexpress_sysreg_cells), mem, 0, NULL); } static const struct of_device_id vexpress_sysreg_match[] = { @@ -522,7 +265,12 @@ static struct platform_driver vexpress_sysreg_driver = { static int __init vexpress_sysreg_init(void) { - vexpress_sysreg_of_early_init(); + struct device_node *node; + + /* Need the sysreg early, before any other device... */ + for_each_matching_node(node, vexpress_sysreg_match) + of_platform_device_create(node, NULL, NULL); + return platform_driver_register(&vexpress_sysreg_driver); } core_initcall(vexpress_sysreg_init); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 742e67901bf..a43d0c46727 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -516,6 +516,15 @@ config SRAM the genalloc API. It is supposed to be used for small on-chip SRAM areas found on many SoCs. +config VEXPRESS_SYSCFG + bool "Versatile Express System Configuration driver" + depends on VEXPRESS_CONFIG + default y + help + ARM Ltd. Versatile Express uses specialised platform configuration + bus. System Configuration interface is one of the possible means + of generating transactions on this bus. + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 7eb4b69580c..d59ce1261b3 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -55,3 +55,4 @@ obj-$(CONFIG_SRAM) += sram.o obj-y += mic/ obj-$(CONFIG_GENWQE) += genwqe/ obj-$(CONFIG_ECHO) += echo/ +obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c index 5babf4013f4..62cc6bb3f62 100644 --- a/drivers/misc/genwqe/card_utils.c +++ b/drivers/misc/genwqe/card_utils.c @@ -718,7 +718,7 @@ int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count) int rc; struct pci_dev *pci_dev = cd->pci_dev; - rc = pci_enable_msi_block(pci_dev, count); + rc = pci_enable_msi_exact(pci_dev, count); if (rc == 0) cd->flags |= GENWQE_FLAG_MSI_ENABLED; return rc; diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c new file mode 100644 index 00000000000..73068e50e56 --- /dev/null +++ b/drivers/misc/vexpress-syscfg.c @@ -0,0 +1,324 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * Copyright (C) 2014 ARM Limited + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/syscore_ops.h> +#include <linux/vexpress.h> + + +#define SYS_CFGDATA 0x0 + +#define SYS_CFGCTRL 0x4 +#define SYS_CFGCTRL_START (1 << 31) +#define SYS_CFGCTRL_WRITE (1 << 30) +#define SYS_CFGCTRL_DCC(n) (((n) & 0xf) << 26) +#define SYS_CFGCTRL_FUNC(n) (((n) & 0x3f) << 20) +#define SYS_CFGCTRL_SITE(n) (((n) & 0x3) << 16) +#define SYS_CFGCTRL_POSITION(n) (((n) & 0xf) << 12) +#define SYS_CFGCTRL_DEVICE(n) (((n) & 0xfff) << 0) + +#define SYS_CFGSTAT 0x8 +#define SYS_CFGSTAT_ERR (1 << 1) +#define SYS_CFGSTAT_COMPLETE (1 << 0) + + +struct vexpress_syscfg { + struct device *dev; + void __iomem *base; + struct list_head funcs; +}; + +struct vexpress_syscfg_func { + struct list_head list; + struct vexpress_syscfg *syscfg; + struct regmap *regmap; + int num_templates; + u32 template[0]; /* Keep it last! */ +}; + + +static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func, + int index, bool write, u32 *data) +{ + struct vexpress_syscfg *syscfg = func->syscfg; + u32 command, status; + int tries; + long timeout; + + if (WARN_ON(index > func->num_templates)) + return -EINVAL; + + command = readl(syscfg->base + SYS_CFGCTRL); + if (WARN_ON(command & SYS_CFGCTRL_START)) + return -EBUSY; + + command = func->template[index]; + command |= SYS_CFGCTRL_START; + command |= write ? SYS_CFGCTRL_WRITE : 0; + + /* Use a canary for reads */ + if (!write) + *data = 0xdeadbeef; + + dev_dbg(syscfg->dev, "func %p, command %x, data %x\n", + func, command, *data); + writel(*data, syscfg->base + SYS_CFGDATA); + writel(0, syscfg->base + SYS_CFGSTAT); + writel(command, syscfg->base + SYS_CFGCTRL); + mb(); + + /* The operation can take ages... 
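
The polling loop below is the heart of the new driver: one transaction on the Versatile Express configuration bus is a simple mailbox handshake over three registers. As a self-contained sketch (register offsets as defined above; a bounded busy-wait stands in for the driver's sleep-and-retry policy, and "template" is the pre-encoded DCC/function/site/position/device word):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

static int example_syscfg_xfer(void __iomem *base, u32 template,
			       bool write, u32 *data)
{
	u32 status;
	int tries = 1000;

	/* A previous transaction still in flight? */
	if (readl(base + SYS_CFGCTRL) & SYS_CFGCTRL_START)
		return -EBUSY;

	writel(write ? *data : 0, base + SYS_CFGDATA);
	writel(0, base + SYS_CFGSTAT);
	writel(template | SYS_CFGCTRL_START |
	       (write ? SYS_CFGCTRL_WRITE : 0), base + SYS_CFGCTRL);

	do {
		status = readl(base + SYS_CFGSTAT);
		if (status & SYS_CFGSTAT_ERR)
			return -EFAULT;
	} while (--tries && !(status & SYS_CFGSTAT_COMPLETE));

	if (!tries)
		return -ETIMEDOUT;

	if (!write)
		*data = readl(base + SYS_CFGDATA);
	return 0;
}

Consumers never call anything like this directly: the regmap built further down routes regmap_read()/regmap_write() at index i into exactly one such transaction, using func->template[i] as the control word.
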
Go to sleep, 100us initially */ + tries = 100; + timeout = 100; + do { + if (!irqs_disabled()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(usecs_to_jiffies(timeout)); + if (signal_pending(current)) + return -EINTR; + } else { + udelay(timeout); + } + + status = readl(syscfg->base + SYS_CFGSTAT); + if (status & SYS_CFGSTAT_ERR) + return -EFAULT; + + if (timeout > 20) + timeout -= 20; + } while (--tries && !(status & SYS_CFGSTAT_COMPLETE)); + if (WARN_ON_ONCE(!tries)) + return -ETIMEDOUT; + + if (!write) { + *data = readl(syscfg->base + SYS_CFGDATA); + dev_dbg(syscfg->dev, "func %p, read data %x\n", func, *data); + } + + return 0; +} + +static int vexpress_syscfg_read(void *context, unsigned int index, + unsigned int *val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, false, val); +} + +static int vexpress_syscfg_write(void *context, unsigned int index, + unsigned int val) +{ + struct vexpress_syscfg_func *func = context; + + return vexpress_syscfg_exec(func, index, true, &val); +} + +struct regmap_config vexpress_syscfg_regmap_config = { + .lock = vexpress_config_lock, + .unlock = vexpress_config_unlock, + .reg_bits = 32, + .val_bits = 32, + .reg_read = vexpress_syscfg_read, + .reg_write = vexpress_syscfg_write, + .reg_format_endian = REGMAP_ENDIAN_LITTLE, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + + +static struct regmap *vexpress_syscfg_regmap_init(struct device *dev, + void *context) +{ + struct platform_device *pdev = to_platform_device(dev); + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func; + struct property *prop; + const __be32 *val = NULL; + __be32 energy_quirk[4]; + int num; + u32 site, position, dcc; + int i; + + if (dev->of_node) { + int err = vexpress_config_get_topo(dev->of_node, &site, + &position, &dcc); + + if (err) + return ERR_PTR(err); + + prop = of_find_property(dev->of_node, + "arm,vexpress-sysreg,func", NULL); + if (!prop) + return ERR_PTR(-EINVAL); + + num = prop->length / sizeof(u32) / 2; + val = prop->value; + } else { + if (pdev->num_resources != 1 || + pdev->resource[0].flags != IORESOURCE_BUS) + return ERR_PTR(-EFAULT); + + site = pdev->resource[0].start; + if (site == VEXPRESS_SITE_MASTER) + site = vexpress_config_get_master(); + position = 0; + dcc = 0; + num = 1; + } + + /* + * "arm,vexpress-energy" function used to be described + * by its first device only, now it requires both + */ + if (num == 1 && of_device_is_compatible(dev->of_node, + "arm,vexpress-energy")) { + num = 2; + energy_quirk[0] = *val; + energy_quirk[2] = *val++; + energy_quirk[1] = *val; + energy_quirk[3] = cpu_to_be32(be32_to_cpup(val) + 1); + val = energy_quirk; + } + + func = kzalloc(sizeof(*func) + sizeof(*func->template) * num, + GFP_KERNEL); + if (!func) + return NULL; + + func->syscfg = syscfg; + func->num_templates = num; + + for (i = 0; i < num; i++) { + u32 function, device; + + if (dev->of_node) { + function = be32_to_cpup(val++); + device = be32_to_cpup(val++); + } else { + function = pdev->resource[0].end; + device = pdev->id; + } + + dev_dbg(dev, "func %p: %u/%u/%u/%u/%u\n", + func, site, position, dcc, + function, device); + + func->template[i] = SYS_CFGCTRL_DCC(dcc); + func->template[i] |= SYS_CFGCTRL_SITE(site); + func->template[i] |= SYS_CFGCTRL_POSITION(position); + func->template[i] |= SYS_CFGCTRL_FUNC(function); + func->template[i] |= SYS_CFGCTRL_DEVICE(device); + } + + vexpress_syscfg_regmap_config.max_register = num - 1; + + func->regmap = regmap_init(dev, NULL, 
func, + &vexpress_syscfg_regmap_config); + + if (IS_ERR(func->regmap)) + kfree(func); + else + list_add(&func->list, &syscfg->funcs); + + return func->regmap; +} + +static void vexpress_syscfg_regmap_exit(struct regmap *regmap, void *context) +{ + struct vexpress_syscfg *syscfg = context; + struct vexpress_syscfg_func *func, *tmp; + + regmap_exit(regmap); + + list_for_each_entry_safe(func, tmp, &syscfg->funcs, list) { + if (func->regmap == regmap) { + list_del(&syscfg->funcs); + kfree(func); + break; + } + } +} + +static struct vexpress_config_bridge_ops vexpress_syscfg_bridge_ops = { + .regmap_init = vexpress_syscfg_regmap_init, + .regmap_exit = vexpress_syscfg_regmap_exit, +}; + + +/* Non-DT hack, to be gone... */ +static struct device *vexpress_syscfg_bridge; + +int vexpress_syscfg_device_register(struct platform_device *pdev) +{ + pdev->dev.parent = vexpress_syscfg_bridge; + + return platform_device_register(pdev); +} + + +int vexpress_syscfg_probe(struct platform_device *pdev) +{ + struct vexpress_syscfg *syscfg; + struct resource *res; + struct device *bridge; + + syscfg = devm_kzalloc(&pdev->dev, sizeof(*syscfg), GFP_KERNEL); + if (!syscfg) + return -ENOMEM; + syscfg->dev = &pdev->dev; + INIT_LIST_HEAD(&syscfg->funcs); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), pdev->name)) + return -EBUSY; + + syscfg->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); + if (!syscfg->base) + return -EFAULT; + + /* Must use dev.parent (MFD), as that's where DT phandle points at... */ + bridge = vexpress_config_bridge_register(pdev->dev.parent, + &vexpress_syscfg_bridge_ops, syscfg); + if (IS_ERR(bridge)) + return PTR_ERR(bridge); + + /* Non-DT case */ + if (!pdev->dev.of_node) + vexpress_syscfg_bridge = bridge; + + return 0; +} + +static const struct platform_device_id vexpress_syscfg_id_table[] = { + { "vexpress-syscfg", }, + {}, +}; + +static struct platform_driver vexpress_syscfg_driver = { + .driver.name = "vexpress-syscfg", + .id_table = vexpress_syscfg_id_table, + .probe = vexpress_syscfg_probe, +}; + +static int __init vexpress_syscfg_init(void) +{ + return platform_driver_register(&vexpress_syscfg_driver); +} +core_initcall(vexpress_syscfg_init); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 771c60ab4a3..a084edd37af 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -13,6 +13,7 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> +#include <linux/io.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -23,6 +24,7 @@ #include <linux/mmc/pm.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h> #include <linux/amba/bus.h> #include <linux/clk.h> #include <linux/scatterlist.h> @@ -364,7 +366,6 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) #ifdef CONFIG_DMA_ENGINE static void mmci_dma_setup(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; const char *rxname, *txname; dma_cap_mask_t mask; @@ -378,25 +379,6 @@ static void mmci_dma_setup(struct mmci_host *host) dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - if (plat && plat->dma_filter) { - if (!host->dma_rx_channel && plat->dma_rx_param) { - host->dma_rx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_rx_param); - /* E.g if no DMA hardware is present */ - if (!host->dma_rx_channel) - dev_err(mmc_dev(host->mmc), "no RX DMA 
channel\n"); - } - - if (!host->dma_tx_channel && plat->dma_tx_param) { - host->dma_tx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_tx_param); - if (!host->dma_tx_channel) - dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); - } - } - /* * If only an RX channel is specified, the driver will * attempt to use it bidirectionally, however if it is @@ -444,11 +426,9 @@ static void mmci_dma_setup(struct mmci_host *host) */ static inline void mmci_dma_release(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; - if (host->dma_rx_channel) dma_release_channel(host->dma_rx_channel); - if (host->dma_tx_channel && plat->dma_tx_param) + if (host->dma_tx_channel) dma_release_channel(host->dma_tx_channel); host->dma_rx_channel = host->dma_tx_channel = NULL; } @@ -1285,7 +1265,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) * indicating signal direction for the signals in * the SD/MMC bus and feedback-clock usage. */ - pwr |= host->plat->sigdir; + pwr |= host->pwr_reg_add; if (ios->bus_width == MMC_BUS_WIDTH_4) pwr &= ~MCI_ST_DATA74DIREN; @@ -1326,35 +1306,18 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) pm_runtime_put_autosuspend(mmc_dev(mmc)); } -static int mmci_get_ro(struct mmc_host *mmc) -{ - struct mmci_host *host = mmc_priv(mmc); - - if (host->gpio_wp == -ENOSYS) - return -ENOSYS; - - return gpio_get_value_cansleep(host->gpio_wp); -} - static int mmci_get_cd(struct mmc_host *mmc) { struct mmci_host *host = mmc_priv(mmc); struct mmci_platform_data *plat = host->plat; - unsigned int status; + unsigned int status = mmc_gpio_get_cd(mmc); - if (host->gpio_cd == -ENOSYS) { + if (status == -ENOSYS) { if (!plat->status) return 1; /* Assume always present */ status = plat->status(mmc_dev(host->mmc)); - } else - status = !!gpio_get_value_cansleep(host->gpio_cd) - ^ plat->cd_invert; - - /* - * Use positive logic throughout - status is zero for no card, - * non-zero for card inserted. 
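
The rewritten mmci_get_cd() above leans on the slot-gpio helpers: mmc_gpio_get_cd() keeps returning -ENOSYS until a card-detect GPIO has been registered, which is what keeps the platform-data status() callback working as a fallback. Registering the GPIO (done later in probe for the legacy, non-DT case) is a single call; a minimal sketch assuming a hypothetical GPIO number 42:

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>

static int example_cd_setup(struct mmc_host *mmc)
{
	int ret;

	/* 0 = no extra debounce; the core manages the CD interrupt */
	ret = mmc_gpio_request_cd(mmc, 42, 0);
	if (ret)
		return ret;

	/* mmc_gpio_get_cd(mmc) now returns 0/1 instead of -ENOSYS */
	return 0;
}
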
- */ + } return status; } @@ -1391,70 +1354,44 @@ static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) return ret; } -static irqreturn_t mmci_cd_irq(int irq, void *dev_id) -{ - struct mmci_host *host = dev_id; - - mmc_detect_change(host->mmc, msecs_to_jiffies(500)); - - return IRQ_HANDLED; -} - static struct mmc_host_ops mmci_ops = { .request = mmci_request, .pre_req = mmci_pre_request, .post_req = mmci_post_request, .set_ios = mmci_set_ios, - .get_ro = mmci_get_ro, + .get_ro = mmc_gpio_get_ro, .get_cd = mmci_get_cd, .start_signal_voltage_switch = mmci_sig_volt_switch, }; -#ifdef CONFIG_OF -static void mmci_dt_populate_generic_pdata(struct device_node *np, - struct mmci_platform_data *pdata) +static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) { - int bus_width = 0; - - pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); - pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); + struct mmci_host *host = mmc_priv(mmc); + int ret = mmc_of_parse(mmc); - if (of_get_property(np, "cd-inverted", NULL)) - pdata->cd_invert = true; - else - pdata->cd_invert = false; + if (ret) + return ret; - of_property_read_u32(np, "max-frequency", &pdata->f_max); - if (!pdata->f_max) - pr_warn("%s has no 'max-frequency' property\n", np->full_name); + if (of_get_property(np, "st,sig-dir-dat0", NULL)) + host->pwr_reg_add |= MCI_ST_DATA0DIREN; + if (of_get_property(np, "st,sig-dir-dat2", NULL)) + host->pwr_reg_add |= MCI_ST_DATA2DIREN; + if (of_get_property(np, "st,sig-dir-dat31", NULL)) + host->pwr_reg_add |= MCI_ST_DATA31DIREN; + if (of_get_property(np, "st,sig-dir-dat74", NULL)) + host->pwr_reg_add |= MCI_ST_DATA74DIREN; + if (of_get_property(np, "st,sig-dir-cmd", NULL)) + host->pwr_reg_add |= MCI_ST_CMDDIREN; + if (of_get_property(np, "st,sig-pin-fbclk", NULL)) + host->pwr_reg_add |= MCI_ST_FBCLKEN; if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) - pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) - pdata->capabilities |= MMC_CAP_SD_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; - of_property_read_u32(np, "bus-width", &bus_width); - switch (bus_width) { - case 0 : - /* No bus-width supplied. 
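
Everything this deleted helper parsed by hand - wp-gpios/cd-gpios, cd-inverted, max-frequency and bus-width - is standard MMC binding material handled by the common mmc_of_parse() helper, so the replacement above only keeps the ST-specific st,sig-* switches and the driver's legacy mmc-cap-*-highspeed spellings. A minimal sketch of a probe relying on the helper (a hypothetical driver, not mmci itself):

#include <linux/mmc/host.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	/*
	 * Fills mmc->caps/caps2, f_max and the bus width, and requests
	 * any cd-gpios/wp-gpios described in the device tree node.
	 */
	ret = mmc_of_parse(mmc);
	if (ret)
		goto err_free;

	/* ... controller-specific setup, then mmc_add_host(mmc) ... */
	return 0;

err_free:
	mmc_free_host(mmc);
	return ret;
}
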
*/ - break; - case 4 : - pdata->capabilities |= MMC_CAP_4_BIT_DATA; - break; - case 8 : - pdata->capabilities |= MMC_CAP_8_BIT_DATA; - break; - default : - pr_warn("%s: Unsupported bus width\n", np->full_name); - } -} -#else -static void mmci_dt_populate_generic_pdata(struct device_node *np, - struct mmci_platform_data *pdata) -{ - return; + return 0; } -#endif static int mmci_probe(struct amba_device *dev, const struct amba_id *id) @@ -1478,26 +1415,17 @@ static int mmci_probe(struct amba_device *dev, return -ENOMEM; } - if (np) - mmci_dt_populate_generic_pdata(np, plat); + mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); + if (!mmc) + return -ENOMEM; - ret = amba_request_regions(dev, DRIVER_NAME); + ret = mmci_of_parse(np, mmc); if (ret) - goto out; - - mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); - if (!mmc) { - ret = -ENOMEM; - goto rel_regions; - } + goto host_free; host = mmc_priv(mmc); host->mmc = mmc; - host->gpio_wp = -ENOSYS; - host->gpio_cd = -ENOSYS; - host->gpio_cd_irq = -1; - host->hw_designer = amba_manf(dev); host->hw_revision = amba_rev(dev); dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); @@ -1529,10 +1457,11 @@ static int mmci_probe(struct amba_device *dev, dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", host->mclk); } + host->phybase = dev->res.start; - host->base = ioremap(dev->res.start, resource_size(&dev->res)); - if (!host->base) { - ret = -ENOMEM; + host->base = devm_ioremap_resource(&dev->dev, &dev->res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); goto clk_disable; } @@ -1546,15 +1475,13 @@ static int mmci_probe(struct amba_device *dev, else mmc->f_min = DIV_ROUND_UP(host->mclk, 512); /* - * If the platform data supplies a maximum operating - * frequency, this takes precedence. Else, we fall back - * to using the module parameter, which has a (low) - * default value in case it is not specified. Either - * value must not exceed the clock rate into the block, - * of course. + * If no maximum operating frequency is supplied, fall back to use + * the module parameter, which has a (low) default value in case it + * is not specified. Either value must not exceed the clock rate into + * the block, of course. */ - if (plat->f_max) - mmc->f_max = min(host->mclk, plat->f_max); + if (mmc->f_max) + mmc->f_max = min(host->mclk, mmc->f_max); else mmc->f_max = min(host->mclk, fmax); dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); @@ -1566,8 +1493,15 @@ static int mmci_probe(struct amba_device *dev, else if (plat->ocr_mask) dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); - mmc->caps = plat->capabilities; - mmc->caps2 = plat->capabilities2; + /* DT takes precedence over platform data. */ + if (!np) { + if (!plat->cd_invert) + mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + } + + /* We support these capabilities. */ + mmc->caps |= MMC_CAP_CMD23; if (variant->busy_detect) { mmci_ops.card_busy = mmci_card_busy; @@ -1579,7 +1513,7 @@ static int mmci_probe(struct amba_device *dev, mmc->ops = &mmci_ops; /* We support these PM capabilities. 
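
The caps2 handling for the non-DT case a few lines up is about polarity, not features: the old mmci_get_cd() computed !!gpio_get_value_cansleep(gpio_cd) ^ plat->cd_invert, i.e. it treated the GPIO as active-high unless cd_invert was set, whereas the slot-gpio helpers default to active-low card detect. The mapping, summarised (based on the 3.15-era slot-gpio defaults; an interpretation, not code from the patch):

/*
 * Legacy (non-DT) polarity preservation:
 *
 *   plat->cd_invert == false -> set MMC_CAP2_CD_ACTIVE_HIGH
 *                               (GPIO high = card present, as before)
 *   plat->cd_invert == true  -> leave the cap clear; slot-gpio's
 *                               active-low default matches the old
 *                               inverted behaviour
 *
 * Write protect was always read straight from the GPIO (high =
 * read-only), hence MMC_CAP2_RO_ACTIVE_HIGH is set unconditionally.
 */
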
*/ - mmc->pm_caps = MMC_PM_KEEP_POWER; + mmc->pm_caps |= MMC_PM_KEEP_POWER; /* * We can do SGIO @@ -1616,62 +1550,30 @@ static int mmci_probe(struct amba_device *dev, writel(0, host->base + MMCIMASK1); writel(0xfff, host->base + MMCICLEAR); - if (plat->gpio_cd == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_gpio_cd; - } - if (gpio_is_valid(plat->gpio_cd)) { - ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); - if (ret == 0) - ret = gpio_direction_input(plat->gpio_cd); - if (ret == 0) - host->gpio_cd = plat->gpio_cd; - else if (ret != -ENOSYS) - goto err_gpio_cd; - - /* - * A gpio pin that will detect cards when inserted and removed - * will most likely want to trigger on the edges if it is - * 0 when ejected and 1 when inserted (or mutatis mutandis - * for the inverted case) so we request triggers on both - * edges. - */ - ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), - mmci_cd_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - DRIVER_NAME " (cd)", host); - if (ret >= 0) - host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); - } - if (plat->gpio_wp == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; - goto err_gpio_wp; + /* If DT, cd/wp gpios must be supplied through it. */ + if (!np && gpio_is_valid(plat->gpio_cd)) { + ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0); + if (ret) + goto clk_disable; } - if (gpio_is_valid(plat->gpio_wp)) { - ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); - if (ret == 0) - ret = gpio_direction_input(plat->gpio_wp); - if (ret == 0) - host->gpio_wp = plat->gpio_wp; - else if (ret != -ENOSYS) - goto err_gpio_wp; + if (!np && gpio_is_valid(plat->gpio_wp)) { + ret = mmc_gpio_request_ro(mmc, plat->gpio_wp); + if (ret) + goto clk_disable; } - if ((host->plat->status || host->gpio_cd != -ENOSYS) - && host->gpio_cd_irq < 0) - mmc->caps |= MMC_CAP_NEEDS_POLL; - - ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); + ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); if (ret) - goto unmap; + goto clk_disable; if (!dev->irq[1]) host->singleirq = true; else { - ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, - DRIVER_NAME " (pio)", host); + ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq, + IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) - goto irq0_free; + goto clk_disable; } writel(MCI_IRQENABLE, host->base + MMCIMASK0); @@ -1693,25 +1595,10 @@ static int mmci_probe(struct amba_device *dev, return 0; - irq0_free: - free_irq(dev->irq[0], host); - unmap: - if (host->gpio_wp != -ENOSYS) - gpio_free(host->gpio_wp); - err_gpio_wp: - if (host->gpio_cd_irq >= 0) - free_irq(host->gpio_cd_irq, host); - if (host->gpio_cd != -ENOSYS) - gpio_free(host->gpio_cd); - err_gpio_cd: - iounmap(host->base); clk_disable: clk_disable_unprepare(host->clk); host_free: mmc_free_host(mmc); - rel_regions: - amba_release_regions(dev); - out: return ret; } @@ -1737,92 +1624,46 @@ static int mmci_remove(struct amba_device *dev) writel(0, host->base + MMCIDATACTRL); mmci_dma_release(host); - free_irq(dev->irq[0], host); - if (!host->singleirq) - free_irq(dev->irq[1], host); - - if (host->gpio_wp != -ENOSYS) - gpio_free(host->gpio_wp); - if (host->gpio_cd_irq >= 0) - free_irq(host->gpio_cd_irq, host); - if (host->gpio_cd != -ENOSYS) - gpio_free(host->gpio_cd); - - iounmap(host->base); clk_disable_unprepare(host->clk); - mmc_free_host(mmc); - - amba_release_regions(dev); - } - - return 0; -} - -#ifdef CONFIG_SUSPEND -static int mmci_suspend(struct device *dev) -{ - struct 
amba_device *adev = to_amba_device(dev); - struct mmc_host *mmc = amba_get_drvdata(adev); - - if (mmc) { - struct mmci_host *host = mmc_priv(mmc); - pm_runtime_get_sync(dev); - writel(0, host->base + MMCIMASK0); } return 0; } -static int mmci_resume(struct device *dev) -{ - struct amba_device *adev = to_amba_device(dev); - struct mmc_host *mmc = amba_get_drvdata(adev); - - if (mmc) { - struct mmci_host *host = mmc_priv(mmc); - writel(MCI_IRQENABLE, host->base + MMCIMASK0); - pm_runtime_put(dev); - } - - return 0; -} -#endif - -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static void mmci_save(struct mmci_host *host) { unsigned long flags; - if (host->variant->pwrreg_nopower) { - spin_lock_irqsave(&host->lock, flags); + spin_lock_irqsave(&host->lock, flags); - writel(0, host->base + MMCIMASK0); + writel(0, host->base + MMCIMASK0); + if (host->variant->pwrreg_nopower) { writel(0, host->base + MMCIDATACTRL); writel(0, host->base + MMCIPOWER); writel(0, host->base + MMCICLOCK); - mmci_reg_delay(host); - - spin_unlock_irqrestore(&host->lock, flags); } + mmci_reg_delay(host); + spin_unlock_irqrestore(&host->lock, flags); } static void mmci_restore(struct mmci_host *host) { unsigned long flags; - if (host->variant->pwrreg_nopower) { - spin_lock_irqsave(&host->lock, flags); + spin_lock_irqsave(&host->lock, flags); + if (host->variant->pwrreg_nopower) { writel(host->clk_reg, host->base + MMCICLOCK); writel(host->datactrl_reg, host->base + MMCIDATACTRL); writel(host->pwr_reg, host->base + MMCIPOWER); - writel(MCI_IRQENABLE, host->base + MMCIMASK0); - mmci_reg_delay(host); - - spin_unlock_irqrestore(&host->lock, flags); } + writel(MCI_IRQENABLE, host->base + MMCIMASK0); + mmci_reg_delay(host); + + spin_unlock_irqrestore(&host->lock, flags); } static int mmci_runtime_suspend(struct device *dev) @@ -1857,8 +1698,9 @@ static int mmci_runtime_resume(struct device *dev) #endif static const struct dev_pm_ops mmci_dev_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume) - SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) }; static struct amba_id mmci_ids[] = { diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 58b1b8896bf..347d942d740 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -13,6 +13,16 @@ #define MCI_PWR_ON 0x03 #define MCI_OD (1 << 6) #define MCI_ROD (1 << 7) +/* + * The ST Micro version does not have ROD and reuse the voltage registers for + * direction settings. 
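
A few hunks up, the dedicated mmci_suspend()/mmci_resume() system-sleep handlers disappear: system sleep is now expressed as "force a runtime suspend". The resulting dev_pm_ops shape, as a generic sketch (the example_* callbacks are placeholders; the patch itself spells the runtime entry with SET_PM_RUNTIME_PM_OPS, which differs from SET_RUNTIME_PM_OPS only in depending on CONFIG_PM_RUNTIME rather than CONFIG_PM):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* save context, gate clocks ... */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore context ... */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	/* System sleep reuses the runtime PM callbacks: */
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(example_runtime_suspend,
			   example_runtime_resume, NULL)
};
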
+ */ +#define MCI_ST_DATA2DIREN (1 << 2) +#define MCI_ST_CMDDIREN (1 << 3) +#define MCI_ST_DATA0DIREN (1 << 4) +#define MCI_ST_DATA31DIREN (1 << 5) +#define MCI_ST_FBCLKEN (1 << 7) +#define MCI_ST_DATA74DIREN (1 << 8) #define MMCICLOCK 0x004 #define MCI_CLK_ENABLE (1 << 8) @@ -176,9 +186,6 @@ struct mmci_host { struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; - int gpio_cd; - int gpio_wp; - int gpio_cd_irq; bool singleirq; spinlock_t lock; @@ -186,6 +193,7 @@ struct mmci_host { unsigned int mclk; unsigned int cclk; u32 pwr_reg; + u32 pwr_reg_add; u32 clk_reg; u32 datactrl_reg; u32 busy_status; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 5fb994f9a65..0b9ded13a3a 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -31,28 +31,14 @@ #include <linux/mfd/rtsx_pci.h> #include <asm/unaligned.h> -struct realtek_next { - unsigned int sg_count; - s32 cookie; -}; - struct realtek_pci_sdmmc { struct platform_device *pdev; struct rtsx_pcr *pcr; struct mmc_host *mmc; struct mmc_request *mrq; - struct mmc_command *cmd; - struct mmc_data *data; - - spinlock_t lock; - struct timer_list timer; - struct tasklet_struct cmd_tasklet; - struct tasklet_struct data_tasklet; - struct tasklet_struct finish_tasklet; - - u8 rsp_type; - u8 rsp_len; - int sg_count; + + struct mutex host_mutex; + u8 ssc_depth; unsigned int clock; bool vpclk; @@ -62,13 +48,8 @@ struct realtek_pci_sdmmc { int power_state; #define SDMMC_POWER_ON 1 #define SDMMC_POWER_OFF 0 - - struct realtek_next next_data; }; -static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, - struct mmc_request *mrq); - static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) { return &(host->pdev->dev); @@ -105,95 +86,6 @@ static void sd_print_debug_regs(struct realtek_pci_sdmmc *host) #define sd_print_debug_regs(host) #endif /* DEBUG */ -static void sd_isr_done_transfer(struct platform_device *pdev) -{ - struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); - - spin_lock(&host->lock); - if (host->cmd) - tasklet_schedule(&host->cmd_tasklet); - if (host->data) - tasklet_schedule(&host->data_tasklet); - spin_unlock(&host->lock); -} - -static void sd_request_timeout(unsigned long host_addr) -{ - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - - if (!host->mrq) { - dev_err(sdmmc_dev(host), "error: no request exist\n"); - goto out; - } - - if (host->cmd) - host->cmd->error = -ETIMEDOUT; - if (host->data) - host->data->error = -ETIMEDOUT; - - dev_dbg(sdmmc_dev(host), "timeout for request\n"); - -out: - tasklet_schedule(&host->finish_tasklet); - spin_unlock_irqrestore(&host->lock, flags); -} - -static void sd_finish_request(unsigned long host_addr) -{ - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; - struct rtsx_pcr *pcr = host->pcr; - struct mmc_request *mrq; - struct mmc_command *cmd; - struct mmc_data *data; - unsigned long flags; - bool any_error; - - spin_lock_irqsave(&host->lock, flags); - - del_timer(&host->timer); - mrq = host->mrq; - if (!mrq) { - dev_err(sdmmc_dev(host), "error: no request need finish\n"); - goto out; - } - - cmd = mrq->cmd; - data = mrq->data; - - any_error = (mrq->sbc && mrq->sbc->error) || - (mrq->stop && mrq->stop->error) || - (cmd && cmd->error) || (data && data->error); - - if (any_error) { - rtsx_pci_stop_cmd(pcr); - sd_clear_error(host); - } - - if (data) { - if (any_error) - data->bytes_xfered = 0; - 
else - data->bytes_xfered = data->blocks * data->blksz; - - if (!data->host_cookie) - rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, - data->flags & MMC_DATA_READ); - - } - - host->mrq = NULL; - host->cmd = NULL; - host->data = NULL; - -out: - spin_unlock_irqrestore(&host->lock, flags); - mutex_unlock(&pcr->pcr_mutex); - mmc_request_done(host->mmc, mrq); -} - static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, u8 *buf, int buf_len, int timeout) { @@ -311,7 +203,8 @@ static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, return 0; } -static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) +static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, + struct mmc_command *cmd) { struct rtsx_pcr *pcr = host->pcr; u8 cmd_idx = (u8)cmd->opcode; @@ -319,14 +212,11 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) int err = 0; int timeout = 100; int i; + u8 *ptr; + int stat_idx = 0; u8 rsp_type; int rsp_len = 5; - unsigned long flags; - - if (host->cmd) - dev_err(sdmmc_dev(host), "error: cmd already exist\n"); - - host->cmd = cmd; + bool clock_toggled = false; dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", __func__, cmd_idx, arg); @@ -361,8 +251,6 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) err = -EINVAL; goto out; } - host->rsp_type = rsp_type; - host->rsp_len = rsp_len; if (rsp_type == SD_RSP_TYPE_R1b) timeout = 3000; @@ -372,6 +260,8 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) 0xFF, SD_CLK_TOGGLE_EN); if (err < 0) goto out; + + clock_toggled = true; } rtsx_pci_init_cmd(pcr); @@ -395,60 +285,25 @@ static void sd_send_cmd(struct realtek_pci_sdmmc *host, struct mmc_command *cmd) /* Read data from ping-pong buffer */ for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 16; } else if (rsp_type != SD_RSP_TYPE_R0) { /* Read data from SD_CMDx registers */ for (i = SD_CMD0; i <= SD_CMD4; i++) rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 5; } rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); - mod_timer(&host->timer, jiffies + msecs_to_jiffies(timeout)); - - spin_lock_irqsave(&pcr->lock, flags); - pcr->trans_result = TRANS_NOT_READY; - rtsx_pci_send_cmd_no_wait(pcr); - spin_unlock_irqrestore(&pcr->lock, flags); - - return; - -out: - cmd->error = err; - tasklet_schedule(&host->finish_tasklet); -} - -static void sd_get_rsp(unsigned long host_addr) -{ - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; - struct rtsx_pcr *pcr = host->pcr; - struct mmc_command *cmd; - int i, err = 0, stat_idx; - u8 *ptr, rsp_type; - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - - cmd = host->cmd; - host->cmd = NULL; - - if (!cmd) { - dev_err(sdmmc_dev(host), "error: cmd not exist\n"); + err = rtsx_pci_send_cmd(pcr, timeout); + if (err < 0) { + sd_print_debug_regs(host); + sd_clear_error(host); + dev_dbg(sdmmc_dev(host), + "rtsx_pci_send_cmd error (err = %d)\n", err); goto out; } - spin_lock(&pcr->lock); - if (pcr->trans_result == TRANS_NO_DEVICE) - err = -ENODEV; - else if (pcr->trans_result != TRANS_RESULT_OK) - err = -EINVAL; - spin_unlock(&pcr->lock); - - if (err < 0) - goto out; - - rsp_type = host->rsp_type; - stat_idx = host->rsp_len; - if (rsp_type == SD_RSP_TYPE_R0) { err = 0; goto out; @@ -485,106 +340,26 @@ static void sd_get_rsp(unsigned long host_addr) cmd->resp[0]); } - if (cmd == 
host->mrq->sbc) { - sd_send_cmd(host, host->mrq->cmd); - spin_unlock_irqrestore(&host->lock, flags); - return; - } - - if (cmd == host->mrq->stop) - goto out; - - if (cmd->data) { - sd_start_multi_rw(host, host->mrq); - spin_unlock_irqrestore(&host->lock, flags); - return; - } - out: cmd->error = err; - tasklet_schedule(&host->finish_tasklet); - spin_unlock_irqrestore(&host->lock, flags); -} - -static int sd_pre_dma_transfer(struct realtek_pci_sdmmc *host, - struct mmc_data *data, struct realtek_next *next) -{ - struct rtsx_pcr *pcr = host->pcr; - int read = data->flags & MMC_DATA_READ; - int sg_count = 0; - - if (!next && data->host_cookie && - data->host_cookie != host->next_data.cookie) { - dev_err(sdmmc_dev(host), - "error: invalid cookie data[%d] host[%d]\n", - data->host_cookie, host->next_data.cookie); - data->host_cookie = 0; - } - - if (next || (!next && data->host_cookie != host->next_data.cookie)) - sg_count = rtsx_pci_dma_map_sg(pcr, - data->sg, data->sg_len, read); - else - sg_count = host->next_data.sg_count; - - if (next) { - next->sg_count = sg_count; - if (++next->cookie < 0) - next->cookie = 1; - data->host_cookie = next->cookie; - } - - return sg_count; -} - -static void sdmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, - bool is_first_req) -{ - struct realtek_pci_sdmmc *host = mmc_priv(mmc); - struct mmc_data *data = mrq->data; - - if (data->host_cookie) { - dev_err(sdmmc_dev(host), - "error: descard already cookie data[%d]\n", - data->host_cookie); - data->host_cookie = 0; - } - - dev_dbg(sdmmc_dev(host), "dma sg prepared: %d\n", - sd_pre_dma_transfer(host, data, &host->next_data)); -} - -static void sdmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, - int err) -{ - struct realtek_pci_sdmmc *host = mmc_priv(mmc); - struct rtsx_pcr *pcr = host->pcr; - struct mmc_data *data = mrq->data; - int read = data->flags & MMC_DATA_READ; - - rtsx_pci_dma_unmap_sg(pcr, data->sg, data->sg_len, read); - data->host_cookie = 0; + if (err && clock_toggled) + rtsx_pci_write_register(pcr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); } -static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, - struct mmc_request *mrq) +static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) { struct rtsx_pcr *pcr = host->pcr; struct mmc_host *mmc = host->mmc; struct mmc_card *card = mmc->card; struct mmc_data *data = mrq->data; int uhs = mmc_card_uhs(card); - int read = data->flags & MMC_DATA_READ; + int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; u8 cfg2, trans_mode; int err; size_t data_len = data->blksz * data->blocks; - if (host->data) - dev_err(sdmmc_dev(host), "error: data already exist\n"); - - host->data = data; - if (read) { cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; @@ -635,54 +410,15 @@ static int sd_start_multi_rw(struct realtek_pci_sdmmc *host, rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, SD_TRANSFER_END, SD_TRANSFER_END); - mod_timer(&host->timer, jiffies + 10 * HZ); rtsx_pci_send_cmd_no_wait(pcr); - err = rtsx_pci_dma_transfer(pcr, data->sg, host->sg_count, read); - if (err < 0) { - data->error = err; - tasklet_schedule(&host->finish_tasklet); - } - return 0; -} - -static void sd_finish_multi_rw(unsigned long host_addr) -{ - struct realtek_pci_sdmmc *host = (struct realtek_pci_sdmmc *)host_addr; - struct rtsx_pcr *pcr = host->pcr; - struct mmc_data *data; - int err = 0; - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - - if (!host->data) { - dev_err(sdmmc_dev(host), "error: no data exist\n"); - goto out; - } - - data = host->data; - host->data = NULL; - - if (pcr->trans_result == TRANS_NO_DEVICE) - err = -ENODEV; - else if (pcr->trans_result != TRANS_RESULT_OK) - err = -EINVAL; - + err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000); if (err < 0) { - data->error = err; - goto out; - } - - if (!host->mrq->sbc && data->stop) { - sd_send_cmd(host, data->stop); - spin_unlock_irqrestore(&host->lock, flags); - return; + sd_clear_error(host); + return err; } -out: - tasklet_schedule(&host->finish_tasklet); - spin_unlock_irqrestore(&host->lock, flags); + return 0; } static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) @@ -901,13 +637,6 @@ static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode) return 0; } -static inline bool sd_use_muti_rw(struct mmc_command *cmd) -{ - return mmc_op_multi(cmd->opcode) || - (cmd->opcode == MMC_READ_SINGLE_BLOCK) || - (cmd->opcode == MMC_WRITE_BLOCK); -} - static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct realtek_pci_sdmmc *host = mmc_priv(mmc); @@ -916,14 +645,6 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) struct mmc_data *data = mrq->data; unsigned int data_size = 0; int err; - unsigned long flags; - - mutex_lock(&pcr->pcr_mutex); - spin_lock_irqsave(&host->lock, flags); - - if (host->mrq) - dev_err(sdmmc_dev(host), "error: request already exist\n"); - host->mrq = mrq; if (host->eject) { cmd->error = -ENOMEDIUM; @@ -936,6 +657,8 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) goto finish; } + mutex_lock(&pcr->pcr_mutex); + rtsx_pci_start_run(pcr); rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, @@ -944,28 +667,46 @@ static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) rtsx_pci_write_register(pcr, CARD_SHARE_MODE, CARD_SHARE_MASK, CARD_SHARE_48_SD); + mutex_lock(&host->host_mutex); + host->mrq = mrq; + mutex_unlock(&host->host_mutex); + if (mrq->data) data_size = data->blocks * data->blksz; - if (sd_use_muti_rw(cmd)) - host->sg_count = sd_pre_dma_transfer(host, data, NULL); + if (!data_size || mmc_op_multi(cmd->opcode) || + (cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (cmd->opcode == MMC_WRITE_BLOCK)) { + sd_send_cmd_get_rsp(host, cmd); - if (!data_size || sd_use_muti_rw(cmd)) { - if (mrq->sbc) - sd_send_cmd(host, mrq->sbc); - else - sd_send_cmd(host, cmd); - spin_unlock_irqrestore(&host->lock, flags); + if (!cmd->error && data_size) { + 
sd_rw_multi(host, mrq); + + if (mmc_op_multi(cmd->opcode) && mrq->stop) + sd_send_cmd_get_rsp(host, mrq->stop); + } } else { - spin_unlock_irqrestore(&host->lock, flags); sd_normal_rw(host, mrq); - tasklet_schedule(&host->finish_tasklet); } - return; + + if (mrq->data) { + if (cmd->error || data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blocks * data->blksz; + } + + mutex_unlock(&pcr->pcr_mutex); finish: - tasklet_schedule(&host->finish_tasklet); - spin_unlock_irqrestore(&host->lock, flags); + if (cmd->error) + dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); + + mutex_lock(&host->host_mutex); + host->mrq = NULL; + mutex_unlock(&host->host_mutex); + + mmc_request_done(mmc, mrq); } static int sd_set_bus_width(struct realtek_pci_sdmmc *host, @@ -1400,8 +1141,6 @@ out: } static const struct mmc_host_ops realtek_pci_sdmmc_ops = { - .pre_req = sdmmc_pre_req, - .post_req = sdmmc_post_req, .request = sdmmc_request, .set_ios = sdmmc_set_ios, .get_ro = sdmmc_get_ro, @@ -1465,7 +1204,6 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) struct realtek_pci_sdmmc *host; struct rtsx_pcr *pcr; struct pcr_handle *handle = pdev->dev.platform_data; - unsigned long host_addr; if (!handle) return -ENXIO; @@ -1489,15 +1227,8 @@ static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) pcr->slots[RTSX_SD_CARD].p_dev = pdev; pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; - host_addr = (unsigned long)host; - host->next_data.cookie = 1; - setup_timer(&host->timer, sd_request_timeout, host_addr); - tasklet_init(&host->cmd_tasklet, sd_get_rsp, host_addr); - tasklet_init(&host->data_tasklet, sd_finish_multi_rw, host_addr); - tasklet_init(&host->finish_tasklet, sd_finish_request, host_addr); - spin_lock_init(&host->lock); + mutex_init(&host->host_mutex); - pcr->slots[RTSX_SD_CARD].done_transfer = sd_isr_done_transfer; realtek_init_host(host); mmc_add_host(mmc); @@ -1510,8 +1241,6 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); struct rtsx_pcr *pcr; struct mmc_host *mmc; - struct mmc_request *mrq; - unsigned long flags; if (!host) return 0; @@ -1519,33 +1248,22 @@ static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) pcr = host->pcr; pcr->slots[RTSX_SD_CARD].p_dev = NULL; pcr->slots[RTSX_SD_CARD].card_event = NULL; - pcr->slots[RTSX_SD_CARD].done_transfer = NULL; mmc = host->mmc; - mrq = host->mrq; - spin_lock_irqsave(&host->lock, flags); + mutex_lock(&host->host_mutex); if (host->mrq) { dev_dbg(&(pdev->dev), "%s: Controller removed during transfer\n", mmc_hostname(mmc)); - if (mrq->sbc) - mrq->sbc->error = -ENOMEDIUM; - if (mrq->cmd) - mrq->cmd->error = -ENOMEDIUM; - if (mrq->stop) - mrq->stop->error = -ENOMEDIUM; - if (mrq->data) - mrq->data->error = -ENOMEDIUM; + rtsx_pci_complete_unfinished_transfer(pcr); - tasklet_schedule(&host->finish_tasklet); + host->mrq->cmd->error = -ENOMEDIUM; + if (host->mrq->stop) + host->mrq->stop->error = -ENOMEDIUM; + mmc_request_done(mmc, host->mrq); } - spin_unlock_irqrestore(&host->lock, flags); - - del_timer_sync(&host->timer); - tasklet_kill(&host->cmd_tasklet); - tasklet_kill(&host->data_tasklet); - tasklet_kill(&host->finish_tasklet); + mutex_unlock(&host->host_mutex); mmc_remove_host(mmc); host->eject = true; diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 0b2ccb68c0d..4dbfaee9aa9 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -82,8 +82,7 @@ static int 
do_blktrans_request(struct mtd_blktrans_ops *tr, block = blk_rq_pos(req) << 9 >> tr->blkshift; nsect = blk_rq_cur_bytes(req) >> tr->blkshift; - - buf = req->buffer; + buf = bio_data(req->bio); if (req->cmd_type != REQ_TYPE_FS) return -EIO; diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 4615d79fc93..b922c8efcf4 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -523,6 +523,7 @@ static struct nand_ecclayout hwecc4_2048 = { #if defined(CONFIG_OF) static const struct of_device_id davinci_nand_of_match[] = { {.compatible = "ti,davinci-nand", }, + {.compatible = "ti,keystone-nand", }, {}, }; MODULE_DEVICE_TABLE(of, davinci_nand_of_match); @@ -581,6 +582,11 @@ static struct davinci_nand_pdata of_property_read_bool(pdev->dev.of_node, "ti,davinci-nand-use-bbt")) pdata->bbt_options = NAND_BBT_USE_FLASH; + + if (of_device_is_compatible(pdev->dev.of_node, + "ti,keystone-nand")) { + pdata->options |= NAND_NO_SUBPAGE_WRITE; + } } return dev_get_platdata(&pdev->dev); diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c index 7ff473c871a..20a667c95da 100644 --- a/drivers/mtd/ubi/block.c +++ b/drivers/mtd/ubi/block.c @@ -253,7 +253,7 @@ static int do_ubiblock_request(struct ubiblock *dev, struct request *req) * flash access anyway. */ mutex_lock(&dev->dev_mutex); - ret = ubiblock_read(dev, req->buffer, sec, len); + ret = ubiblock_read(dev, bio_data(req->bio), sec, len); mutex_unlock(&dev->dev_mutex); return ret; @@ -431,7 +431,7 @@ int ubiblock_create(struct ubi_volume_info *vi) * Create one workqueue per volume (per registered block device). * Rembember workqueues are cheap, they're not threads. */ - dev->wq = alloc_workqueue(gd->disk_name, 0, 0); + dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); if (!dev->wq) goto out_free_queue; INIT_WORK(&dev->work, ubiblock_do_work); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 02317c1c023..0f3425dac91 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -671,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi) e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF); self_check_in_wl_tree(ubi, e, &ubi->free); + ubi->free_count--; + ubi_assert(ubi->free_count >= 0); rb_erase(&e->u.rb, &ubi->free); return e; @@ -684,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi) peb = __wl_get_peb(ubi); spin_unlock(&ubi->wl_lock); + if (peb < 0) + return peb; + err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, ubi->peb_size - ubi->vid_hdr_aloffset); if (err) { @@ -1068,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, /* Give the unused PEB back */ wl_tree_add(e2, &ubi->free); + ubi->free_count++; goto out_cancel; } self_check_in_wl_tree(ubi, e1, &ubi->used); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 9f69e818b00..93580a47cc5 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) } /* Forward declaration */ -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); +static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], + bool strict_match); static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); static void rlb_src_unlink(struct bonding *bond, u32 index); static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, @@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct 
bonding *bond, u8 addr[]) bond->alb_info.rlb_promisc_timeout_counter = 0; - alb_send_learning_packets(bond->curr_active_slave, addr); + alb_send_learning_packets(bond->curr_active_slave, addr, true); } /* slave being removed should not be active at this point @@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id) /*********************** tlb/rlb shared functions *********************/ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], - u16 vid) + __be16 vlan_proto, u16 vid) { struct learning_pkt pkt; struct sk_buff *skb; @@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->dev = slave->dev; if (vid) { - skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); + skb = vlan_put_tag(skb, vlan_proto, vid); if (!skb) { pr_err("%s: Error: failed to insert VLAN tag\n", slave->bond->dev->name); @@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], dev_queue_xmit(skb); } - -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) +static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], + bool strict_match) { struct bonding *bond = bond_get_bond_by_slave(slave); struct net_device *upper; struct list_head *iter; /* send untagged */ - alb_send_lp_vid(slave, mac_addr, 0); + alb_send_lp_vid(slave, mac_addr, 0, 0); /* loop through vlans and send one packet for each */ rcu_read_lock(); netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { - if (upper->priv_flags & IFF_802_1Q_VLAN) - alb_send_lp_vid(slave, mac_addr, - vlan_dev_vlan_id(upper)); + if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { + if (strict_match && + ether_addr_equal_64bits(mac_addr, + upper->dev_addr)) { + alb_send_lp_vid(slave, mac_addr, + vlan_dev_vlan_proto(upper), + vlan_dev_vlan_id(upper)); + } else if (!strict_match) { + alb_send_lp_vid(slave, upper->dev_addr, + vlan_dev_vlan_proto(upper), + vlan_dev_vlan_id(upper)); + } + } } rcu_read_unlock(); } @@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, /* fasten the change in the switch */ if (SLAVE_IS_OK(slave1)) { - alb_send_learning_packets(slave1, slave1->dev->dev_addr); + alb_send_learning_packets(slave1, slave1->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed @@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, } if (SLAVE_IS_OK(slave2)) { - alb_send_learning_packets(slave2, slave2->dev->dev_addr); + alb_send_learning_packets(slave2, slave2->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed @@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work) /* send learning packets */ if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { + bool strict_match; + /* change of curr_active_slave involves swapping of mac addresses. * in order to avoid this swapping from happening while * sending the learning packets, the curr_slave_lock must be held for @@ -1497,8 +1510,15 @@ void bond_alb_monitor(struct work_struct *work) */ read_lock(&bond->curr_slave_lock); - bond_for_each_slave_rcu(bond, slave, iter) - alb_send_learning_packets(slave, slave->dev->dev_addr); + bond_for_each_slave_rcu(bond, slave, iter) { + /* If updating current_active, use all currently + * user mac addreses (!strict_match). Otherwise, only + * use mac of the slave device. 
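
Traced through a concrete (hypothetical) configuration, the policy described in the comment above looks as follows:

/*
 * bond0 with slaves eth0 (curr_active_slave) and eth1, and vlan10/vlan20
 * stacked on bond0, where vlan10 carries its own MAC address:
 *
 *   monitor tick, slave == eth0 -> strict_match = false:
 *       untagged learning packet with eth0's MAC
 *       tagged (vid 10) packet with vlan10's own MAC
 *       tagged (vid 20) packet with vlan20's MAC
 *
 *   monitor tick, slave == eth1 -> strict_match = true:
 *       untagged learning packet with eth1's MAC
 *       tagged packets only for VLAN devices whose dev_addr equals
 *       eth1's MAC - foreign MACs are never advertised via a backup
 */
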
+ */ + strict_match = (slave != bond->curr_active_slave); + alb_send_learning_packets(slave, slave->dev->dev_addr, + strict_match); + } read_unlock(&bond->curr_slave_lock); @@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave } else { /* set the new_slave to the bond mac address */ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); - alb_send_learning_packets(new_slave, bond->dev->dev_addr); + alb_send_learning_packets(new_slave, bond->dev->dev_addr, + false); } write_lock_bh(&bond->curr_slave_lock); @@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); read_lock(&bond->lock); - alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); + alb_send_learning_packets(bond->curr_active_slave, + bond_dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform clients mac address has changed */ rlb_req_update_slave_clients(bond, bond->curr_active_slave); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 69aff72c895..d3a67896d43 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip) */ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, - struct bond_vlan_tag *inner, - struct bond_vlan_tag *outer) + struct bond_vlan_tag *tags) { struct sk_buff *skb; + int i; pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", arp_op, slave_dev->name, &dest_ip, &src_ip); @@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, net_err_ratelimited("ARP packet allocation failed\n"); return; } - if (outer->vlan_id) { - if (inner->vlan_id) { - pr_debug("inner tag: proto %X vid %X\n", - ntohs(inner->vlan_proto), inner->vlan_id); - skb = __vlan_put_tag(skb, inner->vlan_proto, - inner->vlan_id); - if (!skb) { - net_err_ratelimited("failed to insert inner VLAN tag\n"); - return; - } - } - pr_debug("outer reg: proto %X vid %X\n", - ntohs(outer->vlan_proto), outer->vlan_id); - skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); + /* Go through all the tags backwards and add them to the packet */ + for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) { + if (!tags[i].vlan_id) + continue; + + pr_debug("inner tag: proto %X vid %X\n", + ntohs(tags[i].vlan_proto), tags[i].vlan_id); + skb = __vlan_put_tag(skb, tags[i].vlan_proto, + tags[i].vlan_id); + if (!skb) { + net_err_ratelimited("failed to insert inner VLAN tag\n"); + return; + } + } + /* Set the outer tag */ + if (tags[0].vlan_id) { + pr_debug("outer tag: proto %X vid %X\n", + ntohs(tags[0].vlan_proto), tags[0].vlan_id); + skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id); if (!skb) { net_err_ratelimited("failed to insert outer VLAN tag\n"); return; @@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, arp_xmit(skb); } +/* Validate the device path between the @start_dev and the @end_dev. + * The path is valid if the @end_dev is reachable through device + * stacking. + * When the path is validated, collect any vlan information in the + * path. 
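
Traced on a hypothetical stacked-VLAN (QinQ) setup, the recursion defined below fills the tags array indexed by encapsulation level on its way back out:

/*
 * bond0 <- bond0.100 (802.1ad) <- bond0.100.200 (802.1Q) == rt->dst.dev
 *
 * bond_verify_device_path(bond0, rt->dst.dev, tags) returns true with
 *
 *   tags[0] = { .vlan_proto = htons(ETH_P_8021AD), .vlan_id = 100 }
 *   tags[1] = { .vlan_proto = htons(ETH_P_8021Q),  .vlan_id = 200 }
 *
 * and bond_arp_send() then inserts tags[1] (innermost) first and
 * tags[0] (outermost) last. Anything nested deeper than
 * BOND_MAX_VLAN_ENCAP (2) levels makes the verification fail.
 */
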
+ */ +static bool bond_verify_device_path(struct net_device *start_dev, + struct net_device *end_dev, + struct bond_vlan_tag *tags) +{ + struct net_device *upper; + struct list_head *iter; + int idx; + + if (start_dev == end_dev) + return true; + + netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { + if (bond_verify_device_path(upper, end_dev, tags)) { + if (is_vlan_dev(upper)) { + idx = vlan_get_encap_level(upper); + if (idx >= BOND_MAX_VLAN_ENCAP) + return false; + + tags[idx].vlan_proto = + vlan_dev_vlan_proto(upper); + tags[idx].vlan_id = vlan_dev_vlan_id(upper); + } + return true; + } + } + + return false; +} static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { - struct net_device *upper, *vlan_upper; - struct list_head *iter, *vlan_iter; struct rtable *rt; - struct bond_vlan_tag inner, outer; + struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP]; __be32 *targets = bond->params.arp_targets, addr; int i; + bool ret; for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { pr_debug("basa: target %pI4\n", &targets[i]); - inner.vlan_proto = 0; - inner.vlan_id = 0; - outer.vlan_proto = 0; - outer.vlan_id = 0; + memset(tags, 0, sizeof(tags)); /* Find out through which dev should the packet go */ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, @@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", bond->dev->name, &targets[i]); - bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); + bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], + 0, tags); continue; } @@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) goto found; rcu_read_lock(); - /* first we search only for vlan devices. for every vlan - * found we verify its upper dev list, searching for the - * rt->dst.dev. If found we save the tag of the vlan and - * proceed to send the packet. - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper, - vlan_iter) { - if (!is_vlan_dev(vlan_upper)) - continue; - - if (vlan_upper == rt->dst.dev) { - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - netdev_for_each_all_upper_dev_rcu(vlan_upper, upper, - iter) { - if (upper == rt->dst.dev) { - /* If the upper dev is a vlan dev too, - * set the vlan tag to inner tag. 
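In user-space form, the recursion above is a depth-first walk of the stacking graph that, on the successful path, records each VLAN device's tag at its encapsulation level, so index 0 ends up holding the outermost tag. The adjacency structure below is invented for the sketch; the kernel walks the real upper-device lists under RCU:

#include <stdbool.h>

#define MAX_VLAN_ENCAP 2

struct tag {
	unsigned short proto;
	unsigned short vid;
};

struct dev {
	struct dev *uppers[4];	/* devices stacked on top of this one */
	int n_uppers;
	bool is_vlan;
	int encap_level;	/* nesting depth of this vlan device */
	struct tag vtag;
};

/* Walk upwards from start towards end; record vlan tags on unwind. */
static bool verify_path(struct dev *start, struct dev *end, struct tag *tags)
{
	int i;

	if (start == end)
		return true;

	for (i = 0; i < start->n_uppers; i++) {
		struct dev *up = start->uppers[i];

		if (verify_path(up, end, tags)) {
			if (up->is_vlan) {
				if (up->encap_level >= MAX_VLAN_ENCAP)
					return false;	/* too deeply nested */
				tags[up->encap_level] = up->vtag;
			}
			return true;
		}
	}
	return false;
}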
- */ - if (is_vlan_dev(upper)) { - inner.vlan_proto = vlan_dev_vlan_proto(upper); - inner.vlan_id = vlan_dev_vlan_id(upper); - } - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - } - } - - /* if the device we're looking for is not on top of any of - * our upper vlans, then just search for any dev that - * matches, and in case it's a vlan - save the id - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { - if (upper == rt->dst.dev) { - rcu_read_unlock(); - goto found; - } - } + ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags); rcu_read_unlock(); + if (ret) + goto found; + /* Not our device - skip */ pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", bond->dev->name, &targets[i], @@ -2259,7 +2255,7 @@ found: addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); ip_rt_put(rt); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], - addr, &inner, &outer); + addr, tags); } } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 724e30fa20b..83207029844 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = { static const struct bond_opt_value bond_intmax_tbl[] = { { "off", 0, BOND_VALFLAG_DEFAULT}, { "maxval", INT_MAX, BOND_VALFLAG_MAX}, + { NULL, -1, 0} }; static const struct bond_opt_value bond_lacp_rate_tbl[] = { diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 0e8b268da0a..5f6babcfc26 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -534,7 +534,7 @@ static ssize_t bonding_show_min_links(struct device *d, { struct bonding *bond = to_bond(d); - return sprintf(buf, "%d\n", bond->params.min_links); + return sprintf(buf, "%u\n", bond->params.min_links); } static ssize_t bonding_store_min_links(struct device *d, diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index b8bdd0acc8f..00bea320e3b 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -36,6 +36,7 @@ #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" +#define BOND_MAX_VLAN_ENCAP 2 #define BOND_MAX_ARP_TARGETS 16 #define BOND_DEFAULT_MIIMON 100 diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index a5c8dcfa835..95e04e2002d 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -60,6 +60,8 @@ #define CONTROL_IE BIT(1) #define CONTROL_INIT BIT(0) +#define CONTROL_IRQMSK (CONTROL_EIE | CONTROL_IE | CONTROL_SIE) + /* test register */ #define TEST_RX BIT(7) #define TEST_TX1 BIT(6) @@ -108,11 +110,14 @@ #define IF_COMM_CONTROL BIT(4) #define IF_COMM_CLR_INT_PND BIT(3) #define IF_COMM_TXRQST BIT(2) +#define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST #define IF_COMM_DATAA BIT(1) #define IF_COMM_DATAB BIT(0) -#define IF_COMM_ALL (IF_COMM_MASK | IF_COMM_ARB | \ - IF_COMM_CONTROL | IF_COMM_TXRQST | \ - IF_COMM_DATAA | IF_COMM_DATAB) + +/* TX buffer setup */ +#define IF_COMM_TX (IF_COMM_ARB | IF_COMM_CONTROL | \ + IF_COMM_TXRQST | \ + IF_COMM_DATAA | IF_COMM_DATAB) /* For the low buffers we clear the interrupt bit, but keep newdat */ #define IF_COMM_RCV_LOW (IF_COMM_MASK | IF_COMM_ARB | \ @@ -120,12 +125,19 @@ IF_COMM_DATAA | IF_COMM_DATAB) /* For the high buffers we clear the interrupt bit and newdat */ -#define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_TXRQST) +#define 
IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT) + + +/* Receive setup of message objects */ +#define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL) + +/* Invalidation of message objects */ +#define IF_COMM_INVAL (IF_COMM_ARB | IF_COMM_CONTROL) /* IFx arbitration */ -#define IF_ARB_MSGVAL BIT(15) -#define IF_ARB_MSGXTD BIT(14) -#define IF_ARB_TRANSMIT BIT(13) +#define IF_ARB_MSGVAL BIT(31) +#define IF_ARB_MSGXTD BIT(30) +#define IF_ARB_TRANSMIT BIT(29) /* IFx message control */ #define IF_MCONT_NEWDAT BIT(15) @@ -139,19 +151,17 @@ #define IF_MCONT_EOB BIT(7) #define IF_MCONT_DLC_MASK 0xf +#define IF_MCONT_RCV (IF_MCONT_RXIE | IF_MCONT_UMASK) +#define IF_MCONT_RCV_EOB (IF_MCONT_RCV | IF_MCONT_EOB) + +#define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB) + /* * Use IF1 for RX and IF2 for TX */ #define IF_RX 0 #define IF_TX 1 -/* status interrupt */ -#define STATUS_INTERRUPT 0x8000 - -/* global interrupt masks */ -#define ENABLE_ALL_INTERRUPTS 1 -#define DISABLE_ALL_INTERRUPTS 0 - /* minimum timeout for checking BUSY status */ #define MIN_TIMEOUT_VALUE 6 @@ -171,6 +181,7 @@ enum c_can_lec_type { LEC_BIT0_ERROR, LEC_CRC_ERROR, LEC_UNUSED, + LEC_MASK = LEC_UNUSED, }; /* @@ -226,143 +237,115 @@ static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable) priv->raminit(priv, enable); } -static inline int get_tx_next_msg_obj(const struct c_can_priv *priv) -{ - return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) + - C_CAN_MSG_OBJ_TX_FIRST; -} - -static inline int get_tx_echo_msg_obj(int txecho) -{ - return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST; -} - -static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index) -{ - u32 val = priv->read_reg(priv, index); - val |= ((u32) priv->read_reg(priv, index + 1)) << 16; - return val; -} - -static void c_can_enable_all_interrupts(struct c_can_priv *priv, - int enable) +static void c_can_irq_control(struct c_can_priv *priv, bool enable) { - unsigned int cntrl_save = priv->read_reg(priv, - C_CAN_CTRL_REG); + u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK; if (enable) - cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE); - else - cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE); + ctrl |= CONTROL_IRQMSK; - priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save); + priv->write_reg(priv, C_CAN_CTRL_REG, ctrl); } -static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface) +static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj) { - int count = MIN_TIMEOUT_VALUE; + struct c_can_priv *priv = netdev_priv(dev); + int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface); - while (count && priv->read_reg(priv, - C_CAN_IFACE(COMREQ_REG, iface)) & - IF_COMR_BUSY) { - count--; + priv->write_reg(priv, reg + 1, cmd); + priv->write_reg(priv, reg, obj); + + for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) { + if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY)) + return; udelay(1); } + netdev_err(dev, "Updating object timed out\n"); - if (!count) - return 1; +} - return 0; +static inline void c_can_object_get(struct net_device *dev, int iface, + u32 obj, u32 cmd) +{ + c_can_obj_update(dev, iface, cmd, obj); } -static inline void c_can_object_get(struct net_device *dev, - int iface, int objno, int mask) +static inline void c_can_object_put(struct net_device *dev, int iface, + u32 obj, u32 cmd) { - struct c_can_priv *priv = netdev_priv(dev); + c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj); +} - /* - * As per specs, after writting the message object number in the - * 
IF command request register the transfer b/w interface - * register and message RAM must be complete in 6 CAN-CLK - * period. - */ - priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface), - IFX_WRITE_LOW_16BIT(mask)); - priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface), - IFX_WRITE_LOW_16BIT(objno)); +/* + * Note: According to documentation clearing TXIE while MSGVAL is set + * is not allowed, but works nicely on C/DCAN. And that lowers the I/O + * load significantly. + */ +static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj) +{ + struct c_can_priv *priv = netdev_priv(dev); - if (c_can_msg_obj_is_busy(priv, iface)) - netdev_err(dev, "timed out in object get\n"); + priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0); + c_can_object_put(dev, iface, obj, IF_COMM_INVAL); } -static inline void c_can_object_put(struct net_device *dev, - int iface, int objno, int mask) +static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj) { struct c_can_priv *priv = netdev_priv(dev); - /* - * As per specs, after writting the message object number in the - * IF command request register the transfer b/w interface - * register and message RAM must be complete in 6 CAN-CLK - * period. - */ - priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface), - (IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask))); - priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface), - IFX_WRITE_LOW_16BIT(objno)); - - if (c_can_msg_obj_is_busy(priv, iface)) - netdev_err(dev, "timed out in object put\n"); + priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0); + priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0); + c_can_inval_tx_object(dev, iface, obj); } -static void c_can_write_msg_object(struct net_device *dev, - int iface, struct can_frame *frame, int objno) +static void c_can_setup_tx_object(struct net_device *dev, int iface, + struct can_frame *frame, int idx) { - int i; - u16 flags = 0; - unsigned int id; struct c_can_priv *priv = netdev_priv(dev); - - if (!(frame->can_id & CAN_RTR_FLAG)) - flags |= IF_ARB_TRANSMIT; + u16 ctrl = IF_MCONT_TX | frame->can_dlc; + bool rtr = frame->can_id & CAN_RTR_FLAG; + u32 arb = IF_ARB_MSGVAL; + int i; if (frame->can_id & CAN_EFF_FLAG) { - id = frame->can_id & CAN_EFF_MASK; - flags |= IF_ARB_MSGXTD; - } else - id = ((frame->can_id & CAN_SFF_MASK) << 18); + arb |= frame->can_id & CAN_EFF_MASK; + arb |= IF_ARB_MSGXTD; + } else { + arb |= (frame->can_id & CAN_SFF_MASK) << 18; + } + + if (!rtr) + arb |= IF_ARB_TRANSMIT; + + /* + * If we change the DIR bit, we need to invalidate the buffer + * first, i.e. clear the MSGVAL flag in the arbiter. 
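The tx_dir bookkeeping above can be pictured as one bit per message object that remembers whether the last transmit was a remote (RTR) frame; a direction flip forces an invalidate-first cycle. A sketch with plain C stand-ins for test_bit() and change_bit():

#include <stdbool.h>

static unsigned long tx_dir;	/* bit i set: buffer i last sent an RTR frame */

/* True when the message object must be invalidated before reuse
 * because the transfer direction changed since its last transmit. */
static bool need_invalidate(int idx, bool rtr)
{
	bool was_rtr = (tx_dir >> idx) & 1;

	if (rtr != was_rtr) {
		tx_dir ^= 1UL << idx;	/* mirrors change_bit() */
		return true;
	}
	return false;
}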
+ */ + if (rtr != (bool)test_bit(idx, &priv->tx_dir)) { + u32 obj = idx + C_CAN_MSG_OBJ_TX_FIRST; + + c_can_inval_msg_object(dev, iface, obj); + change_bit(idx, &priv->tx_dir); + } - flags |= IF_ARB_MSGVAL; + priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), arb); + priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), arb >> 16); - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), - IFX_WRITE_LOW_16BIT(id)); - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags | - IFX_WRITE_HIGH_16BIT(id)); + priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); for (i = 0; i < frame->can_dlc; i += 2) { priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, frame->data[i] | (frame->data[i + 1] << 8)); } - - /* enable interrupt for this message object */ - priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), - IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB | - frame->can_dlc); - c_can_object_put(dev, iface, objno, IF_COMM_ALL); } static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev, - int iface, - int ctrl_mask) + int iface) { int i; - struct c_can_priv *priv = netdev_priv(dev); - for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) { - priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), - ctrl_mask & ~IF_MCONT_NEWDAT); - c_can_object_put(dev, iface, i, IF_COMM_CONTROL); - } + for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) + c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT); } static int c_can_handle_lost_msg_obj(struct net_device *dev, @@ -377,6 +360,9 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev, priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); c_can_object_put(dev, iface, objno, IF_COMM_CONTROL); + stats->rx_errors++; + stats->rx_over_errors++; + /* create an error msg */ skb = alloc_can_err_skb(dev, &frame); if (unlikely(!skb)) @@ -384,22 +370,18 @@ static int c_can_handle_lost_msg_obj(struct net_device *dev, frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - stats->rx_errors++; - stats->rx_over_errors++; netif_receive_skb(skb); return 1; } -static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) +static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) { - u16 flags, data; - int i; - unsigned int val; - struct c_can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; - struct sk_buff *skb; + struct c_can_priv *priv = netdev_priv(dev); struct can_frame *frame; + struct sk_buff *skb; + u32 arb, data; skb = alloc_can_skb(dev, &frame); if (!skb) { @@ -409,115 +391,82 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl) frame->can_dlc = get_can_dlc(ctrl & 0x0F); - flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)); - val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) | - (flags << 16); + arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)); + arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16; - if (flags & IF_ARB_MSGXTD) - frame->can_id = (val & CAN_EFF_MASK) | CAN_EFF_FLAG; + if (arb & IF_ARB_MSGXTD) + frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG; else - frame->can_id = (val >> 18) & CAN_SFF_MASK; + frame->can_id = (arb >> 18) & CAN_SFF_MASK; - if (flags & IF_ARB_TRANSMIT) + if (arb & IF_ARB_TRANSMIT) { frame->can_id |= CAN_RTR_FLAG; - else { - for (i = 0; i < frame->can_dlc; i += 2) { - data = priv->read_reg(priv, - C_CAN_IFACE(DATA1_REG, iface) + i / 2); + } else { + int i, dreg = C_CAN_IFACE(DATA1_REG, iface); + + for (i = 0; i < 
frame->can_dlc; i += 2, dreg ++) { + data = priv->read_reg(priv, dreg); frame->data[i] = data; frame->data[i + 1] = data >> 8; } } - netif_receive_skb(skb); - stats->rx_packets++; stats->rx_bytes += frame->can_dlc; + + netif_receive_skb(skb); return 0; } static void c_can_setup_receive_object(struct net_device *dev, int iface, - int objno, unsigned int mask, - unsigned int id, unsigned int mcont) + u32 obj, u32 mask, u32 id, u32 mcont) { struct c_can_priv *priv = netdev_priv(dev); - priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), - IFX_WRITE_LOW_16BIT(mask)); - - /* According to C_CAN documentation, the reserved bit - * in IFx_MASK2 register is fixed 1 - */ - priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), - IFX_WRITE_HIGH_16BIT(mask) | BIT(13)); + mask |= BIT(29); + priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface), mask); + priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface), mask >> 16); - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), - IFX_WRITE_LOW_16BIT(id)); - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), - (IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id))); + id |= IF_ARB_MSGVAL; + priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), id); + priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), id >> 16); priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont); - c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST); - - netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, - c_can_read_reg32(priv, C_CAN_MSGVAL1_REG)); -} - -static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno) -{ - struct c_can_priv *priv = netdev_priv(dev); - - priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0); - priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0); - priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0); - - c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL); - - netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno, - c_can_read_reg32(priv, C_CAN_MSGVAL1_REG)); -} - -static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno) -{ - int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG); - - /* - * as transmission request register's bit n-1 corresponds to - * message object n, we need to handle the same properly. - */ - if (val & (1 << (objno - 1))) - return 1; - - return 0; + c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP); } static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, - struct net_device *dev) + struct net_device *dev) { - u32 msg_obj_no; - struct c_can_priv *priv = netdev_priv(dev); struct can_frame *frame = (struct can_frame *)skb->data; + struct c_can_priv *priv = netdev_priv(dev); + u32 idx, obj; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; - - spin_lock_bh(&priv->xmit_lock); - msg_obj_no = get_tx_next_msg_obj(priv); - - /* prepare message object for transmission */ - c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no); - priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc; - can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); - /* - * we have to stop the queue in case of a wrap around or - * if the next TX message object is still in use + * This is not a FIFO. C/D_CAN sends out the buffers + * prioritized. The lowest buffer number wins. 
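Since the controller always sends the lowest-numbered ready buffer first, the xmit path above picks the next object with fls() over the tx_active mask: every new frame lands above all in-flight ones, so on-wire order matches submission order, and the queue stops once the top buffer is taken. A runnable model of that pick (fls_() mimics the kernel's fls(); the helper name is invented):

#include <stdio.h>

#define TX_NUM 16	/* matches C_CAN_MSG_OBJ_TX_NUM */

/* fls(): 1-based position of the most significant set bit, 0 for 0,
 * the same semantics as the kernel helper used above. */
static int fls_(unsigned int v)
{
	return v ? 32 - __builtin_clz(v) : 0;
}

int main(void)
{
	unsigned int tx_active = 0;	/* models priv->tx_active */
	int i;

	for (i = 0; i < TX_NUM; i++) {
		int idx = fls_(tx_active);	/* next free buffer */

		tx_active |= 1u << idx;
		printf("frame %d -> message object %d%s\n", i, idx,
		       idx == TX_NUM - 1 ? " (queue stops here)" : "");
	}
	return 0;
}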
*/ - priv->tx_next++; - if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) || - (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0) + idx = fls(atomic_read(&priv->tx_active)); + obj = idx + C_CAN_MSG_OBJ_TX_FIRST; + + /* If this is the last buffer, stop the xmit queue */ + if (idx == C_CAN_MSG_OBJ_TX_NUM - 1) netif_stop_queue(dev); - spin_unlock_bh(&priv->xmit_lock); + /* + * Store the message in the interface so we can call + * can_put_echo_skb(). We must do this before we enable + * transmit as we might race against do_tx(). + */ + c_can_setup_tx_object(dev, IF_TX, frame, idx); + priv->dlc[idx] = frame->can_dlc; + can_put_echo_skb(skb, dev, idx); + + /* Update the active bits */ + atomic_add((1 << idx), &priv->tx_active); + /* Start transmission */ + c_can_object_put(dev, IF_TX, obj, IF_COMM_TX); return NETDEV_TX_OK; } @@ -594,11 +543,10 @@ static void c_can_configure_msg_objects(struct net_device *dev) /* setup receive message objects */ for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++) - c_can_setup_receive_object(dev, IF_RX, i, 0, 0, - (IF_MCONT_RXIE | IF_MCONT_UMASK) & ~IF_MCONT_EOB); + c_can_setup_receive_object(dev, IF_RX, i, 0, 0, IF_MCONT_RCV); c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0, - IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK); + IF_MCONT_RCV_EOB); } /* @@ -612,30 +560,22 @@ static int c_can_chip_config(struct net_device *dev) struct c_can_priv *priv = netdev_priv(dev); /* enable automatic retransmission */ - priv->write_reg(priv, C_CAN_CTRL_REG, - CONTROL_ENABLE_AR); + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { /* loopback + silent mode : useful for hot self-test */ - priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | - CONTROL_SIE | CONTROL_IE | CONTROL_TEST); - priv->write_reg(priv, C_CAN_TEST_REG, - TEST_LBACK | TEST_SILENT); + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); + priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT); } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { /* loopback mode : useful for self-test function */ - priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | - CONTROL_SIE | CONTROL_IE | CONTROL_TEST); + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK); } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { /* silent mode : bus-monitoring mode */ - priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE | - CONTROL_SIE | CONTROL_IE | CONTROL_TEST); + priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT); - } else - /* normal mode*/ - priv->write_reg(priv, C_CAN_CTRL_REG, - CONTROL_EIE | CONTROL_SIE | CONTROL_IE); + } /* configure message objects */ c_can_configure_msg_objects(dev); @@ -643,6 +583,11 @@ static int c_can_chip_config(struct net_device *dev) /* set a `lec` value so that we can check for updates later */ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); + /* Clear all internal status */ + atomic_set(&priv->tx_active, 0); + priv->rxmasked = 0; + priv->tx_dir = 0; + /* set bittiming params */ return c_can_set_bittiming(dev); } @@ -657,13 +602,11 @@ static int c_can_start(struct net_device *dev) if (err) return err; - priv->can.state = CAN_STATE_ERROR_ACTIVE; - - /* reset tx helper pointers */ - priv->tx_next = priv->tx_echo = 0; + /* Setup the command for new messages */ + priv->comm_rcv_high = priv->type != BOSCH_D_CAN ? 
+ IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH; - /* enable status change, error and module interrupts */ - c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); + priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } @@ -672,15 +615,13 @@ static void c_can_stop(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); - /* disable all interrupts */ - c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); - - /* set the state as STOPPED */ + c_can_irq_control(priv, false); priv->can.state = CAN_STATE_STOPPED; } static int c_can_set_mode(struct net_device *dev, enum can_mode mode) { + struct c_can_priv *priv = netdev_priv(dev); int err; switch (mode) { @@ -689,6 +630,7 @@ static int c_can_set_mode(struct net_device *dev, enum can_mode mode) if (err) return err; netif_wake_queue(dev); + c_can_irq_control(priv, true); break; default: return -EOPNOTSUPP; @@ -724,42 +666,29 @@ static int c_can_get_berr_counter(const struct net_device *dev, return err; } -/* - * priv->tx_echo holds the number of the oldest can_frame put for - * transmission into the hardware, but not yet ACKed by the CAN tx - * complete IRQ. - * - * We iterate from priv->tx_echo to priv->tx_next and check if the - * packet has been transmitted, echo it back to the CAN framework. - * If we discover a not yet transmitted packet, stop looking for more. - */ static void c_can_do_tx(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; - u32 val, obj, pkts = 0, bytes = 0; - - spin_lock_bh(&priv->xmit_lock); - - for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { - obj = get_tx_echo_msg_obj(priv->tx_echo); - val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG); + u32 idx, obj, pkts = 0, bytes = 0, pend, clr; - if (val & (1 << (obj - 1))) - break; + clr = pend = priv->read_reg(priv, C_CAN_INTPND2_REG); - can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST); - bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST]; + while ((idx = ffs(pend))) { + idx--; + pend &= ~(1 << idx); + obj = idx + C_CAN_MSG_OBJ_TX_FIRST; + c_can_inval_tx_object(dev, IF_RX, obj); + can_get_echo_skb(dev, idx); + bytes += priv->dlc[idx]; pkts++; - c_can_inval_msg_object(dev, IF_TX, obj); } - /* restart queue if wrap-up or if queue stalled on last pkt */ - if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) || - ((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0)) - netif_wake_queue(dev); + /* Clear the bits in the tx_active mask */ + atomic_sub(clr, &priv->tx_active); - spin_unlock_bh(&priv->xmit_lock); + if (clr & (1 << (C_CAN_MSG_OBJ_TX_NUM - 1))) + netif_wake_queue(dev); if (pkts) { stats->tx_bytes += bytes; @@ -800,18 +729,28 @@ static u32 c_can_adjust_pending(u32 pend) return pend & ~((1 << lasts) - 1); } +static inline void c_can_rx_object_get(struct net_device *dev, + struct c_can_priv *priv, u32 obj) +{ + c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); +} + +static inline void c_can_rx_finalize(struct net_device *dev, + struct c_can_priv *priv, u32 obj) +{ + if (priv->type != BOSCH_D_CAN) + c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); +} + static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, u32 pend, int quota) { - u32 pkts = 0, ctrl, obj, mcmd; + u32 pkts = 0, ctrl, obj; while ((obj = ffs(pend)) && quota > 0) { pend &= ~BIT(obj - 1); - mcmd = obj < C_CAN_MSG_RX_LOW_LAST ? 
- IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH; - - c_can_object_get(dev, IF_RX, obj, mcmd); + c_can_rx_object_get(dev, priv, obj); ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX)); if (ctrl & IF_MCONT_MSGLST) { @@ -833,9 +772,7 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, /* read the data from the message object */ c_can_read_msg_object(dev, IF_RX, ctrl); - if (obj == C_CAN_MSG_RX_LOW_LAST) - /* activate all lower message objects */ - c_can_activate_all_lower_rx_msg_obj(dev, IF_RX, ctrl); + c_can_rx_finalize(dev, priv, obj); pkts++; quota--; @@ -844,6 +781,13 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, return pkts; } +static inline u32 c_can_get_pending(struct c_can_priv *priv) +{ + u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); + + return pend; +} + /* * theory of operation: * @@ -853,18 +797,9 @@ static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, * has arrived. To work-around this issue, we keep two groups of message * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. * - * To ensure in-order frame reception we use the following - * approach while re-activating a message object to receive further - * frames: - * - if the current message object number is lower than - * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing - * the INTPND bit. - * - if the current message object number is equal to - * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower - * receive message objects. - * - if the current message object number is greater than - * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of - * only this message object. + * We clear the newdat bit right away. + * + * This can result in packet reordering when the readout is slow. */ static int c_can_do_rx_poll(struct net_device *dev, int quota) { @@ -880,7 +815,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota) while (quota > 0) { if (!pend) { - pend = priv->read_reg(priv, C_CAN_INTPND1_REG); + pend = c_can_get_pending(priv); if (!pend) break; /* @@ -905,12 +840,6 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota) return pkts; } -static inline int c_can_has_and_handle_berr(struct c_can_priv *priv) -{ - return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && - (priv->current_status & LEC_UNUSED); -} - static int c_can_handle_state_change(struct net_device *dev, enum c_can_bus_error_types error_type) { @@ -922,6 +851,26 @@ static int c_can_handle_state_change(struct net_device *dev, struct sk_buff *skb; struct can_berr_counter bec; + switch (error_type) { + case C_CAN_ERROR_WARNING: + /* error warning state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_WARNING; + break; + case C_CAN_ERROR_PASSIVE: + /* error passive state */ + priv->can.can_stats.error_passive++; + priv->can.state = CAN_STATE_ERROR_PASSIVE; + break; + case C_CAN_BUS_OFF: + /* bus-off state */ + priv->can.state = CAN_STATE_BUS_OFF; + can_bus_off(dev); + break; + default: + break; + } + /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) @@ -935,8 +884,6 @@ static int c_can_handle_state_change(struct net_device *dev, switch (error_type) { case C_CAN_ERROR_WARNING: /* error warning state */ - priv->can.can_stats.error_warning++; - priv->can.state = CAN_STATE_ERROR_WARNING; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (bec.txerr > bec.rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : @@ -947,8 +894,6 @@ static int c_can_handle_state_change(struct net_device *dev, break; case C_CAN_ERROR_PASSIVE: /* error passive state */ - priv->can.can_stats.error_passive++; - priv->can.state = CAN_STATE_ERROR_PASSIVE; cf->can_id |= CAN_ERR_CRTL; if (rx_err_passive) cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; @@ -960,22 +905,16 @@ static int c_can_handle_state_change(struct net_device *dev, break; case C_CAN_BUS_OFF: /* bus-off state */ - priv->can.state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; - /* - * disable all interrupts in bus-off mode to ensure that - * the CPU is not hogged down - */ - c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); can_bus_off(dev); break; default: break; } - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -996,6 +935,13 @@ static int c_can_handle_bus_err(struct net_device *dev, if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR) return 0; + if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) + return 0; + + /* common for all type of bus errors */ + priv->can.can_stats.bus_error++; + stats->rx_errors++; + /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) @@ -1005,10 +951,6 @@ static int c_can_handle_bus_err(struct net_device *dev, * check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ - - /* common for all type of bus errors */ - priv->can.can_stats.bus_error++; - stats->rx_errors++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= CAN_ERR_PROT_UNSPEC; @@ -1043,95 +985,64 @@ static int c_can_handle_bus_err(struct net_device *dev, break; } - /* set a `lec` value so that we can check for updates later */ - priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); - - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; - + netif_receive_skb(skb); return 1; } static int c_can_poll(struct napi_struct *napi, int quota) { - u16 irqstatus; - int lec_type = 0; - int work_done = 0; struct net_device *dev = napi->dev; struct c_can_priv *priv = netdev_priv(dev); + u16 curr, last = priv->last_status; + int work_done = 0; - irqstatus = priv->irqstatus; - if (!irqstatus) - goto end; + priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); + /* Ack status on C_CAN. 
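The rewritten poll turns state handling into pure edge detection on two status snapshots, so each warning, passive, or bus-off condition is reported exactly once per transition. A sketch of the tests; the bit values are assumptions matching the usual C_CAN status register layout:

#include <stdbool.h>

#define STATUS_EPASS	0x20	/* assumed: bit 5, error passive */
#define STATUS_EWARN	0x40	/* assumed: bit 6, error warning */
#define STATUS_BOFF	0x80	/* assumed: bit 7, bus off */

/* True on the snapshot where the status bit switches on. */
static bool entered(unsigned int curr, unsigned int last, unsigned int bit)
{
	return (curr & bit) && !(last & bit);
}

/* True on the snapshot where the status bit switches off. */
static bool left_state(unsigned int curr, unsigned int last, unsigned int bit)
{
	return !(curr & bit) && (last & bit);
}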
D_CAN is self clearing */ + if (priv->type != BOSCH_D_CAN) + priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); - /* status events have the highest priority */ - if (irqstatus == STATUS_INTERRUPT) { - priv->current_status = priv->read_reg(priv, - C_CAN_STS_REG); - - /* handle Tx/Rx events */ - if (priv->current_status & STATUS_TXOK) - priv->write_reg(priv, C_CAN_STS_REG, - priv->current_status & ~STATUS_TXOK); - - if (priv->current_status & STATUS_RXOK) - priv->write_reg(priv, C_CAN_STS_REG, - priv->current_status & ~STATUS_RXOK); - - /* handle state changes */ - if ((priv->current_status & STATUS_EWARN) && - (!(priv->last_status & STATUS_EWARN))) { - netdev_dbg(dev, "entered error warning state\n"); - work_done += c_can_handle_state_change(dev, - C_CAN_ERROR_WARNING); - } - if ((priv->current_status & STATUS_EPASS) && - (!(priv->last_status & STATUS_EPASS))) { - netdev_dbg(dev, "entered error passive state\n"); - work_done += c_can_handle_state_change(dev, - C_CAN_ERROR_PASSIVE); - } - if ((priv->current_status & STATUS_BOFF) && - (!(priv->last_status & STATUS_BOFF))) { - netdev_dbg(dev, "entered bus off state\n"); - work_done += c_can_handle_state_change(dev, - C_CAN_BUS_OFF); - } + /* handle state changes */ + if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { + netdev_dbg(dev, "entered error warning state\n"); + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING); + } - /* handle bus recovery events */ - if ((!(priv->current_status & STATUS_BOFF)) && - (priv->last_status & STATUS_BOFF)) { - netdev_dbg(dev, "left bus off state\n"); - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } - if ((!(priv->current_status & STATUS_EPASS)) && - (priv->last_status & STATUS_EPASS)) { - netdev_dbg(dev, "left error passive state\n"); - priv->can.state = CAN_STATE_ERROR_ACTIVE; - } + if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) { + netdev_dbg(dev, "entered error passive state\n"); + work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE); + } - priv->last_status = priv->current_status; - - /* handle lec errors on the bus */ - lec_type = c_can_has_and_handle_berr(priv); - if (lec_type) - work_done += c_can_handle_bus_err(dev, lec_type); - } else if ((irqstatus >= C_CAN_MSG_OBJ_RX_FIRST) && - (irqstatus <= C_CAN_MSG_OBJ_RX_LAST)) { - /* handle events corresponding to receive message objects */ - work_done += c_can_do_rx_poll(dev, (quota - work_done)); - } else if ((irqstatus >= C_CAN_MSG_OBJ_TX_FIRST) && - (irqstatus <= C_CAN_MSG_OBJ_TX_LAST)) { - /* handle events corresponding to transmit message objects */ - c_can_do_tx(dev); + if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) { + netdev_dbg(dev, "entered bus off state\n"); + work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF); + goto end; } + /* handle bus recovery events */ + if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { + netdev_dbg(dev, "left bus off state\n"); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { + netdev_dbg(dev, "left error passive state\n"); + priv->can.state = CAN_STATE_ERROR_ACTIVE; + } + + /* handle lec errors on the bus */ + work_done += c_can_handle_bus_err(dev, curr & LEC_MASK); + + /* Handle Tx/Rx events. 
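The completion reaping in c_can_do_tx() above walks a pending-bit snapshot with ffs(), clearing each bit as it is serviced; the receive loop uses the same pattern. In isolation the walk looks like this (ffs_() mimics the kernel's ffs(); the helper name is invented):

#include <stdio.h>

/* ffs(): 1-based index of the least significant set bit, 0 for 0,
 * matching the helper used above. */
static int ffs_(unsigned int v)
{
	return v ? __builtin_ctz(v) + 1 : 0;
}

int main(void)
{
	unsigned int pend = 0x29;	/* pretend objects 0, 3 and 5 completed */
	int idx;

	while ((idx = ffs_(pend))) {
		idx--;			/* back to a 0-based object index */
		pend &= ~(1u << idx);
		printf("reap message object %d\n", idx);
	}
	return 0;
}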
We do this unconditionally */ + work_done += c_can_do_rx_poll(dev, (quota - work_done)); + c_can_do_tx(dev); + end: if (work_done < quota) { napi_complete(napi); - /* enable all IRQs */ - c_can_enable_all_interrupts(priv, ENABLE_ALL_INTERRUPTS); + /* enable all IRQs if we are not in bus off state */ + if (priv->can.state != CAN_STATE_BUS_OFF) + c_can_irq_control(priv, true); } return work_done; @@ -1142,12 +1053,11 @@ static irqreturn_t c_can_isr(int irq, void *dev_id) struct net_device *dev = (struct net_device *)dev_id; struct c_can_priv *priv = netdev_priv(dev); - priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG); - if (!priv->irqstatus) + if (!priv->read_reg(priv, C_CAN_INT_REG)) return IRQ_NONE; /* disable all interrupts and schedule the NAPI */ - c_can_enable_all_interrupts(priv, DISABLE_ALL_INTERRUPTS); + c_can_irq_control(priv, false); napi_schedule(&priv->napi); return IRQ_HANDLED; @@ -1184,6 +1094,8 @@ static int c_can_open(struct net_device *dev) can_led_event(dev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); + /* enable status change, error and module interrupts */ + c_can_irq_control(priv, true); netif_start_queue(dev); return 0; @@ -1226,7 +1138,6 @@ struct net_device *alloc_c_can_dev(void) return NULL; priv = netdev_priv(dev); - spin_lock_init(&priv->xmit_lock); netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT); priv->dev = dev; @@ -1281,6 +1192,7 @@ int c_can_power_up(struct net_device *dev) u32 val; unsigned long time_out; struct c_can_priv *priv = netdev_priv(dev); + int ret; if (!(dev->flags & IFF_UP)) return 0; @@ -1307,7 +1219,11 @@ int c_can_power_up(struct net_device *dev) if (time_after(jiffies, time_out)) return -ETIMEDOUT; - return c_can_start(dev); + ret = c_can_start(dev); + if (!ret) + c_can_irq_control(priv, true); + + return ret; } EXPORT_SYMBOL_GPL(c_can_power_up); #endif diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h index faa8404162b..c56f1b1c11c 100644 --- a/drivers/net/can/c_can/c_can.h +++ b/drivers/net/can/c_can/c_can.h @@ -22,14 +22,6 @@ #ifndef C_CAN_H #define C_CAN_H -/* - * IFx register masks: - * allow easy operation on 16-bit registers when the - * argument is 32-bit instead - */ -#define IFX_WRITE_LOW_16BIT(x) ((x) & 0xFFFF) -#define IFX_WRITE_HIGH_16BIT(x) (((x) & 0xFFFF0000) >> 16) - /* message object split */ #define C_CAN_NO_OF_OBJECTS 32 #define C_CAN_MSG_OBJ_RX_NUM 16 @@ -45,8 +37,6 @@ #define C_CAN_MSG_OBJ_RX_SPLIT 9 #define C_CAN_MSG_RX_LOW_LAST (C_CAN_MSG_OBJ_RX_SPLIT - 1) - -#define C_CAN_NEXT_MSG_OBJ_MASK (C_CAN_MSG_OBJ_TX_NUM - 1) #define RECEIVE_OBJECT_BITS 0x0000ffff enum reg { @@ -183,23 +173,20 @@ struct c_can_priv { struct napi_struct napi; struct net_device *dev; struct device *device; - spinlock_t xmit_lock; - int tx_object; - int current_status; + atomic_t tx_active; + unsigned long tx_dir; int last_status; u16 (*read_reg) (struct c_can_priv *priv, enum reg index); void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val); void __iomem *base; const u16 *regs; - unsigned long irq_flags; /* for request_irq() */ - unsigned int tx_next; - unsigned int tx_echo; void *priv; /* for board-specific data */ - u16 irqstatus; enum c_can_dev_id type; u32 __iomem *raminit_ctrlreg; - unsigned int instance; + int instance; void (*raminit) (const struct c_can_priv *priv, bool enable); + u32 comm_rcv_high; + u32 rxmasked; u32 dlc[C_CAN_MSG_OBJ_TX_NUM]; }; diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c index bce0be54c2f..fe5f6303b58 100644 --- 
a/drivers/net/can/c_can/c_can_pci.c +++ b/drivers/net/can/c_can/c_can_pci.c @@ -84,8 +84,11 @@ static int c_can_pci_probe(struct pci_dev *pdev, goto out_disable_device; } - pci_set_master(pdev); - pci_enable_msi(pdev); + ret = pci_enable_msi(pdev); + if (!ret) { + dev_info(&pdev->dev, "MSI enabled\n"); + pci_set_master(pdev); + } addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!addr) { @@ -132,6 +135,8 @@ static int c_can_pci_probe(struct pci_dev *pdev, goto out_free_c_can; } + priv->type = c_can_pci_data->type; + /* Configure access to registers */ switch (c_can_pci_data->reg_align) { case C_CAN_REG_ALIGN_32: diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index 806d9275342..1df0b322d1e 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -222,7 +222,7 @@ static int c_can_plat_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 1); priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0) + if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0) dev_info(&pdev->dev, "control memory is not used for raminit\n"); else priv->raminit = c_can_hw_raminit; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index c7a26047874..e318e87e2bf 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt, /* Check if the CAN device has bit-timing parameters */ if (!btc) - return -ENOTSUPP; + return -EOPNOTSUPP; /* * Depending on the given can_bittiming parameter structure the CAN diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index c540e3d12e3..564933ae218 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct peak_pci_chan *chan; - struct net_device *dev; + struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; @@ -688,11 +688,13 @@ failure_remove_channels: writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; - for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { - unregister_sja1000dev(dev); - free_sja1000dev(dev); + for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; + prev_dev = chan->prev_dev; + + unregister_sja1000dev(dev); + free_sja1000dev(dev); } /* free any PCIeC resources too */ @@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev) /* Loop over all registered devices */ while (1) { + struct net_device *prev_dev = chan->prev_dev; + dev_info(&pdev->dev, "removing device %s\n", dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); - dev = chan->prev_dev; + dev = prev_dev; if (!dev) { /* do that only for first channel */ diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c index df136a2516c..014695d7e6a 100644 --- a/drivers/net/can/sja1000/sja1000_isa.c +++ b/drivers/net/can/sja1000/sja1000_isa.c @@ -46,6 +46,7 @@ static int clk[MAXDEV]; static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static int indirect[MAXDEV] = {[0 ... 
(MAXDEV - 1)] = -1}; +static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */ module_param_array(port, ulong, NULL, S_IRUGO); MODULE_PARM_DESC(port, "I/O port number"); @@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv, static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv, int reg) { - unsigned long base = (unsigned long)priv->reg_base; + unsigned long flags, base = (unsigned long)priv->reg_base; + u8 readval; + spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); outb(reg, base); - return inb(base + 1); + readval = inb(base + 1); + spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); + + return readval; } static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, int reg, u8 val) { - unsigned long base = (unsigned long)priv->reg_base; + unsigned long flags, base = (unsigned long)priv->reg_base; + spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); outb(reg, base); outb(val, base + 1); + spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); } static int sja1000_isa_probe(struct platform_device *pdev) @@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev) if (iosize == SJA1000_IOSIZE_INDIRECT) { priv->read_reg = sja1000_isa_port_read_reg_indirect; priv->write_reg = sja1000_isa_port_write_reg_indirect; + spin_lock_init(&indirect_lock[idx]); } else { priv->read_reg = sja1000_isa_port_read_reg; priv->write_reg = sja1000_isa_port_write_reg; @@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); + dev->dev_id = idx; err = register_sja1000dev(dev); if (err) { diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index f5b16e0e3a1..dcf9196f631 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty) if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) return; - spin_lock(&sl->lock); + spin_lock_bh(&sl->lock); if (sl->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - spin_unlock(&sl->lock); + spin_unlock_bh(&sl->lock); netif_wake_queue(sl->dev); return; } @@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty) actual = tty->ops->write(tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; - spin_unlock(&sl->lock); + spin_unlock_bh(&sl->lock); } /* Send a can_frame to a TTY queue. */ diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 39b26fe28d1..d7401017a3f 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" + +config CX_ECAT + tristate "Beckhoff CX5020 EtherCAT master support" + depends on PCI + ---help--- + Driver for EtherCAT master module located on CCAT FPGA + that can be found on Beckhoff CX5020, and possibly other of CX + Beckhoff CX series industrial PCs. + + To compile this driver as a module, choose M here. The module + will be called ec_bhf. 
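The indirect_lock added to sja1000_isa above exists because indirect access is a two-step sequence, first latching the register index and then touching the data port; without a lock, two contexts can interleave between the steps and read or write the wrong register. A user-space analogue of the locked sequence, with a mutex in place of the irq-safe spinlock and an array in place of port I/O:

#include <pthread.h>

static pthread_mutex_t indirect_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char regs[256];
static int reg_index;		/* models the address latch at the base port */

/* Select the register, then read the data port, under one lock so the
 * select/access pair can never interleave with another caller; the
 * write path is the same with a store instead of the load. */
static unsigned char read_reg_indirect(int reg)
{
	unsigned char val;

	pthread_mutex_lock(&indirect_lock);
	reg_index = reg;		/* models outb(reg, base)  */
	val = regs[reg_index];		/* models inb(base + 1)    */
	pthread_mutex_unlock(&indirect_lock);
	return val;
}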
+ source "drivers/net/ethernet/davicom/Kconfig" config DNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 545d0b3b9cb..35190e36c45 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ +obj-$(CONFIG_CX_ECAT) += ec_bhf.o obj-$(CONFIG_DM9000) += davicom/ obj-$(CONFIG_DNET) += dnet.o obj-$(CONFIG_NET_VENDOR_DEC) += dec/ diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig index 80c1ab74a4b..fdddba51473 100644 --- a/drivers/net/ethernet/altera/Kconfig +++ b/drivers/net/ethernet/altera/Kconfig @@ -1,5 +1,6 @@ config ALTERA_TSE tristate "Altera Triple-Speed Ethernet MAC support" + depends on HAS_DMA select PHYLIB ---help--- This driver supports the Altera Triple-Speed (TSE) Ethernet MAC. diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile index d4a187e4536..3eff2fd3997 100644 --- a/drivers/net/ethernet/altera/Makefile +++ b/drivers/net/ethernet/altera/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_ALTERA_TSE) += altera_tse.o altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ altera_msgdma.o altera_sgdma.o altera_utils.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 3df18669ea3..0fb986ba329 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -18,6 +18,7 @@ #include "altera_utils.h" #include "altera_tse.h" #include "altera_msgdmahw.h" +#include "altera_msgdma.h" /* No initialization work to do for MSGDMA */ int msgdma_initialize(struct altera_tse_private *priv) @@ -29,21 +30,23 @@ void msgdma_uninitialize(struct altera_tse_private *priv) { } +void msgdma_start_rxdma(struct altera_tse_private *priv) +{ +} + void msgdma_reset(struct altera_tse_private *priv) { int counter; - struct msgdma_csr *txcsr = - (struct msgdma_csr *)priv->tx_dma_csr; - struct msgdma_csr *rxcsr = - (struct msgdma_csr *)priv->rx_dma_csr; /* Reset Rx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, + msgdma_csroffs(status)); + csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&rxcsr->status, + if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -54,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv) "TSE Rx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status)); /* Reset Tx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, + msgdma_csroffs(status)); + + csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&txcsr->status, + if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -73,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv) 
"TSE Tx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); } void msgdma_disable_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_enable_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_disable_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_enable_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_clear_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status)); } void msgdma_clear_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status)); } /* return 0 to indicate transmit is pending */ int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) { - struct msgdma_extended_desc *desc = priv->tx_dma_desc; - - iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); - iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); - iowrite32(0, &desc->write_addr_lo); - iowrite32(0, &desc->write_addr_hi); - iowrite32(buffer->len, &desc->len); - iowrite32(0, &desc->burst_seq_num); - iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); - iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); + csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_lo)); + csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_hi)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi)); + csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num)); + csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc, + msgdma_descroffs(stride)); + csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc, + msgdma_descroffs(control)); return 0; } @@ -133,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) u32 ready = 0; u32 inuse; u32 status; - struct msgdma_csr *txcsr = - (struct msgdma_csr *)priv->tx_dma_csr; /* Get number of sent descriptors */ - inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; + inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level)) + & 0xffff; if (inuse) { /* Tx FIFO is not empty */ ready = priv->tx_prod - priv->tx_cons - inuse - 1; } else { /* Check for buffered last packet */ - status = ioread32(&txcsr->status); + status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); if (status & MSGDMA_CSR_STAT_BUSY) ready = priv->tx_prod - 
priv->tx_cons - 1; else @@ -154,10 +159,9 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) /* Put buffer to the mSGDMA RX FIFO */ -int msgdma_add_rx_desc(struct altera_tse_private *priv, +void msgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *rxbuffer) { - struct msgdma_extended_desc *desc = priv->rx_dma_desc; u32 len = priv->rx_dma_buf_sz; dma_addr_t dma_addr = rxbuffer->dma_addr; u32 control = (MSGDMA_DESC_CTL_END_ON_EOP @@ -167,15 +171,16 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv, | MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO); - iowrite32(0, &desc->read_addr_lo); - iowrite32(0, &desc->read_addr_hi); - iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); - iowrite32(upper_32_bits(dma_addr), &desc->write_addr_hi); - iowrite32(len, &desc->len); - iowrite32(0, &desc->burst_seq_num); - iowrite32(0x00010001, &desc->stride); - iowrite32(control, &desc->control); - return 1; + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi)); + csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_lo)); + csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_hi)); + csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num)); + csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride)); + csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control)); } /* status is returned on upper 16 bits, @@ -186,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv) u32 rxstatus = 0; u32 pktlength; u32 pktstatus; - struct msgdma_csr *rxcsr = - (struct msgdma_csr *)priv->rx_dma_csr; - struct msgdma_response *rxresp = - (struct msgdma_response *)priv->rx_dma_resp; - - if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { - pktlength = ioread32(&rxresp->bytes_transferred); - pktstatus = ioread32(&rxresp->status); + + if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level)) + & 0xffff) { + pktlength = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(bytes_transferred)); + pktstatus = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(status)); rxstatus = pktstatus; rxstatus = rxstatus << 16; rxstatus |= (pktlength & 0xffff); diff --git a/drivers/net/ethernet/altera/altera_msgdma.h b/drivers/net/ethernet/altera/altera_msgdma.h index 7f0f5bf2bba..42cf61c8105 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.h +++ b/drivers/net/ethernet/altera/altera_msgdma.h @@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *); void msgdma_clear_rxirq(struct altera_tse_private *); void msgdma_clear_txirq(struct altera_tse_private *); u32 msgdma_tx_completions(struct altera_tse_private *); -int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); +void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *); int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *); u32 msgdma_rx_status(struct altera_tse_private *); int msgdma_initialize(struct altera_tse_private *); void msgdma_uninitialize(struct altera_tse_private *); +void msgdma_start_rxdma(struct altera_tse_private *); #endif /* __ALTERA_MSGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h index d7b59ba4019..e335626e1b6 100644 --- a/drivers/net/ethernet/altera/altera_msgdmahw.h +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -17,15 +17,6 @@ #ifndef __ALTERA_MSGDMAHW_H__ #define 
__ALTERA_MSGDMAHW_H__ -/* mSGDMA standard descriptor format - */ -struct msgdma_desc { - u32 read_addr; /* data buffer source address */ - u32 write_addr; /* data buffer destination address */ - u32 len; /* the number of bytes to transfer per descriptor */ - u32 control; /* characteristics of the transfer */ -}; - /* mSGDMA extended descriptor format */ struct msgdma_extended_desc { @@ -159,6 +150,10 @@ struct msgdma_response { u32 status; }; +#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a)) +#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a)) +#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a)) + /* mSGDMA response register bit definitions */ #define MSGDMA_RESP_EARLY_TERM BIT(8) diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index 0ee96639ae4..99cc56f451c 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -20,28 +20,28 @@ #include "altera_sgdmahw.h" #include "altera_sgdma.h" -static void sgdma_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, - dma_addr_t ndesc_phys, - dma_addr_t raddr, - dma_addr_t waddr, - u16 length, - int generate_eop, - int rfixed, - int wfixed); +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, + dma_addr_t ndesc_phys, + dma_addr_t raddr, + dma_addr_t waddr, + u16 length, + int generate_eop, + int rfixed, + int wfixed); static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); static int sgdma_async_read(struct altera_tse_private *priv); static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); static dma_addr_t sgdma_rxphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); static int sgdma_txbusy(struct altera_tse_private *priv); @@ -64,18 +64,23 @@ queue_rx_peekhead(struct altera_tse_private *priv); int sgdma_initialize(struct altera_tse_private *priv) { - priv->txctrlreg = SGDMA_CTRLREG_ILASTD; + priv->txctrlreg = SGDMA_CTRLREG_ILASTD | + SGDMA_CTRLREG_INTEN; priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP | + SGDMA_CTRLREG_INTEN | SGDMA_CTRLREG_ILASTD; + priv->sgdmadesclen = sizeof(struct sgdma_descrip); + INIT_LIST_HEAD(&priv->txlisthd); INIT_LIST_HEAD(&priv->rxlisthd); priv->rxdescphys = (dma_addr_t) 0; priv->txdescphys = (dma_addr_t) 0; - priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, + priv->rxdescphys = dma_map_single(priv->device, + (void __force *)priv->rx_dma_desc, priv->rxdescmem, DMA_BIDIRECTIONAL); if (dma_mapping_error(priv->device, priv->rxdescphys)) { @@ -84,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv) return -EINVAL; } - priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, + priv->txdescphys = dma_map_single(priv->device, + (void __force *)priv->tx_dma_desc, priv->txdescmem, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, priv->txdescphys)) { @@ -93,6 +99,16 @@ int sgdma_initialize(struct altera_tse_private *priv) return -EINVAL; } + /* Initialize descriptor memory to all 0's, sync memory to cache */ + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); + + dma_sync_single_for_device(priv->device, priv->txdescphys, + priv->txdescmem, DMA_TO_DEVICE); + + dma_sync_single_for_device(priv->device, priv->rxdescphys, + 
priv->rxdescmem, DMA_TO_DEVICE); + return 0; } @@ -112,58 +128,48 @@ void sgdma_uninitialize(struct altera_tse_private *priv) */ void sgdma_reset(struct altera_tse_private *priv) { - u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc; - u32 txdescriplen = priv->txdescmem; - u32 *prxdescripmem = (u32 *)priv->rx_dma_desc; - u32 rxdescriplen = priv->rxdescmem; - struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr; - struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr; - /* Initialize descriptor memory to 0 */ - memset(ptxdescripmem, 0, txdescriplen); - memset(prxdescripmem, 0, rxdescriplen); + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); - iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); - iowrite32(0, &ptxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); - iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); - iowrite32(0, &prxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); } +/* For SGDMA, interrupts remain enabled after initially enabling, + * so no need to provide implementations for abstract enable + * and disable + */ + void sgdma_enable_rxirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - priv->rxctrlreg |= SGDMA_CTRLREG_INTEN; - tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN); } void sgdma_enable_txirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; - priv->txctrlreg |= SGDMA_CTRLREG_INTEN; - tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN); } -/* for SGDMA, RX interrupts remain enabled after enabling */ void sgdma_disable_rxirq(struct altera_tse_private *priv) { } -/* for SGDMA, TX interrupts remain enabled after enabling */ void sgdma_disable_txirq(struct altera_tse_private *priv) { } void sgdma_clear_rxirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); } void sgdma_clear_txirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); } /* transmits buffer through SGDMA. 
Returns number of buffers @@ -173,28 +179,27 @@ void sgdma_clear_txirq(struct altera_tse_private *priv) */ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) { - int pktstx = 0; - struct sgdma_descrip *descbase = - (struct sgdma_descrip *)priv->tx_dma_desc; + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->tx_dma_desc; - struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; /* wait 'til the tx sgdma is ready for the next transmit request */ if (sgdma_txbusy(priv)) return 0; - sgdma_descrip(cdesc, /* current descriptor */ - ndesc, /* next descriptor */ - sgdma_txphysaddr(priv, ndesc), - buffer->dma_addr, /* address of packet to xmit */ - 0, /* write addr 0 for tx dma */ - buffer->len, /* length of packet */ - SGDMA_CONTROL_EOP, /* Generate EOP */ - 0, /* read fixed */ - SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ + sgdma_setup_descrip(cdesc, /* current descriptor */ + ndesc, /* next descriptor */ + sgdma_txphysaddr(priv, ndesc), + buffer->dma_addr, /* address of packet to xmit */ + 0, /* write addr 0 for tx dma */ + buffer->len, /* length of packet */ + SGDMA_CONTROL_EOP, /* Generate EOP */ + 0, /* read fixed */ + SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ - pktstx = sgdma_async_write(priv, cdesc); + sgdma_async_write(priv, cdesc); /* enqueue the request to the pending transmit queue */ queue_tx(priv, buffer); @@ -208,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) u32 sgdma_tx_completions(struct altera_tse_private *priv) { u32 ready = 0; - struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc; if (!sgdma_txbusy(priv) && - ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && + ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control)) + & SGDMA_CONTROL_HW_OWNED) == 0) && (dequeue_tx(priv))) { ready = 1; } @@ -219,11 +224,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv) return ready; } -int sgdma_add_rx_desc(struct altera_tse_private *priv, - struct tse_buffer *rxbuffer) +void sgdma_start_rxdma(struct altera_tse_private *priv) +{ + sgdma_async_read(priv); +} + +void sgdma_add_rx_desc(struct altera_tse_private *priv, + struct tse_buffer *rxbuffer) { queue_rx(priv, rxbuffer); - return sgdma_async_read(priv); } /* status is returned on upper 16 bits, @@ -231,38 +240,62 @@ int sgdma_add_rx_desc(struct altera_tse_private *priv, */ u32 sgdma_rx_status(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; - struct sgdma_descrip *desc = NULL; - int pktsrx; - unsigned int rxstatus = 0; - unsigned int pktlength = 0; - unsigned int pktstatus = 0; + struct sgdma_descrip __iomem *base = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; + struct sgdma_descrip __iomem *desc = NULL; struct tse_buffer *rxbuffer = NULL; + unsigned int rxstatus = 0; - dma_sync_single_for_cpu(priv->device, - priv->rxdescphys, - priv->rxdescmem, - DMA_BIDIRECTIONAL); + u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)); desc = &base[0]; - if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) || - (desc->status & SGDMA_STATUS_EOP)) { - pktlength = desc->bytes_xferred; - pktstatus = desc->status & 0x3f; - rxstatus = pktstatus; + if (sts & SGDMA_STSREG_EOP) { + unsigned int pktlength = 0; + unsigned int pktstatus = 0; + 
dma_sync_single_for_cpu(priv->device, + priv->rxdescphys, + priv->sgdmadesclen, + DMA_FROM_DEVICE); + + pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); + pktstatus = csrrd8(desc, sgdma_descroffs(status)); + rxstatus = pktstatus & ~SGDMA_STATUS_EOP; rxstatus = rxstatus << 16; rxstatus |= (pktlength & 0xffff); - desc->status = 0; - - rxbuffer = dequeue_rx(priv); - if (rxbuffer == NULL) + if (rxstatus) { + csrwr8(0, desc, sgdma_descroffs(status)); + + rxbuffer = dequeue_rx(priv); + if (rxbuffer == NULL) + netdev_info(priv->dev, + "sgdma rx and rx queue empty!\n"); + + /* Clear control */ + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); + /* clear status */ + csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status)); + + /* kick the rx sgdma after reaping this descriptor */ + sgdma_async_read(priv); + + } else { + /* If the SGDMA indicated an end of packet on recv, + * then it's expected that the rxstatus from the + * descriptor is non-zero - meaning a valid packet + * with a nonzero length, or an error has been + * indicated. if not, then all we can do is signal + * an error and return no packet received. Most likely + * there is a system design error, or an error in the + * underlying kernel (cache or cache management problem) + */ netdev_err(priv->dev, - "sgdma rx and rx queue empty!\n"); - - /* kick the rx sgdma after reaping this descriptor */ - pktsrx = sgdma_async_read(priv); + "SGDMA RX Error Info: %x, %x, %x\n", + sts, csrrd8(desc, sgdma_descroffs(status)), + rxstatus); + } + } else if (sts == 0) { + sgdma_async_read(priv); } return rxstatus; @@ -270,38 +303,41 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) /* Private functions */ -static void sgdma_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, - dma_addr_t ndesc_phys, - dma_addr_t raddr, - dma_addr_t waddr, - u16 length, - int generate_eop, - int rfixed, - int wfixed) +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, + dma_addr_t ndesc_phys, + dma_addr_t raddr, + dma_addr_t waddr, + u16 length, + int generate_eop, + int rfixed, + int wfixed) { /* Clear the next descriptor as not owned by hardware */ - u32 ctrl = ndesc->control; + + u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control)); ctrl &= ~SGDMA_CONTROL_HW_OWNED; - ndesc->control = ctrl; + csrwr8(ctrl, ndesc, sgdma_descroffs(control)); - ctrl = 0; ctrl = SGDMA_CONTROL_HW_OWNED; ctrl |= generate_eop; ctrl |= rfixed; ctrl |= wfixed; /* Channel is implicitly zero, initialized to 0 by default */ - - desc->raddr = raddr; - desc->waddr = waddr; - desc->next = lower_32_bits(ndesc_phys); - desc->control = ctrl; - desc->status = 0; - desc->rburst = 0; - desc->wburst = 0; - desc->bytes = length; - desc->bytes_xferred = 0; + csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr)); + csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr)); + + csrwr32(0, desc, sgdma_descroffs(pad1)); + csrwr32(0, desc, sgdma_descroffs(pad2)); + csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next)); + + csrwr8(ctrl, desc, sgdma_descroffs(control)); + csrwr8(0, desc, sgdma_descroffs(status)); + csrwr8(0, desc, sgdma_descroffs(wburst)); + csrwr8(0, desc, sgdma_descroffs(rburst)); + csrwr16(length, desc, sgdma_descroffs(bytes)); + csrwr16(0, desc, sgdma_descroffs(bytes_xferred)); } /* If hardware is busy, don't restart async read. 
@@ -312,48 +348,43 @@ static void sgdma_descrip(struct sgdma_descrip *desc, */ static int sgdma_async_read(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - struct sgdma_descrip *descbase = - (struct sgdma_descrip *)priv->rx_dma_desc; + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; - struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; - unsigned int sts = ioread32(&csr->status); struct tse_buffer *rxbuffer = NULL; if (!sgdma_rxbusy(priv)) { rxbuffer = queue_rx_peekhead(priv); - if (rxbuffer == NULL) + if (rxbuffer == NULL) { + netdev_err(priv->dev, "no rx buffers available\n"); return 0; - - sgdma_descrip(cdesc, /* current descriptor */ - ndesc, /* next descriptor */ - sgdma_rxphysaddr(priv, ndesc), - 0, /* read addr 0 for rx dma */ - rxbuffer->dma_addr, /* write addr for rx dma */ - 0, /* read 'til EOP */ - 0, /* EOP: NA for rx dma */ - 0, /* read fixed: NA for rx dma */ - 0); /* SOP: NA for rx DMA */ - - /* clear control and status */ - iowrite32(0, &csr->control); - - /* If status available, clear those bits */ - if (sts & 0xf) - iowrite32(0xf, &csr->status); + } + + sgdma_setup_descrip(cdesc, /* current descriptor */ + ndesc, /* next descriptor */ + sgdma_rxphysaddr(priv, ndesc), + 0, /* read addr 0 for rx dma */ + rxbuffer->dma_addr, /* write addr for rx dma */ + 0, /* read 'til EOP */ + 0, /* EOP: NA for rx dma */ + 0, /* read fixed: NA for rx dma */ + 0); /* SOP: NA for rx DMA */ dma_sync_single_for_device(priv->device, priv->rxdescphys, - priv->rxdescmem, - DMA_BIDIRECTIONAL); + priv->sgdmadesclen, + DMA_TO_DEVICE); - iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), + priv->rx_dma_csr, + sgdma_csroffs(next_descrip)); - iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START), + priv->rx_dma_csr, + sgdma_csroffs(control)); return 1; } @@ -362,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv) } static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; - if (sgdma_txbusy(priv)) return 0; /* clear control and status */ - iowrite32(0, &csr->control); - iowrite32(0x1f, &csr->status); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); dma_sync_single_for_device(priv->device, priv->txdescphys, - priv->txdescmem, DMA_TO_DEVICE); + priv->sgdmadesclen, DMA_TO_DEVICE); - iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), + priv->tx_dma_csr, + sgdma_csroffs(next_descrip)); - iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START), + priv->tx_dma_csr, + sgdma_csroffs(control)); return 1; } static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->txdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; @@ -396,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv, static dma_addr_t sgdma_rxphysaddr(struct 
altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->rxdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; @@ -485,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv) */ static int sgdma_rxbusy(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - return ioread32(&csr->status) & SGDMA_STSREG_BUSY; + return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY; } /* waits for the tx sgdma to finish its current operation, returns 0 @@ -495,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv) static int sgdma_txbusy(struct altera_tse_private *priv) { int delay = 0; - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; /* if DMA is busy, wait for current transaction to finish */ - while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) + while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) && (delay++ < 100)) udelay(1); - if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) { + if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) { netdev_err(priv->dev, "timeout waiting for tx dma\n"); return 1; } diff --git a/drivers/net/ethernet/altera/altera_sgdma.h b/drivers/net/ethernet/altera/altera_sgdma.h index 07d471729dc..584977e29ef 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.h +++ b/drivers/net/ethernet/altera/altera_sgdma.h @@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *); void sgdma_clear_txirq(struct altera_tse_private *); int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *); u32 sgdma_tx_completions(struct altera_tse_private *); -int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); +void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *); void sgdma_status(struct altera_tse_private *); u32 sgdma_rx_status(struct altera_tse_private *); int sgdma_initialize(struct altera_tse_private *); void sgdma_uninitialize(struct altera_tse_private *); +void sgdma_start_rxdma(struct altera_tse_private *); #endif /* __ALTERA_SGDMA_H__ */ diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h index ba3334f3538..85bc33b218d 100644 --- a/drivers/net/ethernet/altera/altera_sgdmahw.h +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h @@ -19,16 +19,16 @@ /* SGDMA descriptor structure */ struct sgdma_descrip { - unsigned int raddr; /* address of data to be read */ - unsigned int pad1; - unsigned int waddr; - unsigned int pad2; - unsigned int next; - unsigned int pad3; - unsigned short bytes; - unsigned char rburst; - unsigned char wburst; - unsigned short bytes_xferred; /* 16 bits, bytes xferred */ + u32 raddr; /* address of data to be read */ + u32 pad1; + u32 waddr; + u32 pad2; + u32 next; + u32 pad3; + u16 bytes; + u8 rburst; + u8 wburst; + u16 bytes_xferred; /* 16 bits, bytes xferred */ /* bit 0: error * bit 1: length error @@ -39,7 +39,7 @@ struct sgdma_descrip { * bit 6: reserved * bit 7: status eop for recv case */ - unsigned char status; + u8 status; /* bit 0: eop * bit 1: read_fixed @@ -47,7 +47,7 @@ struct sgdma_descrip { * bits 3,4,5,6: Channel (always 0) * bit 7: hardware owned */ - unsigned char control; + u8 control; } __packed; @@ -101,6 +101,8 @@ struct sgdma_csr { u32 pad3[3]; }; +#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a)) +#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip,
a)) #define SGDMA_STSREG_ERR BIT(0) /* Error */ #define SGDMA_STSREG_EOP BIT(1) /* EOP */ diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 8feeed05de0..2adb24d4523 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -58,6 +58,8 @@ /* MAC function configuration default settings */ #define ALTERA_TSE_TX_IPG_LENGTH 12 +#define ALTERA_TSE_PAUSE_QUANTA 0xffff + #define GET_BIT_VALUE(v, bit) (((v) >> (bit)) & 0x1) /* MAC Command_Config Register Bit Definitions @@ -355,6 +357,8 @@ struct altera_tse_mac { u32 reserved5[42]; }; +#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a)) + /* Transmit and Receive Command Registers Bit Definitions */ #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) @@ -390,10 +394,11 @@ struct altera_dmaops { void (*clear_rxirq)(struct altera_tse_private *); int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *); u32 (*tx_completions)(struct altera_tse_private *); - int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); + void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *); u32 (*get_rx_status)(struct altera_tse_private *); int (*init_dma)(struct altera_tse_private *); void (*uninit_dma)(struct altera_tse_private *); + void (*start_rxdma)(struct altera_tse_private *); }; /* This structure is private to each device. @@ -453,6 +458,7 @@ struct altera_tse_private { u32 rxctrlreg; dma_addr_t rxdescphys; dma_addr_t txdescphys; + size_t sgdmadesclen; struct list_head txlisthd; struct list_head rxlisthd; @@ -483,4 +489,49 @@ struct altera_tse_private { */ void altera_tse_set_ethtool_ops(struct net_device *); +static inline +u32 csrrd32(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readl(paddr); +} + +static inline +u16 csrrd16(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readw(paddr); +} + +static inline +u8 csrrd8(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readb(paddr); +} + +static inline +void csrwr32(u32 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writel(val, paddr); +} + +static inline +void csrwr16(u16 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writew(val, paddr); +} + +static inline +void csrwr8(u8 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writeb(val, paddr); +} + #endif /* __ALTERA_TSE_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c index 319ca74f5e7..54c25eff795 100644 --- a/drivers/net/ethernet/altera/altera_tse_ethtool.c +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev, struct altera_tse_private *priv = netdev_priv(dev); u32 rev = ioread32(&priv->mac_dev->megacore_revision); - strcpy(info->driver, "Altera TSE MAC IP Driver"); + strcpy(info->driver, "altera_tse"); strcpy(info->version, "v8.0"); snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d", rev & 0xFFFF, (rev & 0xFFFF0000) >> 16); @@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *buf) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = 
priv->mac_dev; u64 ext; - buf[0] = ioread32(&mac->frames_transmitted_ok); - buf[1] = ioread32(&mac->frames_received_ok); - buf[2] = ioread32(&mac->frames_check_sequence_errors); - buf[3] = ioread32(&mac->alignment_errors); + buf[0] = csrrd32(priv->mac_dev, + tse_csroffs(frames_transmitted_ok)); + buf[1] = csrrd32(priv->mac_dev, + tse_csroffs(frames_received_ok)); + buf[2] = csrrd32(priv->mac_dev, + tse_csroffs(frames_check_sequence_errors)); + buf[3] = csrrd32(priv->mac_dev, + tse_csroffs(alignment_errors)); /* Extended aOctetsTransmittedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; - ext |= ioread32(&mac->octets_transmitted_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_transmitted_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_transmitted_ok)); buf[4] = ext; /* Extended aOctetsReceivedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; - ext |= ioread32(&mac->octets_received_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_received_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_received_ok)); buf[5] = ext; - buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); - buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); - buf[8] = ioread32(&mac->if_in_errors); - buf[9] = ioread32(&mac->if_out_errors); - buf[10] = ioread32(&mac->if_in_ucast_pkts); - buf[11] = ioread32(&mac->if_in_multicast_pkts); - buf[12] = ioread32(&mac->if_in_broadcast_pkts); - buf[13] = ioread32(&mac->if_out_discards); - buf[14] = ioread32(&mac->if_out_ucast_pkts); - buf[15] = ioread32(&mac->if_out_multicast_pkts); - buf[16] = ioread32(&mac->if_out_broadcast_pkts); - buf[17] = ioread32(&mac->ether_stats_drop_events); + buf[6] = csrrd32(priv->mac_dev, + tse_csroffs(tx_pause_mac_ctrl_frames)); + buf[7] = csrrd32(priv->mac_dev, + tse_csroffs(rx_pause_mac_ctrl_frames)); + buf[8] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_errors)); + buf[9] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_errors)); + buf[10] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_ucast_pkts)); + buf[11] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_multicast_pkts)); + buf[12] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_broadcast_pkts)); + buf[13] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_discards)); + buf[14] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_ucast_pkts)); + buf[15] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_multicast_pkts)); + buf[16] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_broadcast_pkts)); + buf[17] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_drop_events)); /* Extended etherStatsOctets counter */ - ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; - ext |= ioread32(&mac->ether_stats_octets); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_ether_stats_octets)) << 32; + ext |= csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_octets)); buf[18] = ext; - buf[19] = ioread32(&mac->ether_stats_pkts); - buf[20] = ioread32(&mac->ether_stats_undersize_pkts); - buf[21] = ioread32(&mac->ether_stats_oversize_pkts); - buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); - buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); - buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); - buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); - buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); - buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); - buf[28] = ioread32(&mac->ether_stats_pkts_1519tox_octets); - buf[29] = ioread32(&mac->ether_stats_jabbers); - buf[30] = 
ioread32(&mac->ether_stats_fragments); + buf[19] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts)); + buf[20] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_undersize_pkts)); + buf[21] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_oversize_pkts)); + buf[22] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_64_octets)); + buf[23] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_65to127_octets)); + buf[24] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_128to255_octets)); + buf[25] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_256to511_octets)); + buf[26] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_512to1023_octets)); + buf[27] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1024to1518_octets)); + buf[28] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1519tox_octets)); + buf[29] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_jabbers)); + buf[30] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_fragments)); } static int tse_sset_count(struct net_device *dev, int sset) @@ -178,19 +213,24 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, { int i; struct altera_tse_private *priv = netdev_priv(dev); - u32 *tse_mac_regs = (u32 *)priv->mac_dev; u32 *buf = regbuf; /* Set version to a known value, so ethtool knows * how to do any special formatting of this data. * This version number will need to change if and * when this register table is changed. + * + * version[31:0] = 1: Dump the first 128 TSE Registers + * Upper bits are all 0 by default + * + * Upper 16-bits will indicate feature presence for + * Ethtool register decoding in future version. */ regs->version = 1; for (i = 0; i < TSE_NUM_REGS; i++) - buf[i] = ioread32(&tse_mac_regs[i]); + buf[i] = csrrd32(priv->mac_dev, i * 4); } static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index c70a29e0b9f..7330681574d 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv) */ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { - struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; - unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; - u32 data; + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); /* set MDIO address */ - iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy0_addr)); /* get the data */ - data = ioread32(&mdio_regs[regnum]) & 0xffff; - return data; + return csrrd32(priv->mac_dev, + tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; } static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { - struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; - unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); /* set MDIO address */ - iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy0_addr)); /* write the data */ - iowrite32((u32) value, &mdio_regs[regnum]); + csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); return 0; } @@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned 
int id) for (i = 0; i < PHY_MAX_ADDR; i++) mdio->irq[i] = PHY_POLL; - mdio->priv = priv->mac_dev; + mdio->priv = dev; mdio->parent = priv->device; ret = of_mdiobus_register(mdio, mdio_node); @@ -224,6 +225,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv, dev_kfree_skb_any(rxbuffer->skb); return -EINVAL; } + rxbuffer->dma_addr &= (dma_addr_t)~3; rxbuffer->len = len; return 0; } @@ -425,9 +427,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit) priv->dev->stats.rx_bytes += pktlength; entry = next_entry; + + tse_rx_refill(priv); } - tse_rx_refill(priv); return count; } @@ -520,7 +523,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id) struct altera_tse_private *priv; unsigned long int flags; - if (unlikely(!dev)) { pr_err("%s: invalid dev pointer\n", __func__); return IRQ_NONE; @@ -562,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int nopaged_len = skb_headlen(skb); enum netdev_tx ret = NETDEV_TX_OK; dma_addr_t dma_addr; - int txcomplete = 0; spin_lock_bh(&priv->tx_lock); @@ -598,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_sync_single_for_device(priv->device, buffer->dma_addr, buffer->len, DMA_TO_DEVICE); - txcomplete = priv->dmaops->tx_buffer(priv, buffer); + priv->dmaops->tx_buffer(priv, buffer); skb_tx_timestamp(skb); @@ -697,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev) struct altera_tse_private *priv = netdev_priv(dev); struct phy_device *phydev = NULL; char phy_id_fmt[MII_BUS_ID_SIZE + 3]; - int ret; if (priv->phy_addr != POLL_PHY) { snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, @@ -711,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev) netdev_err(dev, "Could not attach to PHY\n"); } else { + int ret; phydev = phy_find_first(priv->mdio); if (phydev == NULL) { netdev_err(dev, "No PHY found\n"); @@ -790,7 +791,6 @@ static int init_phy(struct net_device *dev) static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) { - struct altera_tse_mac *mac = priv->mac_dev; u32 msb; u32 lsb; @@ -798,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) lsb = ((addr[5] << 8) | addr[4]) & 0xffff; /* Set primary MAC address */ - iowrite32(msb, &mac->mac_addr_0); - iowrite32(lsb, &mac->mac_addr_1); + csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0)); + csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1)); } /* MAC software reset. 
@@ -810,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) */ static int reset_mac(struct altera_tse_private *priv) { - void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config; int counter; u32 dat; - dat = ioread32(cmd_cfg_reg); + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; - iowrite32(dat, cmd_cfg_reg); + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) + if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_SW_RESET)) break; udelay(1); } if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - dat = ioread32(cmd_cfg_reg); + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); dat &= ~MAC_CMDCFG_SW_RESET; - iowrite32(dat, cmd_cfg_reg); + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); return -1; } return 0; @@ -839,42 +839,58 @@ static int reset_mac(struct altera_tse_private *priv) */ static int init_mac(struct altera_tse_private *priv) { - struct altera_tse_mac *mac = priv->mac_dev; unsigned int cmd = 0; u32 frm_length; /* Setup Rx FIFO */ - iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, - &mac->rx_section_empty); - iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); - iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); - iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); + csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(rx_section_empty)); + + csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev, + tse_csroffs(rx_section_full)); + + csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, priv->mac_dev, + tse_csroffs(rx_almost_empty)); + + csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(rx_almost_full)); /* Setup Tx FIFO */ - iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, - &mac->tx_section_empty); - iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); - iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); - iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); + csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(tx_section_empty)); + + csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev, + tse_csroffs(tx_section_full)); + + csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev, + tse_csroffs(tx_almost_empty)); + + csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(tx_almost_full)); /* MAC Address Configuration */ tse_update_mac_addr(priv, priv->dev->dev_addr); /* MAC Function Configuration */ frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; - iowrite32(frm_length, &mac->frm_length); - iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); + csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length)); + + csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev, + tse_csroffs(tx_ipg_length)); /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit * start address */ - tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); - tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | - ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); + tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat), + ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); + + tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat), + ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | + ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); /* Set the MAC options */ - cmd 
= ioread32(&mac->command_config); - cmd |= MAC_CMDCFG_PAD_EN; /* Padding Removal on Receive */ + cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config)); + cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames * with CRC errors @@ -882,7 +898,16 @@ static int init_mac(struct altera_tse_private *priv) cmd |= MAC_CMDCFG_CNTL_FRM_ENA; cmd &= ~MAC_CMDCFG_TX_ENA; cmd &= ~MAC_CMDCFG_RX_ENA; - iowrite32(cmd, &mac->command_config); + + /* Default speed and duplex setting, full/100 */ + cmd &= ~MAC_CMDCFG_HD_ENA; + cmd &= ~MAC_CMDCFG_ETH_SPEED; + cmd &= ~MAC_CMDCFG_ENA_10; + + csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config)); + + csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev, + tse_csroffs(pause_quanta)); if (netif_msg_hw(priv)) dev_dbg(priv->device, @@ -895,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv) */ static void tse_set_mac(struct altera_tse_private *priv, bool enable) { - struct altera_tse_mac *mac = priv->mac_dev; - u32 value = ioread32(&mac->command_config); + u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config)); if (enable) value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; else value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); - iowrite32(value, &mac->command_config); + csrwr32(value, priv->mac_dev, tse_csroffs(command_config)); } /* Change the MTU @@ -933,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu) static void altera_tse_set_mcfilter(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; int i; struct netdev_hw_addr *ha; /* clear the hash filter */ for (i = 0; i < 64; i++) - iowrite32(0, &(mac->hash_table[i])); + csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4); netdev_for_each_mc_addr(ha, dev) { unsigned int hash = 0; @@ -955,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev) hash = (hash << 1) | xor_bit; } - iowrite32(1, &(mac->hash_table[hash])); + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4); } } @@ -963,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev) static void altera_tse_set_mcfilterall(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; int i; /* set the hash filter */ for (i = 0; i < 64; i++) - iowrite32(1, &(mac->hash_table[i])); + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); } /* Set or clear the multicast filter for this adaptor @@ -976,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev) static void tse_set_rx_mode_hashfilter(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; spin_lock(&priv->mac_cfg_lock); if (dev->flags & IFF_PROMISC) - tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); if (dev->flags & IFF_ALLMULTI) altera_tse_set_mcfilterall(dev); @@ -996,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev) static void tse_set_rx_mode(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; spin_lock(&priv->mac_cfg_lock); if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) - tse_set_bit(&mac->command_config, 
MAC_CMDCFG_PROMIS_EN); + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); else - tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_clear_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); spin_unlock(&priv->mac_cfg_lock); } @@ -1085,17 +1108,19 @@ static int tse_open(struct net_device *dev) spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); - /* Start MAC Rx/Tx */ - spin_lock(&priv->mac_cfg_lock); - tse_set_mac(priv, true); - spin_unlock(&priv->mac_cfg_lock); - if (priv->phydev) phy_start(priv->phydev); napi_enable(&priv->napi); netif_start_queue(dev); + priv->dmaops->start_rxdma(priv); + + /* Start MAC Rx/Tx */ + spin_lock(&priv->mac_cfg_lock); + tse_set_mac(priv, true); + spin_unlock(&priv->mac_cfg_lock); + return 0; tx_request_irq_error: @@ -1167,7 +1192,6 @@ static struct net_device_ops altera_tse_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; - static int request_and_map(struct platform_device *pdev, const char *name, struct resource **res, void __iomem **ptr) { @@ -1235,7 +1259,7 @@ static int altera_tse_probe(struct platform_device *pdev) /* Get the mapped address to the SGDMA descriptor memory */ ret = request_and_map(pdev, "s1", &dma_res, &descmap); if (ret) - goto out_free; + goto err_free_netdev; /* Start of that memory is for transmit descriptors */ priv->tx_dma_desc = descmap; @@ -1254,24 +1278,24 @@ static int altera_tse_probe(struct platform_device *pdev) if (upper_32_bits(priv->rxdescmem_busaddr)) { dev_dbg(priv->device, "SGDMA bus addresses greater than 32-bits\n"); - goto out_free; + goto err_free_netdev; } if (upper_32_bits(priv->txdescmem_busaddr)) { dev_dbg(priv->device, "SGDMA bus addresses greater than 32-bits\n"); - goto out_free; + goto err_free_netdev; } } else if (priv->dmaops && priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) { ret = request_and_map(pdev, "rx_resp", &dma_res, &priv->rx_dma_resp); if (ret) - goto out_free; + goto err_free_netdev; ret = request_and_map(pdev, "tx_desc", &dma_res, &priv->tx_dma_desc); if (ret) - goto out_free; + goto err_free_netdev; priv->txdescmem = resource_size(dma_res); priv->txdescmem_busaddr = dma_res->start; @@ -1279,13 +1303,13 @@ static int altera_tse_probe(struct platform_device *pdev) ret = request_and_map(pdev, "rx_desc", &dma_res, &priv->rx_dma_desc); if (ret) - goto out_free; + goto err_free_netdev; priv->rxdescmem = resource_size(dma_res); priv->rxdescmem_busaddr = dma_res->start; } else { - goto out_free; + goto err_free_netdev; } if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask))) @@ -1294,26 +1318,26 @@ static int altera_tse_probe(struct platform_device *pdev) else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32))) dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32)); else - goto out_free; + goto err_free_netdev; /* MAC address space */ ret = request_and_map(pdev, "control_port", &control_port, (void __iomem **)&priv->mac_dev); if (ret) - goto out_free; + goto err_free_netdev; /* xSGDMA Rx Dispatcher address space */ ret = request_and_map(pdev, "rx_csr", &dma_res, &priv->rx_dma_csr); if (ret) - goto out_free; + goto err_free_netdev; /* xSGDMA Tx Dispatcher address space */ ret = request_and_map(pdev, "tx_csr", &dma_res, &priv->tx_dma_csr); if (ret) - goto out_free; + goto err_free_netdev; /* Rx IRQ */ @@ -1321,7 +1345,7 @@ static int altera_tse_probe(struct platform_device *pdev) if (priv->rx_irq == -ENXIO) { dev_err(&pdev->dev, "cannot obtain Rx IRQ\n"); ret = -ENXIO; - goto out_free; + goto err_free_netdev; } 
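The out_free to err_free_netdev renaming threaded through altera_tse_probe() here (the matching label block appears further down in this hunk) is the kernel's layered-goto unwind idiom: each label undoes exactly the resources acquired before the failing step, in reverse order, so every failure site jumps to one label and falls through the remaining cleanups. A minimal sketch of the shape, with hypothetical foo_* helpers standing in for the driver's real calls:

    static int foo_probe(struct platform_device *pdev)
    {
            struct net_device *ndev;
            int ret;

            ndev = alloc_etherdev(sizeof(struct foo_priv)); /* step 1 */
            if (!ndev)
                    return -ENOMEM;

            ret = foo_mdio_create(ndev);                    /* step 2 */
            if (ret)
                    goto err_free_netdev;

            ret = register_netdev(ndev);                    /* step 3 */
            if (ret)
                    goto err_destroy_mdio;

            ret = foo_init_phy(ndev);                       /* step 4 */
            if (ret)
                    goto err_unregister_netdev;

            return 0;

    err_unregister_netdev:
            unregister_netdev(ndev);        /* undo step 3 */
    err_destroy_mdio:
            foo_mdio_destroy(ndev);         /* undo step 2 */
    err_free_netdev:
            free_netdev(ndev);              /* undo step 1 */
            return ret;
    }

A failure at step 4 thus releases everything from steps 1 through 3 and nothing else, which is exactly the property the relabeled paths in this probe function preserve.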
/* Tx IRQ */ @@ -1329,7 +1353,7 @@ static int altera_tse_probe(struct platform_device *pdev) if (priv->tx_irq == -ENXIO) { dev_err(&pdev->dev, "cannot obtain Tx IRQ\n"); ret = -ENXIO; - goto out_free; + goto err_free_netdev; } /* get FIFO depths from device tree */ @@ -1337,14 +1361,14 @@ static int altera_tse_probe(struct platform_device *pdev) &priv->rx_fifo_depth)) { dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n"); ret = -ENXIO; - goto out_free; + goto err_free_netdev; } if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &priv->rx_fifo_depth)) { dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); ret = -ENXIO; - goto out_free; + goto err_free_netdev; } /* get hash filter settings for this instance */ @@ -1352,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev) of_property_read_bool(pdev->dev.of_node, "altr,has-hash-multicast-filter"); + /* Set hash filter to not set for now until the + * multicast filter receive issue is debugged + */ + priv->hash_filter = 0; + /* get supplemental address settings for this instance */ priv->added_unicast = of_property_read_bool(pdev->dev.of_node, @@ -1393,7 +1422,7 @@ static int altera_tse_probe(struct platform_device *pdev) ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) { dev_err(&pdev->dev, "invalid phy-addr specified %d\n", priv->phy_addr); - goto out_free; + goto err_free_netdev; } /* Create/attach to MDIO bus */ @@ -1401,7 +1430,7 @@ static int altera_tse_probe(struct platform_device *pdev) atomic_add_return(1, &instance_count)); if (ret) - goto out_free; + goto err_free_netdev; /* initialize netdev */ ether_setup(ndev); @@ -1438,7 +1467,7 @@ static int altera_tse_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret) { dev_err(&pdev->dev, "failed to register TSE net device\n"); - goto out_free_mdio; + goto err_register_netdev; } platform_set_drvdata(pdev, ndev); @@ -1455,13 +1484,16 @@ static int altera_tse_probe(struct platform_device *pdev) ret = init_phy(ndev); if (ret != 0) { netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret); - goto out_free_mdio; + goto err_init_phy; } return 0; -out_free_mdio: +err_init_phy: + unregister_netdev(ndev); +err_register_netdev: + netif_napi_del(&priv->napi); altera_tse_mdio_destroy(ndev); -out_free: +err_free_netdev: free_netdev(ndev); return ret; } @@ -1480,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev) return 0; } -struct altera_dmaops altera_dtype_sgdma = { +static const struct altera_dmaops altera_dtype_sgdma = { .altera_dtype = ALTERA_DTYPE_SGDMA, .dmamask = 32, .reset_dma = sgdma_reset, @@ -1496,9 +1528,10 @@ struct altera_dmaops altera_dtype_sgdma = { .get_rx_status = sgdma_rx_status, .init_dma = sgdma_initialize, .uninit_dma = sgdma_uninitialize, + .start_rxdma = sgdma_start_rxdma, }; -struct altera_dmaops altera_dtype_msgdma = { +static const struct altera_dmaops altera_dtype_msgdma = { .altera_dtype = ALTERA_DTYPE_MSGDMA, .dmamask = 64, .reset_dma = msgdma_reset, @@ -1514,6 +1547,7 @@ struct altera_dmaops altera_dtype_msgdma = { .get_rx_status = msgdma_rx_status, .init_dma = msgdma_initialize, .uninit_dma = msgdma_uninitialize, + .start_rxdma = msgdma_start_rxdma, }; static struct of_device_id altera_tse_ids[] = { diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c index 70fa13f486b..d7eeb1713ad 100644 --- a/drivers/net/ethernet/altera/altera_utils.c +++ b/drivers/net/ethernet/altera/altera_utils.c @@ -17,28 +17,28 @@ #include "altera_tse.h" #include 
"altera_utils.h" -void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); value |= bit_mask; - iowrite32(value, ioaddr); + csrwr32(value, ioaddr, offs); } -void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); value &= ~bit_mask; - iowrite32(value, ioaddr); + csrwr32(value, ioaddr, offs); } -int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); return (value & bit_mask) ? 1 : 0; } -int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); return (value & bit_mask) ? 0 : 1; } diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h index ce1db36d358..baf100ccf58 100644 --- a/drivers/net/ethernet/altera/altera_utils.h +++ b/drivers/net/ethernet/altera/altera_utils.h @@ -19,9 +19,9 @@ #ifndef __ALTERA_UTILS_H__ #define __ALTERA_UTILS_H__ -void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); -void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); -int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); -int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask); #endif /* __ALTERA_UTILS_H__*/ diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index 928fac6dd10..53f85bf7152 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -11,6 +11,7 @@ #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/phy.h> +#include <linux/clk.h> /* STATUS and ENABLE Register bit masks */ #define TXINT_MASK (1<<0) /* Transmit interrupt */ @@ -131,6 +132,7 @@ struct arc_emac_priv { struct mii_bus *bus; void __iomem *regs; + struct clk *clk; struct napi_struct napi; struct net_device_stats stats; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index eeecc29cf5b..d647a7d115a 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -574,6 +574,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) return NETDEV_TX_OK; } +static void arc_emac_set_address_internal(struct net_device *ndev) +{ + struct arc_emac_priv *priv = netdev_priv(ndev); + unsigned int addr_low, addr_hi; + + addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); + addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); + + arc_reg_set(priv, R_ADDRL, addr_low); + arc_reg_set(priv, R_ADDRH, addr_hi); +} + /** * arc_emac_set_address - Set the MAC address for this device. * @ndev: Pointer to net_device structure. 
@@ -587,9 +599,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev) */ static int arc_emac_set_address(struct net_device *ndev, void *p) { - struct arc_emac_priv *priv = netdev_priv(ndev); struct sockaddr *addr = p; - unsigned int addr_low, addr_hi; if (netif_running(ndev)) return -EBUSY; @@ -599,11 +609,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p) memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); - addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); - addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); - - arc_reg_set(priv, R_ADDRL, addr_low); - arc_reg_set(priv, R_ADDRH, addr_hi); + arc_emac_set_address_internal(ndev); return 0; } @@ -643,13 +649,6 @@ static int arc_emac_probe(struct platform_device *pdev) return -ENODEV; } - /* Get CPU clock frequency from device tree */ - if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", - &clock_frequency)) { - dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n"); - return -EINVAL; - } - /* Get IRQ from device tree */ irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!irq) { @@ -677,17 +676,36 @@ static int arc_emac_probe(struct platform_device *pdev) priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs); if (IS_ERR(priv->regs)) { err = PTR_ERR(priv->regs); - goto out; + goto out_netdev; } dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs); + priv->clk = of_clk_get(pdev->dev.of_node, 0); + if (IS_ERR(priv->clk)) { + /* Get CPU clock frequency from device tree */ + if (of_property_read_u32(pdev->dev.of_node, "clock-frequency", + &clock_frequency)) { + dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n"); + err = -EINVAL; + goto out_netdev; + } + } else { + err = clk_prepare_enable(priv->clk); + if (err) { + dev_err(&pdev->dev, "failed to enable clock\n"); + goto out_clkget; + } + + clock_frequency = clk_get_rate(priv->clk); + } + id = arc_reg_get(priv, R_ID); /* Check for EMAC revision 5 or 7, magic number */ if (!(id == 0x0005fd02 || id == 0x0007fd02)) { dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id); err = -ENODEV; - goto out; + goto out_clken; } dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id); @@ -702,7 +720,7 @@ static int arc_emac_probe(struct platform_device *pdev) ndev->name, ndev); if (err) { dev_err(&pdev->dev, "could not allocate IRQ\n"); - goto out; + goto out_clken; } /* Get MAC address from device tree */ @@ -713,6 +731,7 @@ static int arc_emac_probe(struct platform_device *pdev) else eth_hw_addr_random(ndev); + arc_emac_set_address_internal(ndev); dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr); /* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */ @@ -722,7 +741,7 @@ static int arc_emac_probe(struct platform_device *pdev) if (!priv->rxbd) { dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; - goto out; + goto out_clken; } priv->txbd = priv->rxbd + RX_BD_NUM; @@ -734,7 +753,7 @@ static int arc_emac_probe(struct platform_device *pdev) err = arc_mdio_probe(pdev, priv); if (err) { dev_err(&pdev->dev, "failed to probe MII bus\n"); - goto out; + goto out_clken; } priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0, @@ -742,7 +761,7 @@ static int arc_emac_probe(struct platform_device *pdev) if (!priv->phy_dev) { dev_err(&pdev->dev, "of_phy_connect() failed\n"); err = -ENODEV; - goto out; + goto out_mdio; } dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n", @@ -752,14 +771,25 @@ static int 
arc_emac_probe(struct platform_device *pdev) err = register_netdev(ndev); if (err) { - netif_napi_del(&priv->napi); dev_err(&pdev->dev, "failed to register network device\n"); - goto out; + goto out_netif_api; } return 0; -out: +out_netif_api: + netif_napi_del(&priv->napi); + phy_disconnect(priv->phy_dev); + priv->phy_dev = NULL; +out_mdio: + arc_mdio_remove(priv); +out_clken: + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); +out_clkget: + if (!IS_ERR(priv->clk)) + clk_put(priv->clk); +out_netdev: free_netdev(ndev); return err; } @@ -774,6 +804,12 @@ static int arc_emac_remove(struct platform_device *pdev) arc_mdio_remove(priv); unregister_netdev(ndev); netif_napi_del(&priv->napi); + + if (!IS_ERR(priv->clk)) { + clk_disable_unprepare(priv->clk); + clk_put(priv->clk); + } + free_netdev(ndev); return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index a78edacccee..3b0d43154e6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) #define BCM_5710_UNDI_FW_MF_MINOR (0x08) #define BCM_5710_UNDI_FW_MF_VERS (0x05) -#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) -#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) +#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) +#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) { u8 major, minor, version; @@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { u32 timer_count = 1000; + bool need_write = true; /* Close the MAC Rx to prevent BRB from filling up */ bnx2x_prev_unload_close_mac(bp, &mac_vals); @@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) * cleaning methods - might be redundant but harmless. 
*/ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { - bnx2x_prev_unload_undi_mf(bp); + if (need_write) { + bnx2x_prev_unload_undi_mf(bp); + need_write = false; + } } else if (prev_undi) { /* If UNDI resides in memory, * manually increment it @@ -13233,6 +13237,8 @@ static void __bnx2x_remove(struct pci_dev *pdev, iounmap(bp->doorbells); bnx2x_release_firmware(bp); + } else { + bnx2x_vf_pci_dealloc(bp); } bnx2x_free_mem_bp(bp); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 5c523b32db7..b8078d50261 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp, if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN && (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >= vf_vlan_rules_cnt(vf))) { - BNX2X_ERR("No credits for vlan\n"); + BNX2X_ERR("No credits for vlan [%d >= %d]\n", + atomic_read(&bnx2x_vfq(vf, qid, vlan_count)), + vf_vlan_rules_cnt(vf)); return -ENOMEM; } @@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf, } /* add new mcasts */ + mcast.mcast_list_len = mc_num; rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD); if (rc) BNX2X_ERR("Failed to add multicasts\n"); @@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid) return 0; } +static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp, + struct bnx2x_virtf *vf, + int new) +{ + int num = vf_vlan_rules_cnt(vf); + int diff = new - num; + bool rc = true; + + DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n", + vf->abs_vfid, new, num); + + if (diff > 0) + rc = bp->vlans_pool.get(&bp->vlans_pool, diff); + else if (diff < 0) + rc = bp->vlans_pool.put(&bp->vlans_pool, -diff); + + if (rc) + vf_vlan_rules_cnt(vf) = new; + else + DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n", + vf->abs_vfid); +} + /* must be called after the number of PF queues and the number of VFs are * both known */ @@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf) resc->num_mac_filters = 1; /* divvy up vlan rules */ + bnx2x_iov_re_set_vlan_filters(bp, vf, 0); vlan_count = bp->vlans_pool.check(&bp->vlans_pool); vlan_count = 1 << ilog2(vlan_count); - resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp); + bnx2x_iov_re_set_vlan_filters(bp, vf, + vlan_count / BNX2X_NR_VIRTFN(bp)); /* no real limitation */ resc->num_mc_filters = 0; @@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp) bnx2x_iov_static_resc(bp, vf); /* queues are initialized during VF-ACQUIRE */ - - /* reserve the vf vlan credit */ - bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf)); - vf->filter_state = 0; vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id); @@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf, u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf); u8 txq_cnt = vf_txq_count(vf) ?
: bnx2x_vf_max_queue_cnt(bp, vf); + /* Save a vlan filter for the Hypervisor */ return ((req_resc->num_rxqs <= rxq_cnt) && (req_resc->num_txqs <= txq_cnt) && (req_resc->num_sbs <= vf_sb_count(vf)) && (req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) && - (req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf))); + (req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf))); } /* CORE VF API */ @@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf, vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf); if (resc->num_mac_filters) vf_mac_rules_cnt(vf) = resc->num_mac_filters; - if (resc->num_vlan_filters) - vf_vlan_rules_cnt(vf) = resc->num_vlan_filters; + /* Add an additional vlan filter credit for the hypervisor */ + bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1); DP(BNX2X_MSG_IOV, "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n", vf_sb_count(vf), vf_rxq_count(vf), vf_txq_count(vf), vf_mac_rules_cnt(vf), - vf_vlan_rules_cnt(vf)); + vf_vlan_rules_visible_cnt(vf)); /* Initialize the queues */ if (!vf->vfqs) { @@ -2670,7 +2695,7 @@ out: bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); } - return 0; + return rc; } int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) @@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) return bp->regview + PXP_VF_ADDR_DB_START; } +void bnx2x_vf_pci_dealloc(struct bnx2x *bp) +{ + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, + sizeof(struct bnx2x_vf_mbx_msg)); + BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, + sizeof(union pf_vf_bulletin)); +} + int bnx2x_vf_pci_alloc(struct bnx2x *bp) { mutex_init(&bp->vf2pf_mutex); @@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) return 0; alloc_mem_err: - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, - sizeof(struct bnx2x_vf_mbx_msg)); - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, - sizeof(union pf_vf_bulletin)); + bnx2x_vf_pci_dealloc(bp); return -ENOMEM; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 8bf764570ee..6929adba52f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -159,6 +159,8 @@ struct bnx2x_virtf { #define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters) #define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters) #define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters) + /* Hide a single vlan filter credit for the hypervisor */ +#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1) u8 sb_count; /* actual number of SBs */ u8 igu_base_id; /* base igu status block id */ @@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp, enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); void bnx2x_timer_sriov(struct bnx2x *bp); void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp); +void bnx2x_vf_pci_dealloc(struct bnx2x *bp); int bnx2x_vf_pci_alloc(struct bnx2x *bp); int bnx2x_enable_sriov(struct bnx2x *bp); void bnx2x_disable_sriov(struct bnx2x *bp); @@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp) return NULL; } +static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {} static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {} static inline int
bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0622884596b..784c7155b98 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) out: bnx2x_vfpf_finalize(bp, &req->first_tlv); - return 0; + return rc; } /* request pf to config rss table for vf queues*/ @@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf, bnx2x_vf_max_queue_cnt(bp, vf); resc->num_sbs = vf_sb_count(vf); resc->num_mac_filters = vf_mac_rules_cnt(vf); - resc->num_vlan_filters = vf_vlan_rules_cnt(vf); + resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf); resc->num_mc_filters = 0; if (status == PFVF_STATUS_SUCCESS) { diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index 7e49c43b7af..9e089d24466 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -4,7 +4,7 @@ config NET_CADENCE bool "Cadence devices" - depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST) + depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST) default y ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -30,7 +30,7 @@ config ARM_AT91_ETHER config MACB tristate "Cadence MACB/GEM support" - depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST) + depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST) select PHYLIB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ca97005e24b..e9daa072ebb 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp) { unsigned int entry; struct sk_buff *skb; - struct macb_dma_desc *desc; dma_addr_t paddr; while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { - u32 addr, ctrl; - entry = macb_rx_ring_wrap(bp->rx_prepared_head); - desc = &bp->rx_ring[entry]; /* Make hw descriptor updates visible to CPU */ rmb(); - addr = desc->addr; - ctrl = desc->ctrl; bp->rx_prepared_head++; - if ((addr & MACB_BIT(RX_USED))) - continue; - if (bp->rx_skbuff[entry] == NULL) { /* allocate sk_buff for this free entry in ring */ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); @@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget) if (!(addr & MACB_BIT(RX_USED))) break; - desc->addr &= ~MACB_BIT(RX_USED); bp->rx_tail++; count++; @@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget) if (work_done < budget) { napi_complete(napi); - /* - * We've done what we can to clean the buffers. Make sure we - * get notified when new packets arrive. 
- */ - macb_writel(bp, IER, MACB_RX_INT_FLAGS); - /* Packets received while interrupts were disabled */ status = macb_readl(bp, RSR); - if (unlikely(status)) + if (status) { + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RCOMP)); napi_reschedule(napi); + } else { + macb_writel(bp, IER, MACB_RX_INT_FLAGS); + } } /* TODO: Handle errors */ @@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (unlikely(status & (MACB_TX_ERR_FLAGS))) { macb_writel(bp, IDR, MACB_TX_INT_FLAGS); schedule_work(&bp->tx_error_task); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_TX_ERR_FLAGS); + break; } @@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) bp->hw_stats.gem.rx_overruns++; else bp->hw_stats.macb.rx_overruns++; + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(ISR_ROVR)); } if (status & MACB_BIT(HRESP)) { @@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) * (work queue?) */ netdev_err(dev, "DMA bus error: HRESP not OK\n"); + + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(HRESP)); } status = macb_readl(bp, ISR); @@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp) desc = &bp->rx_ring[i]; addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); - dma_unmap_single(&bp->pdev->dev, addr, skb->len, + dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); skb = NULL; diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index d40c994a4f6..570222c3341 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -67,13 +67,13 @@ config CHELSIO_T3 will be called cxgb3. config CHELSIO_T4 - tristate "Chelsio Communications T4 Ethernet support" + tristate "Chelsio Communications T4/T5 Ethernet support" depends on PCI select FW_LOADER select MDIO ---help--- - This driver supports Chelsio T4-based gigabit and 10Gb Ethernet - adapters. + This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet + adapters and T5 based 40Gb Ethernet adapters. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. @@ -87,11 +87,12 @@ config CHELSIO_T4 will be called cxgb4. config CHELSIO_T4VF - tristate "Chelsio Communications T4 Virtual Function Ethernet support" + tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support" depends on PCI ---help--- - This driver supports Chelsio T4-based gigabit and 10Gb Ethernet - adapters with PCI-E SR-IOV Virtual Functions. + This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet + adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual + Functions. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. 
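[Editor's note] The macb_poll() hunk above is an instance of the standard NAPI completion pattern: receive interrupts stay masked while polling, and after napi_complete() the driver re-reads the hardware receive status before unmasking, since a frame that arrived between the last descriptor scan and napi_complete() would otherwise sit unnoticed until the next interrupt. The standalone sketch below, which is not part of the diff, restates that pattern; the my_*-prefixed names are hypothetical placeholders (my_rx_pending() stands in for the RSR read, my_enable_rx_irq() for the IER write) and are left as declarations to be filled in per device.

#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;
	/* register mappings, descriptor rings, ... */
};

static int my_rx(struct my_priv *p, int budget);	/* consume up to budget frames */
static bool my_rx_pending(struct my_priv *p);	/* e.g. read RSR */
static void my_enable_rx_irq(struct my_priv *p);	/* e.g. write IER */

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *p = container_of(napi, struct my_priv, napi);
	int work_done = my_rx(p, budget);

	if (work_done < budget) {
		napi_complete(napi);

		/* Frames may have landed after the last descriptor scan;
		 * re-check before unmasking so they get polled promptly
		 * instead of waiting on an interrupt that already fired.
		 */
		if (my_rx_pending(p))
			napi_reschedule(napi);
		else
			my_enable_rx_irq(p);
	}

	return work_done;
}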
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 6fe58913403..24e16e3301e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -5870,6 +5870,8 @@ static void print_port_info(const struct net_device *dev) spd = " 2.5 GT/s"; else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) spd = " 5 GT/s"; + else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) + spd = " 8 GT/s"; if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) bufp += sprintf(bufp, "100/"); diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c new file mode 100644 index 00000000000..4884205e56e --- /dev/null +++ b/drivers/net/ethernet/ec_bhf.c @@ -0,0 +1,706 @@ + /* + * drivers/net/ethernet/beckhoff/ec_bhf.c + * + * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* This is a driver for EtherCAT master module present on CCAT FPGA. + * Those can be found on Beckhoff CX50xx industrial PCs. + */ + +#if 0 +#define DEBUG +#endif +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/skbuff.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/stat.h> + +#define TIMER_INTERVAL_NSEC 20000 + +#define INFO_BLOCK_SIZE 0x10 +#define INFO_BLOCK_TYPE 0x0 +#define INFO_BLOCK_REV 0x2 +#define INFO_BLOCK_BLK_CNT 0x4 +#define INFO_BLOCK_TX_CHAN 0x4 +#define INFO_BLOCK_RX_CHAN 0x5 +#define INFO_BLOCK_OFFSET 0x8 + +#define EC_MII_OFFSET 0x4 +#define EC_FIFO_OFFSET 0x8 +#define EC_MAC_OFFSET 0xc + +#define MAC_FRAME_ERR_CNT 0x0 +#define MAC_RX_ERR_CNT 0x1 +#define MAC_CRC_ERR_CNT 0x2 +#define MAC_LNK_LST_ERR_CNT 0x3 +#define MAC_TX_FRAME_CNT 0x10 +#define MAC_RX_FRAME_CNT 0x14 +#define MAC_TX_FIFO_LVL 0x20 +#define MAC_DROPPED_FRMS 0x28 +#define MAC_CONNECTED_CCAT_FLAG 0x78 + +#define MII_MAC_ADDR 0x8 +#define MII_MAC_FILT_FLAG 0xe +#define MII_LINK_STATUS 0xf + +#define FIFO_TX_REG 0x0 +#define FIFO_TX_RESET 0x8 +#define FIFO_RX_REG 0x10 +#define FIFO_RX_ADDR_VALID (1u << 31) +#define FIFO_RX_RESET 0x18 + +#define DMA_CHAN_OFFSET 0x1000 +#define DMA_CHAN_SIZE 0x8 + +#define DMA_WINDOW_SIZE_MASK 0xfffffffc + +static struct pci_device_id ids[] = { + { PCI_DEVICE(0x15ec, 0x5000), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ids); + +struct rx_header { +#define RXHDR_NEXT_ADDR_MASK 0xffffffu +#define RXHDR_NEXT_VALID (1u << 31) + __le32 next; +#define RXHDR_NEXT_RECV_FLAG 0x1 + __le32 recv; +#define RXHDR_LEN_MASK 0xfffu + __le16 len; + __le16 port; + __le32 reserved; + u8 timestamp[8]; +} __packed; + +#define PKT_PAYLOAD_SIZE 0x7e8 +struct rx_desc { + struct rx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +struct tx_header { + __le16 len; +#define TX_HDR_PORT_0 0x1 +#define TX_HDR_PORT_1 0x2 + u8 port; + u8 ts_enable; +#define TX_HDR_SENT 0x1 + __le32 sent; + u8 timestamp[8]; +} __packed; + 
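/* [Editor's sketch, not part of the patch: ec_bhf_process_rx() below
 * open-codes le*_to_cpu() reads of the rx_header fields defined above.
 * Accessors equivalent to those expressions would look like this; the
 * helper names are invented here purely for illustration.]
 */
static inline bool ec_bhf_rx_hdr_received(const struct rx_header *hdr)
{
	/* bit 0 of 'recv' is set once hardware has filled the descriptor */
	return le32_to_cpu(hdr->recv) & RXHDR_NEXT_RECV_FLAG;
}

static inline u32 ec_bhf_rx_hdr_next_offset(const struct rx_header *hdr)
{
	/* low 24 bits: byte offset of the next descriptor in the DMA window */
	return le32_to_cpu(hdr->next) & RXHDR_NEXT_ADDR_MASK;
}

static inline unsigned int ec_bhf_rx_hdr_payload_len(const struct rx_header *hdr)
{
	/* 'len' counts the header and the trailing 4-byte FCS; strip both */
	return (le16_to_cpu(hdr->len) & RXHDR_LEN_MASK) -
	       sizeof(struct rx_header) - 4;
}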
+struct tx_desc { + struct tx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +#define FIFO_SIZE 64 + +static long polling_frequency = TIMER_INTERVAL_NSEC; + +struct bhf_dma { + u8 *buf; + size_t len; + dma_addr_t buf_phys; + + u8 *alloc; + size_t alloc_len; + dma_addr_t alloc_phys; +}; + +struct ec_bhf_priv { + struct net_device *net_dev; + + struct pci_dev *dev; + + void * __iomem io; + void * __iomem dma_io; + + struct hrtimer hrtimer; + + int tx_dma_chan; + int rx_dma_chan; + void * __iomem ec_io; + void * __iomem fifo_io; + void * __iomem mii_io; + void * __iomem mac_io; + + struct bhf_dma rx_buf; + struct rx_desc *rx_descs; + int rx_dnext; + int rx_dcount; + + struct bhf_dma tx_buf; + struct tx_desc *tx_descs; + int tx_dcount; + int tx_dnext; + + u64 stat_rx_bytes; + u64 stat_tx_bytes; +}; + +#define PRIV_TO_DEV(priv) (&(priv)->dev->dev) + +#define ETHERCAT_MASTER_ID 0x14 + +static void ec_bhf_print_status(struct ec_bhf_priv *priv) +{ + struct device *dev = PRIV_TO_DEV(priv); + + dev_dbg(dev, "Frame error counter: %d\n", + ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); + dev_dbg(dev, "RX error counter: %d\n", + ioread8(priv->mac_io + MAC_RX_ERR_CNT)); + dev_dbg(dev, "CRC error counter: %d\n", + ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); + dev_dbg(dev, "TX frame counter: %d\n", + ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); + dev_dbg(dev, "RX frame counter: %d\n", + ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); + dev_dbg(dev, "TX fifo level: %d\n", + ioread8(priv->mac_io + MAC_TX_FIFO_LVL)); + dev_dbg(dev, "Dropped frames: %d\n", + ioread8(priv->mac_io + MAC_DROPPED_FRMS)); + dev_dbg(dev, "Connected with CCAT slot: %d\n", + ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); + dev_dbg(dev, "Link status: %d\n", + ioread8(priv->mii_io + MII_LINK_STATUS)); +} + +static void ec_bhf_reset(struct ec_bhf_priv *priv) +{ + iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT); + iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT); + iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT); + iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS); + + iowrite8(0, priv->fifo_io + FIFO_TX_RESET); + iowrite8(0, priv->fifo_io + FIFO_RX_RESET); + + iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL); +} + +static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) +{ + u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header); + u32 addr = (u8 *)desc - priv->tx_buf.buf; + + iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); + + dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); +} + +static int ec_bhf_desc_sent(struct tx_desc *desc) +{ + return le32_to_cpu(desc->header.sent) & TX_HDR_SENT; +} + +static void ec_bhf_process_tx(struct ec_bhf_priv *priv) +{ + if (unlikely(netif_queue_stopped(priv->net_dev))) { + /* Make sure that we perceive changes to tx_dnext. 
*/ + smp_rmb(); + + if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) + netif_wake_queue(priv->net_dev); + } +} + +static int ec_bhf_pkt_received(struct rx_desc *desc) +{ + return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG; +} + +static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) +{ + iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf), + priv->fifo_io + FIFO_RX_REG); +} + +static void ec_bhf_process_rx(struct ec_bhf_priv *priv) +{ + struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; + struct device *dev = PRIV_TO_DEV(priv); + + while (ec_bhf_pkt_received(desc)) { + int pkt_size = (le16_to_cpu(desc->header.len) & + RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4; + u8 *data = desc->data; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); + dev_dbg(dev, "Received packet, size: %d\n", pkt_size); + + if (skb) { + memcpy(skb_put(skb, pkt_size), data, pkt_size); + skb->protocol = eth_type_trans(skb, priv->net_dev); + dev_dbg(dev, "Protocol type: %x\n", skb->protocol); + + priv->stat_rx_bytes += pkt_size; + + netif_rx(skb); + } else { + dev_err_ratelimited(dev, + "Couldn't allocate an sk_buff for a packet of size %u\n", + pkt_size); + } + + desc->header.recv = 0; + + ec_bhf_add_rx_desc(priv, desc); + + priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; + desc = &priv->rx_descs[priv->rx_dnext]; + } + +} + +static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) +{ + struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv, + hrtimer); + ec_bhf_process_rx(priv); + ec_bhf_process_tx(priv); + + if (!netif_running(priv->net_dev)) + return HRTIMER_NORESTART; + + hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); + return HRTIMER_RESTART; +} + +static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) +{ + struct device *dev = PRIV_TO_DEV(priv); + unsigned block_count, i; + void * __iomem ec_info; + + dev_dbg(dev, "Info block:\n"); + dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); + dev_dbg(dev, "Revision of function: %x\n", + (unsigned)ioread16(priv->io + INFO_BLOCK_REV)); + + block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); + dev_dbg(dev, "Number of function blocks: %x\n", block_count); + + for (i = 0; i < block_count; i++) { + u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + + INFO_BLOCK_TYPE); + if (type == ETHERCAT_MASTER_ID) + break; + } + if (i == block_count) { + dev_err(dev, "EtherCAT master with DMA block not found\n"); + return -ENODEV; + } + dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); + + ec_info = priv->io + i * INFO_BLOCK_SIZE; + dev_dbg(dev, "EtherCAT master revision: %d\n", + ioread16(ec_info + INFO_BLOCK_REV)); + + priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); + dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", + priv->tx_dma_chan); + + priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); + dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", + priv->rx_dma_chan); + + priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); + priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); + priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); + priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); + + dev_dbg(dev, + "EtherCAT block address: %p, fifo address: %p, mii address: %p, mac address: %p\n", + priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); + + return 0; +} + +static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, + 
struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct tx_desc *desc; + unsigned len; + + dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); + + desc = &priv->tx_descs[priv->tx_dnext]; + + skb_copy_and_csum_dev(skb, desc->data); + len = skb->len; + + memset(&desc->header, 0, sizeof(desc->header)); + desc->header.len = cpu_to_le16(len); + desc->header.port = TX_HDR_PORT_0; + + ec_bhf_send_packet(priv, desc); + + priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; + + if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { + /* Make sure that updates to tx_dnext are perceived + * by the timer routine. + */ + smp_wmb(); + + netif_stop_queue(net_dev); + + dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); + ec_bhf_print_status(priv); + } + + priv->stat_tx_bytes += len; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, + struct bhf_dma *buf, + int channel, + int size) +{ + int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET; + struct device *dev = PRIV_TO_DEV(priv); + u32 mask; + + iowrite32(0xffffffff, priv->dma_io + offset); + + mask = ioread32(priv->dma_io + offset); + mask &= DMA_WINDOW_SIZE_MASK; + dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); + + /* We want to allocate a chunk of memory that is: + * - aligned to the mask we just read + * - is of size 2^mask bytes (at most) + * To ensure both, we allocate a buffer of + * 2 * 2^mask bytes. + */ + buf->len = min_t(int, ~mask + 1, size); + buf->alloc_len = 2 * buf->len; + + dev_dbg(dev, "Allocating %d bytes for channel %d\n", + (int)buf->alloc_len, channel); + buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, + GFP_KERNEL); + if (buf->alloc == NULL) { + dev_info(dev, "Failed to allocate buffer\n"); + return -ENOMEM; + } + + buf->buf_phys = (buf->alloc_phys + buf->len) & mask; + buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys); + + iowrite32(0, priv->dma_io + offset + 4); + iowrite32(buf->buf_phys, priv->dma_io + offset); + dev_dbg(dev, "Buffer: %x and read from dev: %x\n", + (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); + + return 0; +} + +static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) +{ + int i = 0; + + priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); + priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; + priv->tx_dnext = 0; + + for (i = 0; i < priv->tx_dcount; i++) + priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT); +} + +static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) +{ + int i; + + priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); + priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; + priv->rx_dnext = 0; + + for (i = 0; i < priv->rx_dcount; i++) { + struct rx_desc *desc = &priv->rx_descs[i]; + u32 next; + + if (i != priv->rx_dcount - 1) + next = (u8 *)(desc + 1) - priv->rx_buf.buf; + else + next = 0; + next |= RXHDR_NEXT_VALID; + desc->header.next = cpu_to_le32(next); + desc->header.recv = 0; + ec_bhf_add_rx_desc(priv, desc); + } +} + +static int ec_bhf_open(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + int err = 0; + + dev_info(dev, "Opening device\n"); + + ec_bhf_reset(priv); + + err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, + FIFO_SIZE * sizeof(struct rx_desc)); + if (err) { + dev_err(dev, "Failed to allocate rx buffer\n"); + goto out; + } + ec_bhf_setup_rx_descs(priv); + + dev_info(dev, "RX buffer 
allocated, address: %x\n", + (unsigned)priv->rx_buf.buf_phys); + + err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, + FIFO_SIZE * sizeof(struct tx_desc)); + if (err) { + dev_err(dev, "Failed to allocate tx buffer\n"); + goto error_rx_free; + } + dev_dbg(dev, "TX buffer allocated, address: %x\n", + (unsigned)priv->tx_buf.buf_phys); + + iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); + + ec_bhf_setup_tx_descs(priv); + + netif_start_queue(net_dev); + + hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + priv->hrtimer.function = ec_bhf_timer_fun; + hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), + HRTIMER_MODE_REL); + + dev_info(PRIV_TO_DEV(priv), "Device open\n"); + + ec_bhf_print_status(priv); + + return 0; + +error_rx_free: + dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc, + priv->rx_buf.alloc_phys); +out: + return err; +} + +static int ec_bhf_stop(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + + hrtimer_cancel(&priv->hrtimer); + + ec_bhf_reset(priv); + + netif_tx_disable(net_dev); + + dma_free_coherent(dev, priv->tx_buf.alloc_len, + priv->tx_buf.alloc, priv->tx_buf.alloc_phys); + dma_free_coherent(dev, priv->rx_buf.alloc_len, + priv->rx_buf.alloc, priv->rx_buf.alloc_phys); + + return 0; +} + +static struct rtnl_link_stats64 * +ec_bhf_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + + stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) + + ioread8(priv->mac_io + MAC_CRC_ERR_CNT) + + ioread8(priv->mac_io + MAC_FRAME_ERR_CNT); + stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT); + stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT); + stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS); + + stats->tx_bytes = priv->stat_tx_bytes; + stats->rx_bytes = priv->stat_rx_bytes; + + return stats; +} + +static const struct net_device_ops ec_bhf_netdev_ops = { + .ndo_start_xmit = ec_bhf_start_xmit, + .ndo_open = ec_bhf_open, + .ndo_stop = ec_bhf_stop, + .ndo_get_stats64 = ec_bhf_get_stats, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr +}; + +static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct net_device *net_dev; + struct ec_bhf_priv *priv; + void * __iomem dma_io; + void * __iomem io; + int err = 0; + + err = pci_enable_device(dev); + if (err) + return err; + + pci_set_master(dev); + + err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + err = -EIO; + goto err_disable_dev; + } + + err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + goto err_disable_dev; + } + + err = pci_request_regions(dev, "ec_bhf"); + if (err) { + dev_err(&dev->dev, "Failed to request pci memory regions\n"); + goto err_disable_dev; + } + + io = pci_iomap(dev, 0, 0); + if (!io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 0\n"); + err = -EIO; + goto err_release_regions; + } + + dma_io = pci_iomap(dev, 2, 0); + if (!dma_io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 2\n"); + err = -EIO; + goto err_unmap; + } + + net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv)); + if (!net_dev) { + err = -ENOMEM; + goto err_unmap_dma_io; + } + + 
pci_set_drvdata(dev, net_dev); + SET_NETDEV_DEV(net_dev, &dev->dev); + + net_dev->features = 0; + net_dev->flags |= IFF_NOARP; + + net_dev->netdev_ops = &ec_bhf_netdev_ops; + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + priv->io = io; + priv->dma_io = dma_io; + priv->dev = dev; + + err = ec_bhf_setup_offsets(priv); + if (err < 0) + goto err_free_net_dev; + + memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); + + dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", + net_dev->dev_addr); + + err = register_netdev(net_dev); + if (err < 0) + goto err_free_net_dev; + + return 0; + +err_free_net_dev: + free_netdev(net_dev); +err_unmap_dma_io: + pci_iounmap(dev, dma_io); +err_unmap: + pci_iounmap(dev, io); +err_release_regions: + pci_release_regions(dev); +err_disable_dev: + pci_clear_master(dev); + pci_disable_device(dev); + + return err; +} + +static void ec_bhf_remove(struct pci_dev *dev) +{ + struct net_device *net_dev = pci_get_drvdata(dev); + struct ec_bhf_priv *priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); + free_netdev(net_dev); + + pci_iounmap(dev, priv->dma_io); + pci_iounmap(dev, priv->io); + pci_release_regions(dev); + pci_clear_master(dev); + pci_disable_device(dev); +} + +static struct pci_driver pci_driver = { + .name = "ec_bhf", + .id_table = ids, + .probe = ec_bhf_probe, + .remove = ec_bhf_remove, +}; + +static int __init ec_bhf_init(void) +{ + return pci_register_driver(&pci_driver); +} + +static void __exit ec_bhf_exit(void) +{ + pci_unregister_driver(&pci_driver); +} + +module_init(ec_bhf_init); +module_exit(ec_bhf_exit); + +module_param(polling_frequency, long, S_IRUGO); +MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>"); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a18645407d2..dc19bc5dec7 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev) if (status) goto err; + /* On some BE3 FW versions, after a HW reset, + * interrupts will remain disabled for each function. 
+ * So, explicitly enable interrupts + */ + be_intr_set(adapter, true); + /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) goto err; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 9125d9abf09..e2d42475b00 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id); static irqreturn_t gfar_transmit(int irq, void *dev_id); static irqreturn_t gfar_interrupt(int irq, void *dev_id); static void adjust_link(struct net_device *dev); +static noinline void gfar_update_link_state(struct gfar_private *priv); static int init_phy(struct net_device *dev); static int gfar_probe(struct platform_device *ofdev); static int gfar_remove(struct platform_device *ofdev); @@ -3076,41 +3077,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id) return IRQ_HANDLED; } -static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) -{ - struct phy_device *phydev = priv->phydev; - u32 val = 0; - - if (!phydev->duplex) - return val; - - if (!priv->pause_aneg_en) { - if (priv->tx_pause_en) - val |= MACCFG1_TX_FLOW; - if (priv->rx_pause_en) - val |= MACCFG1_RX_FLOW; - } else { - u16 lcl_adv, rmt_adv; - u8 flowctrl; - /* get link partner capabilities */ - rmt_adv = 0; - if (phydev->pause) - rmt_adv = LPA_PAUSE_CAP; - if (phydev->asym_pause) - rmt_adv |= LPA_PAUSE_ASYM; - - lcl_adv = mii_advertise_flowctrl(phydev->advertising); - - flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); - if (flowctrl & FLOW_CTRL_TX) - val |= MACCFG1_TX_FLOW; - if (flowctrl & FLOW_CTRL_RX) - val |= MACCFG1_RX_FLOW; - } - - return val; -} - /* Called every time the controller might need to be made * aware of new link state. The PHY code conveys this * information through variables in the phydev structure, and this @@ -3120,83 +3086,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) static void adjust_link(struct net_device *dev) { struct gfar_private *priv = netdev_priv(dev); - struct gfar __iomem *regs = priv->gfargrp[0].regs; struct phy_device *phydev = priv->phydev; - int new_state = 0; - if (test_bit(GFAR_RESETTING, &priv->state)) - return; - - if (phydev->link) { - u32 tempval1 = gfar_read(&regs->maccfg1); - u32 tempval = gfar_read(&regs->maccfg2); - u32 ecntrl = gfar_read(&regs->ecntrl); - - /* Now we make sure that we can be in full duplex mode. - * If not, we operate in half-duplex mode. - */ - if (phydev->duplex != priv->oldduplex) { - new_state = 1; - if (!(phydev->duplex)) - tempval &= ~(MACCFG2_FULL_DUPLEX); - else - tempval |= MACCFG2_FULL_DUPLEX; - - priv->oldduplex = phydev->duplex; - } - - if (phydev->speed != priv->oldspeed) { - new_state = 1; - switch (phydev->speed) { - case 1000: - tempval = - ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); - - ecntrl &= ~(ECNTRL_R100); - break; - case 100: - case 10: - tempval = - ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); - - /* Reduced mode distinguishes - * between 10 and 100 - */ - if (phydev->speed == SPEED_100) - ecntrl |= ECNTRL_R100; - else - ecntrl &= ~(ECNTRL_R100); - break; - default: - netif_warn(priv, link, dev, - "Ack! 
Speed (%d) is not 10/100/1000!\n", - phydev->speed); - break; - } - - priv->oldspeed = phydev->speed; - } - - tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); - tempval1 |= gfar_get_flowctrl_cfg(priv); - - gfar_write(&regs->maccfg1, tempval1); - gfar_write(&regs->maccfg2, tempval); - gfar_write(&regs->ecntrl, ecntrl); - - if (!priv->oldlink) { - new_state = 1; - priv->oldlink = 1; - } - } else if (priv->oldlink) { - new_state = 1; - priv->oldlink = 0; - priv->oldspeed = 0; - priv->oldduplex = -1; - } - - if (new_state && netif_msg_link(priv)) - phy_print_status(phydev); + if (unlikely(phydev->link != priv->oldlink || + phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)) + gfar_update_link_state(priv); } /* Update the hash table based on the current list of multicast @@ -3442,6 +3337,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id) return IRQ_HANDLED; } +static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) +{ + struct phy_device *phydev = priv->phydev; + u32 val = 0; + + if (!phydev->duplex) + return val; + + if (!priv->pause_aneg_en) { + if (priv->tx_pause_en) + val |= MACCFG1_TX_FLOW; + if (priv->rx_pause_en) + val |= MACCFG1_RX_FLOW; + } else { + u16 lcl_adv, rmt_adv; + u8 flowctrl; + /* get link partner capabilities */ + rmt_adv = 0; + if (phydev->pause) + rmt_adv = LPA_PAUSE_CAP; + if (phydev->asym_pause) + rmt_adv |= LPA_PAUSE_ASYM; + + lcl_adv = mii_advertise_flowctrl(phydev->advertising); + + flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); + if (flowctrl & FLOW_CTRL_TX) + val |= MACCFG1_TX_FLOW; + if (flowctrl & FLOW_CTRL_RX) + val |= MACCFG1_RX_FLOW; + } + + return val; +} + +static noinline void gfar_update_link_state(struct gfar_private *priv) +{ + struct gfar __iomem *regs = priv->gfargrp[0].regs; + struct phy_device *phydev = priv->phydev; + + if (unlikely(test_bit(GFAR_RESETTING, &priv->state))) + return; + + if (phydev->link) { + u32 tempval1 = gfar_read(&regs->maccfg1); + u32 tempval = gfar_read(&regs->maccfg2); + u32 ecntrl = gfar_read(&regs->ecntrl); + + if (phydev->duplex != priv->oldduplex) { + if (!(phydev->duplex)) + tempval &= ~(MACCFG2_FULL_DUPLEX); + else + tempval |= MACCFG2_FULL_DUPLEX; + + priv->oldduplex = phydev->duplex; + } + + if (phydev->speed != priv->oldspeed) { + switch (phydev->speed) { + case 1000: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII); + + ecntrl &= ~(ECNTRL_R100); + break; + case 100: + case 10: + tempval = + ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII); + + /* Reduced mode distinguishes + * between 10 and 100 + */ + if (phydev->speed == SPEED_100) + ecntrl |= ECNTRL_R100; + else + ecntrl &= ~(ECNTRL_R100); + break; + default: + netif_warn(priv, link, priv->ndev, + "Ack! 
Speed (%d) is not 10/100/1000!\n", + phydev->speed); + break; + } + + priv->oldspeed = phydev->speed; + } + + tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); + tempval1 |= gfar_get_flowctrl_cfg(priv); + + gfar_write(&regs->maccfg1, tempval1); + gfar_write(&regs->maccfg2, tempval); + gfar_write(&regs->ecntrl, ecntrl); + + if (!priv->oldlink) + priv->oldlink = 1; + + } else if (priv->oldlink) { + priv->oldlink = 0; + priv->oldspeed = 0; + priv->oldduplex = -1; + } + + if (netif_msg_link(priv)) + phy_print_status(phydev); +} + static struct of_device_id gfar_match[] = { { diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 891dbee6e6c..76d70708f86 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev, struct gfar __iomem *regs = priv->gfargrp[0].regs; u32 oldadv, newadv; + if (!phydev) + return -ENODEV; + if (!(phydev->supported & SUPPORTED_Pause) || (!(phydev->supported & SUPPORTED_Asym_Pause) && (epause->rx_pause != epause->tx_pause))) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 9866f264f55..f0bbd4246d7 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) { u16 phy_reg = 0; u32 phy_id = 0; - s32 ret_val; + s32 ret_val = 0; u16 retry_count; u32 mac_reg = 0; @@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. */ - hw->phy.ops.release(hw); - ret_val = e1000_set_mdio_slow_mode_hv(hw); - if (!ret_val) - ret_val = e1000e_get_phy_id(hw); - hw->phy.ops.acquire(hw); + if (hw->mac.type < e1000_pch_lpt) { + hw->phy.ops.release(hw); + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (!ret_val) + ret_val = e1000e_get_phy_id(hw); + hw->phy.ops.acquire(hw); + } if (ret_val) return false; @@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw) } } + if (hw->phy.type == e1000_phy_82579) { + ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + &data); + if (ret_val) + goto release; + + data &= ~I82579_LPI_100_PLL_SHUT; + ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + data); + } + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); if (ret_val) @@ -1314,14 +1327,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) return ret_val; } - /* When connected at 10Mbps half-duplex, 82579 parts are excessively + /* When connected at 10Mbps half-duplex, some parts are excessively * aggressive resulting in many collisions. To avoid this, increase * the IPG and reduce Rx latency in the PHY. 
*/ - if ((hw->mac.type == e1000_pch2lan) && link) { + if (((hw->mac.type == e1000_pch2lan) || + (hw->mac.type == e1000_pch_lpt)) && link) { u32 reg; reg = er32(STATUS); if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { + u16 emi_addr; + reg = er32(TIPG); reg &= ~E1000_TIPG_IPGT_MASK; reg |= 0xFF; @@ -1332,8 +1348,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; - ret_val = - e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0); hw->phy.ops.release(hw); @@ -2493,51 +2513,44 @@ release: * e1000_k1_gig_workaround_lv - K1 Si workaround * @hw: pointer to the HW structure * - * Workaround to set the K1 beacon duration for 82579 parts + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps. + * Disable K1 in 1000Mbps and 100Mbps **/ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) { s32 ret_val = 0; u16 status_reg = 0; - u32 mac_reg; - u16 phy_reg; if (hw->mac.type != e1000_pch2lan) return 0; - /* Set K1 beacon duration based on 1Gbps speed or otherwise */ + /* Set K1 beacon duration based on 10Mbps speed */ ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); if (ret_val) return ret_val; if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { - mac_reg = er32(FEXTNVM4); - mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; - - ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); - if (ret_val) - return ret_val; - - if (status_reg & HV_M_STATUS_SPEED_1000) { + if (status_reg & + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { u16 pm_phy_reg; - mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; - phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; - /* LV 1G Packet drop issue wa */ + /* LV 1G/100 Packet drop issue wa */ ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); if (ret_val) return ret_val; - pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; + pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); if (ret_val) return ret_val; } else { + u32 mac_reg; + + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; - phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + ew32(FEXTNVM4, mac_reg); } - ew32(FEXTNVM4, mac_reg); - ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); } return ret_val; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index bead50f9187..5515126c81c 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -232,16 +232,19 @@ #define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ #define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ #define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ #define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ #define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ #define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ #define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ #define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ #define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ #define 
I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ #define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ #define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ #define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_RX_CONFIG 0xB20C /* Receive configuration */ #define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ #define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d50c91e5052..3e69386add0 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1165,7 +1165,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) dev_kfree_skb_any(adapter->tx_hwtstamp_skb); adapter->tx_hwtstamp_skb = NULL; adapter->tx_hwtstamp_timeouts++; - e_warn("clearing Tx timestamp hang"); + e_warn("clearing Tx timestamp hang\n"); } else { /* reschedule to check later */ schedule_work(&adapter->tx_hwtstamp_work); @@ -5687,7 +5687,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, static int e1000_change_mtu(struct net_device *netdev, int new_mtu) { struct e1000_adapter *adapter = netdev_priv(netdev); - int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN; /* Jumbo frame support */ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && @@ -6235,6 +6235,7 @@ static int __e1000_resume(struct pci_dev *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int e1000e_pm_thaw(struct device *dev) { struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); @@ -6255,7 +6256,6 @@ static int e1000e_pm_thaw(struct device *dev) return 0; } -#ifdef CONFIG_PM_SLEEP static int e1000e_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h index 3841bccf058..537d2780b40 100644 --- a/drivers/net/ethernet/intel/e1000e/phy.h +++ b/drivers/net/ethernet/intel/e1000e/phy.h @@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw); #define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 #define HV_M_STATUS_SPEED_MASK 0x0300 #define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_SPEED_100 0x0100 #define HV_M_STATUS_LINK_UP 0x0040 #define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 861b722c267..cf0761f0891 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2897,12 +2897,9 @@ static irqreturn_t i40e_intr(int irq, void *data) u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { - ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; i40e_ptp_tx_hwtstamp(pf); - prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK; } - - wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat); } /* If a critical error is pending we have no choice but to reset the @@ -4271,6 +4268,14 @@ static int i40e_open(struct net_device *netdev) if (err) return err; + /* configure global TSO hardware offload settings */ + wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | + TCP_FLAG_FIN | + TCP_FLAG_CWR) >> 16); + wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); + #ifdef CONFIG_I40E_VXLAN 
vxlan_get_rx_port(netdev); #endif @@ -6712,6 +6717,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_IPV6_CSUM | NETIF_F_TSO | + NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_RXCSUM | NETIF_F_NTUPLE | diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 262bdf11d22..81299189a47 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw) udelay(5); } if (ret_code == I40E_ERR_TIMEOUT) - hw_dbg(hw, "Done bit in GLNVM_SRCTL not set"); + hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n"); return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index e33ec6c842b..e61e6372080 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -239,7 +239,7 @@ static void i40e_ptp_tx_work(struct work_struct *work) dev_kfree_skb_any(pf->ptp_tx_skb); pf->ptp_tx_skb = NULL; pf->tx_hwtstamp_timeouts++; - dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang"); + dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n"); return; } @@ -321,7 +321,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi) pf->last_rx_ptp_check = jiffies; pf->rx_hwtstamp_cleared++; dev_warn(&vsi->back->pdev->dev, - "%s: clearing Rx timestamp hang", + "%s: clearing Rx timestamp hang\n", __func__); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0f5d96ad281..9478ddc66ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -418,7 +418,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi, } break; default: - dev_info(&pf->pdev->dev, "Could not specify spec type %d", + dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", input->flow_type); ret = -EINVAL; } @@ -478,7 +478,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT; } } else { - dev_info(&pdev->dev, "FD filter programming error"); + dev_info(&pdev->dev, "FD filter programming error\n"); } } else if (error == (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) { @@ -1713,9 +1713,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, I40E_TX_FLAGS_VLAN_PRIO_SHIFT; if (tx_flags & I40E_TX_FLAGS_SW_VLAN) { struct vlan_ethhdr *vhdr; - if (skb_header_cloned(skb) && - pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) - return -ENOMEM; + int rc; + + rc = skb_cow_head(skb, 0); + if (rc < 0) + return rc; vhdr = (struct vlan_ethhdr *)skb->data; vhdr->h_vlan_TCI = htons(tx_flags >> I40E_TX_FLAGS_VLAN_SHIFT); @@ -1743,20 +1745,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling) { u32 cd_cmd, cd_tso_len, cd_mss; + struct ipv6hdr *ipv6h; struct tcphdr *tcph; struct iphdr *iph; u32 l4len; int err; - struct ipv6hdr *ipv6h; if (!skb_is_gso(skb)) return 0; - if (skb_header_cloned(skb)) { - err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); - if (err) - return err; - } + err = skb_cow_head(skb, 0); + if (err < 0) + return err; if (protocol == htons(ETH_P_IP)) { iph = skb->encapsulation ? 
inner_ip_hdr(skb) : ip_hdr(skb); diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c index db963397cc2..f67f8a170b9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_i210.c +++ b/drivers/net/ethernet/intel/igb/e1000_i210.c @@ -365,7 +365,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); if (word_address == address) { *data = INVM_DWORD_TO_WORD_DATA(invm_dword); - hw_dbg("Read INVM Word 0x%02x = %x", + hw_dbg("Read INVM Word 0x%02x = %x\n", address, *data); status = E1000_SUCCESS; break; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 5910a932ea7..1e0c404db81 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -929,11 +929,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) */ if (hw->fc.requested_mode == e1000_fc_full) { hw->fc.current_mode = e1000_fc_full; - hw_dbg("Flow Control = FULL.\r\n"); + hw_dbg("Flow Control = FULL.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = " - "RX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = RX PAUSE frames only.\n"); } } /* For receiving PAUSE frames ONLY. @@ -948,7 +947,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_tx_pause; - hw_dbg("Flow Control = TX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = TX PAUSE frames only.\n"); } /* For transmitting PAUSE frames ONLY. * @@ -962,7 +961,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = RX PAUSE frames only.\n"); } /* Per the IEEE spec, at this point flow control should be * disabled. However, we want to consider that we could @@ -988,10 +987,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw) (hw->fc.requested_mode == e1000_fc_tx_pause) || (hw->fc.strict_ieee)) { hw->fc.current_mode = e1000_fc_none; - hw_dbg("Flow Control = NONE.\r\n"); + hw_dbg("Flow Control = NONE.\n"); } else { hw->fc.current_mode = e1000_fc_rx_pause; - hw_dbg("Flow Control = RX PAUSE frames only.\r\n"); + hw_dbg("Flow Control = RX PAUSE frames only.\n"); } /* Now we need to do one last check... 
If we auto- diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index fb98d4602f9..16430a8440f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter, rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { - u32 rqdpc = rd32(E1000_RQDPC(i)); struct igb_ring *ring = adapter->rx_ring[i]; + u32 rqdpc = rd32(E1000_RQDPC(i)); + if (hw->mac.type >= e1000_i210) + wr32(E1000_RQDPC(i), 0); if (rqdpc) { ring->rx_stats.drops += rqdpc; diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 9209d652e1c..ab25e49365f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state); adapter->tx_hwtstamp_timeouts++; - dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang"); + dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n"); return; } @@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter) rd32(E1000_RXSTMPH); adapter->last_rx_ptp_check = jiffies; adapter->rx_hwtstamp_cleared++; - dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang"); + dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n"); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 1a12c1dd7a2..c6c4ca7d68e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -256,7 +256,6 @@ struct ixgbe_ring { struct ixgbe_tx_buffer *tx_buffer_info; struct ixgbe_rx_buffer *rx_buffer_info; }; - unsigned long last_rx_timestamp; unsigned long state; u8 __iomem *tail; dma_addr_t dma; /* phys. address of descriptor ring */ @@ -770,6 +769,7 @@ struct ixgbe_adapter { unsigned long ptp_tx_start; unsigned long last_overflow_check; unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; @@ -944,24 +944,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter); void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); -void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb); -static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, - union ixgbe_adv_rx_desc *rx_desc, - struct sk_buff *skb) -{ - if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) - return; - - __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); - - /* - * Update the last_rx_timestamp timer in order to enable watchdog check - * for error case of latched timestamp on a dropped packet. 
- */ - rx_ring->last_rx_timestamp = jiffies; -} - +void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb); int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 24fba39e194..981b8a7b100 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -1195,7 +1195,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, */ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; - hw_dbg(hw, "Detected EEPROM page size = %d words.", + hw_dbg(hw, "Detected EEPROM page size = %d words.\n", hw->eeprom.word_page_size); out: return status; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4c526b7f99..d62e7a25cf9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1664,7 +1664,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ixgbe_rx_checksum(rx_ring, rx_desc, skb); - ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); + if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) + ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 23f765263f1..a76af8e28a0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) if (time_out == max_time_out) { status = IXGBE_ERR_LINK_SETUP; - hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out"); + hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n"); } return status; @@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) if (time_out == max_time_out) { status = IXGBE_ERR_LINK_SETUP; - hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out"); + hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n"); } return status; @@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) status = 0; } else { if (hw->allow_unsupported_sfp) { - e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules."); + e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. 
Intel Corporation is not responsible for any harm caused by using untested modules.\n"); status = 0; } else { hw_dbg(hw, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 63515a6f67f..8902ae68345 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; - struct ixgbe_ring *rx_ring; u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); unsigned long rx_event; - int n; /* if we don't have a valid timestamp in the registers, just update the * timeout counter and exit @@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) /* determine the most recent watchdog or rx_timestamp event */ rx_event = adapter->last_rx_ptp_check; - for (n = 0; n < adapter->num_rx_queues; n++) { - rx_ring = adapter->rx_ring[n]; - if (time_after(rx_ring->last_rx_timestamp, rx_event)) - rx_event = rx_ring->last_rx_timestamp; - } + if (time_after(adapter->last_rx_timestamp, rx_event)) + rx_event = adapter->last_rx_timestamp; /* only need to read the high RXSTMP register to clear the lock */ if (time_is_before_jiffies(rx_event + 5*HZ)) { IXGBE_READ_REG(hw, IXGBE_RXSTMPH); adapter->last_rx_ptp_check = jiffies; - e_warn(drv, "clearing RX Timestamp hang"); + e_warn(drv, "clearing RX Timestamp hang\n"); } } @@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) dev_kfree_skb_any(adapter->ptp_tx_skb); adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); - e_warn(drv, "clearing Tx Timestamp hang"); + e_warn(drv, "clearing Tx Timestamp hang\n"); return; } @@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) } /** - * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp - * @q_vector: structure containing interrupt and ring information + * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp + * @adapter: pointer to adapter struct * @skb: particular skb to send timestamp with * * if the timestamp is valid, we convert it into the timecounter ns * value, then store that result into the shhwtstamps structure which * is passed up the network stack */ -void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, - struct sk_buff *skb) +void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) { - struct ixgbe_adapter *adapter; - struct ixgbe_hw *hw; + struct ixgbe_hw *hw = &adapter->hw; struct skb_shared_hwtstamps *shhwtstamps; u64 regval = 0, ns; u32 tsyncrxctl; unsigned long flags; - /* we cannot process timestamps on a ring without a q_vector */ - if (!q_vector || !q_vector->adapter) - return; - - adapter = q_vector->adapter; - hw = &adapter->hw; - - /* - * Read the tsyncrxctl register afterwards in order to prevent taking an - * I/O hit on every packet. 
- */ tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) return; @@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector, regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; - spin_lock_irqsave(&adapter->tmreg_lock, flags); ns = timecounter_cyc2time(&adapter->tc, regval); spin_unlock_irqrestore(&adapter->tmreg_lock, flags); shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = ns_to_ktime(ns); + + /* Update the last_rx_timestamp timer in order to enable watchdog check + * for error case of latched timestamp on a dropped packet. + */ + adapter->last_rx_timestamp = jiffies; } int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b0c6050479e..b78378cea5e 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme, return idx; } -static void +static int jme_fill_tx_map(struct pci_dev *pdev, struct txdesc *txdesc, struct jme_buffer_info *txbi, @@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev, len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) + return -EINVAL; + pci_dma_sync_single_for_device(pdev, dmaaddr, len, @@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev, txbi->mapping = dmaaddr; txbi->len = len; + return 0; } -static void +static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) +{ + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; + int mask = jme->tx_ring_mask; + int j; + + for (j = 0 ; j < count ; j++) { + ctxbi = txbi + ((startidx + j + 2) & (mask)); + pci_unmap_page(jme->pdev, + ctxbi->mapping, + ctxbi->len, + PCI_DMA_TODEVICE); + + ctxbi->mapping = 0; + ctxbi->len = 0; + } + +} + +static int jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) { struct jme_ring *txring = &(jme->txring[0]); @@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) int mask = jme->tx_ring_mask; const struct skb_frag_struct *frag; u32 len; + int ret = 0; for (i = 0 ; i < nr_frags ; ++i) { frag = &skb_shinfo(skb)->frags[i]; ctxdesc = txdesc + ((idx + i + 2) & (mask)); ctxbi = txbi + ((idx + i + 2) & (mask)); - jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, skb_frag_page(frag), frag->page_offset, skb_frag_size(frag), hidma); + if (ret) { + jme_drop_tx_map(jme, idx, i); + goto out; + } + } len = skb_is_nonlinear(skb) ? 
skb_headlen(skb) : skb->len; ctxdesc = txdesc + ((idx + 1) & (mask)); ctxbi = txbi + ((idx + 1) & (mask)); - jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), offset_in_page(skb->data), len, hidma); + if (ret) + jme_drop_tx_map(jme, idx, i); + +out: + return ret; } + static int jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) { @@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) struct txdesc *txdesc; struct jme_buffer_info *txbi; u8 flags; + int ret = 0; txdesc = (struct txdesc *)txring->desc + idx; txbi = txring->bufinf + idx; @@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) jme_tx_csum(jme, skb, &flags); jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); - jme_map_tx_skb(jme, skb, idx); + ret = jme_map_tx_skb(jme, skb, idx); + if (ret) + return ret; + txdesc->desc1.flags = flags; /* * Set tx buffer info after telling NIC to send @@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - jme_fill_tx_desc(jme, skb, idx); + if (jme_fill_tx_desc(jme, skb, idx)) + return NETDEV_TX_OK; jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0 | diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c index b161a525fc5..9d5ced263a5 100644 --- a/drivers/net/ethernet/marvell/mvmdio.c +++ b/drivers/net/ethernet/marvell/mvmdio.c @@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev) clk_prepare_enable(dev->clk); dev->err_interrupt = platform_get_irq(pdev, 0); - if (dev->err_interrupt != -ENXIO) { + if (dev->err_interrupt > 0) { ret = devm_request_irq(&pdev->dev, dev->err_interrupt, orion_mdio_err_irq, IRQF_SHARED, pdev->name, dev); @@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev) writel(MVMDIO_ERR_INT_SMI_DONE, dev->regs + MVMDIO_ERR_INT_MASK); + + } else if (dev->err_interrupt == -EPROBE_DEFER) { + return -EPROBE_DEFER; } mutex_init(&dev->lock); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 78099eab767..92d3249f63f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = { }, { .opcode = MLX4_CMD_UPDATE_QP, - .has_inbox = false, + .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = mlx4_CMD_EPERM_wrapper + .wrapper = mlx4_UPDATE_QP_wrapper }, { .opcode = MLX4_CMD_GET_OP_REQ, diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index cef267e24f9..7cf9dadcb47 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -754,10 +754,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev) has_eth_port = true; } - if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) - request_module_nowait(IB_DRV_NAME); if (has_eth_port) request_module_nowait(EN_DRV_NAME); + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) + request_module_nowait(IB_DRV_NAME); } /* @@ -2440,7 +2440,8 @@ slave_start: * No return code for this call, just warn the user in case of PCI * express device capabilities are under-satisfied by the bus. 
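
Back in the jme hunk above, jme_fill_tx_map() now returns -EINVAL when the mapping fails and the new jme_drop_tx_map() walks back every descriptor mapped so far. Reduced to its generic shape, with invented my_* names and the modern dma_map API in place of jme's older pci_dma_* wrappers:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct my_txbuf {
	dma_addr_t mapping;
	unsigned int len;
};

static int my_map_frags(struct device *dev, struct my_txbuf *bufs,
			struct skb_shared_info *sinfo)
{
	int i;

	for (i = 0; i < sinfo->nr_frags; i++) {
		const skb_frag_t *frag = &sinfo->frags[i];
		dma_addr_t dma;

		dma = skb_frag_dma_map(dev, frag, 0,
				       skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto unwind;	/* a partial TX mapping is useless */

		bufs[i].mapping = dma;
		bufs[i].len = skb_frag_size(frag);
	}
	return 0;

unwind:
	/* unmap only what was actually mapped, in reverse */
	while (--i >= 0) {
		dma_unmap_page(dev, bufs[i].mapping, bufs[i].len,
			       DMA_TO_DEVICE);
		bufs[i].mapping = 0;
		bufs[i].len = 0;
	}
	return -ENOMEM;
}

On failure the transmit path drops the packet and returns NETDEV_TX_OK rather than NETDEV_TX_BUSY, as jme_start_xmit() does above: requeueing would only retry a mapping that will keep failing.
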
*/ - mlx4_check_pcie_caps(dev); + if (!mlx4_is_slave(dev)) + mlx4_check_pcie_caps(dev); /* In master functions, the communication channel must be initialized * after obtaining its address from fw */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index f9c46510196..212cea440f9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index cfcad26ed40..b5b3549b0c8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -1106,6 +1106,9 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, } if (found_ix >= 0) { + /* Calculate a slave_gid which is the slave number in the gid + * table and not a globally unique slave number. + */ if (found_ix < MLX4_ROCE_PF_GIDS) slave_gid = 0; else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) * @@ -1118,41 +1121,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) / (vf_gids / num_vfs)) + vf_gids % num_vfs + 1; + /* Calculate the globally unique slave id */ if (slave_gid) { struct mlx4_active_ports exclusive_ports; struct mlx4_active_ports actv_ports; struct mlx4_slaves_pport slaves_pport_actv; unsigned max_port_p_one; - int num_slaves_before = 1; + int num_vfs_before = 0; + int candidate_slave_gid; + /* Calculate how many VFs are on the previous port, if exists */ for (i = 1; i < port; i++) { bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); - set_bit(i, exclusive_ports.ports); + set_bit(i - 1, exclusive_ports.ports); slaves_pport_actv = mlx4_phys_to_slaves_pport_actv( dev, &exclusive_ports); - num_slaves_before += bitmap_weight( + num_vfs_before += bitmap_weight( slaves_pport_actv.slaves, dev->num_vfs + 1); } - if (slave_gid < num_slaves_before) { - bitmap_zero(exclusive_ports.ports, dev->caps.num_ports); - set_bit(port - 1, exclusive_ports.ports); - slaves_pport_actv = - mlx4_phys_to_slaves_pport_actv( - dev, &exclusive_ports); - slave_gid += bitmap_weight( - slaves_pport_actv.slaves, - dev->num_vfs + 1) - - num_slaves_before; - } - actv_ports = mlx4_get_active_ports(dev, slave_gid); + /* candidate_slave_gid isn't necessarily the correct slave, but + * it has the same number of ports and is assigned to the same + * ports as the real slave we're looking for. On dual port VF, + * slave_gid = [single port VFs on port <port>] + + * [offset of the current slave from the first dual port VF] + + * 1 (for the PF). 
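
One detail in the mlx4 port.c hunk above is easy to miss: the loop now does set_bit(i - 1, exclusive_ports.ports). mlx4 numbers ports from 1 while the port bitmaps index bits from 0, so the old set_bit(i, ...) marked the wrong port and, for the last port, a bit past the map. A toy version of the convention, with invented names:

#include <linux/bitmap.h>

#define MY_NUM_PORTS 2

/* Ports are 1..MY_NUM_PORTS in the API, bits 0..MY_NUM_PORTS-1 here. */
static void my_mark_port(unsigned long *map, int port)
{
	set_bit(port - 1, map);
}

static bool my_port_is_active(const unsigned long *map, int port)
{
	/* test_bit(port, map) would check the *next* port and, for
	 * port == MY_NUM_PORTS, a bit that is out of range */
	return test_bit(port - 1, map);
}
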
+ */ + candidate_slave_gid = slave_gid + num_vfs_before; + + actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid); max_port_p_one = find_first_bit( actv_ports.ports, dev->caps.num_ports) + bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1; + /* Calculate the real slave number */ for (i = 1; i < max_port_p_one; i++) { if (i == port) continue; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 61d64ebffd5..fbd32af89c7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -389,6 +389,41 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); +#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC +int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *cmd; + u64 pri_addr_path_mask = 0; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cmd = (struct mlx4_update_qp_context *)mailbox->buf; + + if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) + return -EINVAL; + + if (attr & MLX4_UPDATE_QP_SMAC) { + pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; + cmd->qp_context.pri_path.grh_mylmc = params->smac_index; + } + + cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask); + + err = mlx4_cmd(dev, mailbox->dma, qp->qpn & 0xffffff, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_update_qp); + void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp) { struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 3b5f53ef29b..8f1254a7983 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3733,6 +3733,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, } } +static int mlx4_adjust_port(struct mlx4_dev *dev, int slave, + u8 *gid, enum mlx4_protocol prot) +{ + int real_port; + + if (prot != MLX4_PROT_ETH) + return 0; + + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 || + dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { + real_port = mlx4_slave_convert_port(dev, slave, gid[5]); + if (real_port < 0) + return -EINVAL; + gid[5] = real_port; + } + + return 0; +} + int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, @@ -3768,6 +3787,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, if (err) goto ex_detach; } else { + err = mlx4_adjust_port(dev, slave, gid, prot); + if (err) + goto ex_put; + err = rem_mcg_res(dev, slave, rqp, gid, prot, type, ®_id); if (err) goto ex_put; @@ -3872,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave, } +#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX) +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd_info) +{ + int err; + u32 qpn = vhcr->in_modifier & 0xffffff; + struct res_qp *rqp; + u64 mac; + unsigned port; + u64 pri_addr_path_mask; + struct mlx4_update_qp_context *cmd; + int smac_index; + + cmd = (struct mlx4_update_qp_context 
*)inbox->buf; + + pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask); + if (cmd->qp_mask || cmd->secondary_addr_path_mask || + (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED)) + return -EPERM; + + /* Just change the smac for the QP */ + err = get_res(dev, slave, qpn, RES_QP, &rqp); + if (err) { + mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave); + return err; + } + + port = (rqp->sched_queue >> 6 & 1) + 1; + smac_index = cmd->qp_context.pri_path.grh_mylmc; + err = mac_find_smac_ix_in_slave(dev, slave, port, + smac_index, &mac); + if (err) { + mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n", + qpn, smac_index); + goto err_mac; + } + + err = mlx4_cmd(dev, inbox->dma, + vhcr->in_modifier, 0, + MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) { + mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn); + goto err_mac; + } + +err_mac: + put_res(dev, slave, qpn, RES_QP); + return err; +} + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 7b52a88923e..f785d01c7d1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring) tx_ring->producer; } -static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, - struct net_device *netdev) -{ - int err; - - netdev->num_tx_queues = adapter->drv_tx_rings; - netdev->real_num_tx_queues = adapter->drv_tx_rings; - - err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); - if (err) - netdev_err(netdev, "failed to set %d Tx queues\n", - adapter->drv_tx_rings); - - return err; -} - struct qlcnic_nic_template { int (*config_bridged_mode) (struct qlcnic_adapter *, u32); int (*config_led) (struct qlcnic_adapter *, u32, u32); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index dbf75393f75..7e55e88a81b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter) ahw->max_uc_count = count; } +static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter, + u8 tx_queues, u8 rx_queues) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + + if (tx_queues) { + err = netif_set_real_num_tx_queues(netdev, tx_queues); + if (err) { + netdev_err(netdev, "failed to set %d Tx queues\n", + tx_queues); + return err; + } + } + + if (rx_queues) { + err = netif_set_real_num_rx_queues(netdev, rx_queues); + if (err) + netdev_err(netdev, "failed to set %d Rx queues\n", + rx_queues); + } + + return err; +} + int qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, int pci_using_dac) @@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->irq = adapter->msix_entries[0].vector; - err = qlcnic_set_real_num_queues(adapter, netdev); + err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings, + adapter->drv_sds_rings); if (err) return err; @@ -2374,6 +2400,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter) qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd); } +/* 
Reset firmware API lock */ +static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter) +{ + qlcnic_api_lock(adapter); + qlcnic_api_unlock(adapter); +} + + static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2476,6 +2510,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (qlcnic_82xx_check(adapter)) { qlcnic_check_vf(adapter, ent); adapter->portnum = adapter->ahw->pci_func; + qlcnic_reset_api_lock(adapter); err = qlcnic_start_firmware(adapter); if (err) { dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n" @@ -2934,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter) tx_ring->tx_stats.xmit_called, tx_ring->tx_stats.xmit_on, tx_ring->tx_stats.xmit_off); + + if (tx_ring->crb_intr_mask) + netdev_info(netdev, "crb_intr_mask=%d\n", + readl(tx_ring->crb_intr_mask)); + netdev_info(netdev, - "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", - readl(tx_ring->crb_intr_mask), + "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n", readl(tx_ring->crb_cmd_producer), tx_ring->producer, tx_ring->sw_consumer, le32_to_cpu(*(tx_ring->hw_consumer))); @@ -3969,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, int qlcnic_setup_rings(struct qlcnic_adapter *adapter) { struct net_device *netdev = adapter->netdev; + u8 tx_rings, rx_rings; int err; if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; + tx_rings = adapter->drv_tss_rings; + rx_rings = adapter->drv_rss_rings; + netif_device_detach(netdev); + + err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings); + if (err) + goto done; + if (netif_running(netdev)) __qlcnic_down(adapter, netdev); @@ -3994,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter) return err; } - netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings); + /* Check if we need to update real_num_{tx|rx}_queues because + * qlcnic_setup_intr() may change Tx/Rx rings size + */ + if ((tx_rings != adapter->drv_tx_rings) || + (rx_rings != adapter->drv_sds_rings)) { + err = qlcnic_set_real_num_queues(adapter, + adapter->drv_tx_rings, + adapter->drv_sds_rings); + if (err) + goto done; + } if (qlcnic_83xx_check(adapter)) { qlcnic_83xx_initialize_nic(adapter, 1); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 0638c1810d5..6afe9c1f5ab 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -1370,7 +1370,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter, rsp = qlcnic_sriov_alloc_bc_trans(&trans); if (rsp) - return rsp; + goto free_cmd; rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND); if (rsp) @@ -1425,6 +1425,13 @@ err_out: cleanup_transaction: qlcnic_sriov_cleanup_transaction(trans); + +free_cmd: + if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) { + qlcnic_free_mbx_args(cmd); + kfree(cmd); + } + return rsp; } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h index 6203c7d8550..45019649bbb 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h @@ -358,6 +358,8 @@ struct sxgbe_core_ops { /* Enable disable checksum offload operations */ void (*enable_rx_csum)(void __iomem *ioaddr); void (*disable_rx_csum)(void __iomem *ioaddr); + void (*enable_rxqueue)(void __iomem 
*ioaddr, int queue_num); + void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num); }; const struct sxgbe_core_ops *sxgbe_get_core_ops(void); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c index c4da7a2b002..58c35692560 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c @@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed) writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG); } +static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG); + reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); + reg_val |= SXGBE_CORE_RXQ_ENABLE; + writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG); +} + +static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num) +{ + u32 reg_val; + + reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG); + reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num); + reg_val |= SXGBE_CORE_RXQ_DISABLE; + writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG); +} + static void sxgbe_set_eee_mode(void __iomem *ioaddr) { u32 ctrl; @@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = { .set_eee_pls = sxgbe_set_eee_pls, .enable_rx_csum = sxgbe_enable_rx_csum, .disable_rx_csum = sxgbe_disable_rx_csum, + .enable_rxqueue = sxgbe_core_enable_rxqueue, + .disable_rxqueue = sxgbe_core_disable_rxqueue, }; const struct sxgbe_core_ops *sxgbe_get_core_ops(void) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c index e896dbbd2e1..2686bb5b676 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.c @@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd, p->tdes23.tx_rd_des23.first_desc = is_fd; p->tdes23.tx_rd_des23.buf1_size = buf1_len; - p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len; + p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len; if (cksum) - p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full; + p->tdes23.tx_rd_des23.cksum_ctl = cic_full; } /* Set VLAN control information */ @@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p) p->rdes23.rx_rd_des23.own_bit = 1; } +/* Set Interrupt on completion bit */ +static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p) +{ + p->rdes23.rx_rd_des23.int_on_com = 1; +} + /* Get the receive frame size */ static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p) { @@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = { .init_rx_desc = sxgbe_init_rx_desc, .get_rx_owner = sxgbe_get_rx_owner, .set_rx_owner = sxgbe_set_rx_owner, + .set_rx_int_on_com = sxgbe_set_rx_int_on_com, .get_rx_frame_len = sxgbe_get_rx_frame_len, .get_rx_fd_status = sxgbe_get_rx_fd_status, .get_rx_ld_status = sxgbe_get_rx_ld_status, diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h index 838cb9fb0ea..18609324db7 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_desc.h @@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc { u32 int_on_com:1; /* TDES3 */ union { - u32 tcp_payload_len:18; + u16 tcp_payload_len; struct { u32 total_pkt_len:15; u32 reserved1:1; - u32 cksum_ctl:2; - } cksum_pktlen; + } pkt_len; } tx_pkt_len; - u32 tse_bit:1; - u32 
tcp_hdr_len:4; - u32 sa_insert_ctl:3; - u32 crc_pad_ctl:2; - u32 last_desc:1; - u32 first_desc:1; - u32 ctxt_bit:1; - u32 own_bit:1; + u16 cksum_ctl:2; + u16 tse_bit:1; + u16 tcp_hdr_len:4; + u16 sa_insert_ctl:3; + u16 crc_pad_ctl:2; + u16 last_desc:1; + u16 first_desc:1; + u16 ctxt_bit:1; + u16 own_bit:1; } tx_rd_des23; /* tx write back Desc 2,3 */ @@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc { struct sxgbe_rx_norm_desc { union { - u32 rdes0; /* buf1 address */ - struct { + u64 rdes01; /* buf1 address */ + union { u32 out_vlan_tag:16; u32 in_vlan_tag:16; - } wb_rx_des0; - } rd_wb_des0; - - union { - u32 rdes1; /* buf2 address or buf1[63:32] */ - u32 rss_hash; /* Write-back RX */ - } rd_wb_des1; + u32 rss_hash; + } rx_wb_des01; + } rdes01; union { /* RX Read format Desc 2,3 */ struct{ /* RDES2 */ - u32 buf2_addr; + u64 buf2_addr:62; /* RDES3 */ - u32 buf2_hi_addr:30; u32 int_on_com:1; u32 own_bit:1; } rx_rd_des23; @@ -263,6 +258,9 @@ struct sxgbe_desc_ops { /* Set own bit */ void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p); + /* Set Interrupt on completion bit */ + void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p); + /* Get the receive frame size */ int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c index 4d989ff6c97..bb9b5b8afc5 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_dma.c @@ -23,21 +23,8 @@ /* DMA core initialization */ static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map) { - int retry_count = 10; u32 reg_val; - /* reset the DMA */ - writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG); - while (retry_count--) { - if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) & - SXGBE_DMA_SOFT_RESET)) - break; - mdelay(10); - } - - if (retry_count < 0) - return -EBUSY; - reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG); /* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register. 
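
The soft-reset poll deleted from sxgbe_dma_init() above is not lost: it reappears below as sxgbe_sw_reset() in sxgbe_main.c, run once at probe time before anything else touches the controller. The while (retry_count--) timeout idiom it uses has a well-known off-by-one subtlety, sketched here with invented names:

#include <linux/delay.h>
#include <linux/io.h>

#define MY_SOFT_RESET	0x1	/* self-clearing reset bit (made up) */

static int my_poll_reset_done(void __iomem *mode_reg)
{
	int retry_count = 10;

	while (retry_count--) {		/* post-decrement: 10 passes */
		if (!(readl(mode_reg) & MY_SOFT_RESET))
			break;
		mdelay(10);
	}

	/* exhausting the loop leaves retry_count at -1, while success on
	 * the very last pass leaves it at 0, so "< 0" means timeout */
	if (retry_count < 0)
		return -EBUSY;

	return 0;
}
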
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 27e8c824b20..82a9a983869 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev) /* Initialize the MAC Core */ priv->hw->mac->core_init(priv->ioaddr); + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num); + } /* Request the IRQ lines */ ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, @@ -1453,6 +1456,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv) /* Added memory barrier for RX descriptor modification */ wmb(); priv->hw->desc->set_rx_owner(p); + priv->hw->desc->set_rx_int_on_com(p); /* Added memory barrier for RX descriptor modification */ wmb(); } @@ -2070,6 +2074,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv) return 0; } +static int sxgbe_sw_reset(void __iomem *addr) +{ + int retry_count = 10; + + writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG); + while (retry_count--) { + if (!(readl(addr + SXGBE_DMA_MODE_REG) & + SXGBE_DMA_SOFT_RESET)) + break; + mdelay(10); + } + + if (retry_count < 0) + return -EBUSY; + + return 0; +} + /** * sxgbe_drv_probe * @device: device pointer @@ -2102,6 +2124,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device, priv->plat = plat_dat; priv->ioaddr = addr; + ret = sxgbe_sw_reset(priv->ioaddr); + if (ret) + goto error_free_netdev; + /* Verify driver arguments */ sxgbe_verify_args(); @@ -2218,9 +2244,14 @@ error_free_netdev: int sxgbe_drv_remove(struct net_device *ndev) { struct sxgbe_priv_data *priv = netdev_priv(ndev); + u8 queue_num; netdev_info(ndev, "%s: removing driver\n", __func__); + SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) { + priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num); + } + priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c index 01af2cbb479..43ccb4a6de1 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c @@ -27,7 +27,7 @@ #define SXGBE_SMA_PREAD_CMD 0x02 /* post read increament address */ #define SXGBE_SMA_READ_CMD 0x03 /* read command */ #define SXGBE_SMA_SKIP_ADDRFRM 0x00040000 /* skip the address frame */ -#define SXGBE_MII_BUSY 0x00800000 /* mii busy */ +#define SXGBE_MII_BUSY 0x00400000 /* mii busy */ static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data) { @@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev) struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data; int err, phy_addr; int *irqlist; + bool phy_found = false; bool act; /* allocate the new mdio bus */ @@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev) irqlist = priv->mii_irq; /* assign mii bus fields */ - mdio_bus->name = "sxgbe"; + mdio_bus->name = "samsxgbe"; mdio_bus->read = &sxgbe_mdio_read; mdio_bus->write = &sxgbe_mdio_write; snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x", @@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev) netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n", phy->phy_id, phy_addr, irq_str, dev_name(&phy->dev), act ?
" active" : ""); + phy_found = true; } } + if (!phy_found) { + netdev_err(ndev, "PHY not found\n"); + goto phyfound_err; + } + priv->mii = mdio_bus; return 0; +phyfound_err: + err = -ENODEV; + mdiobus_unregister(mdio_bus); mdiobus_err: mdiobus_free(mdio_bus); return err; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h index 5a89acb4c50..56f8bf5a3f1 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h @@ -52,6 +52,10 @@ #define SXGBE_CORE_RX_CTL2_REG 0x00A8 #define SXGBE_CORE_RX_CTL3_REG 0x00AC +#define SXGBE_CORE_RXQ_ENABLE_MASK 0x0003 +#define SXGBE_CORE_RXQ_ENABLE 0x0002 +#define SXGBE_CORE_RXQ_DISABLE 0x0000 + /* Interrupt Registers */ #define SXGBE_CORE_INT_STATUS_REG 0x00B0 #define SXGBE_CORE_INT_ENABLE_REG 0x00B4 diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c index 32d969e857f..89b83e59e1d 100644 --- a/drivers/net/ethernet/sfc/nic.c +++ b/drivers/net/ethernet/sfc/nic.c @@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) efx->net_dev->rx_cpu_rmap = NULL; #endif - /* Disable MSI/MSI-X interrupts */ - efx_for_each_channel(channel, efx) - free_irq(channel->irq, &efx->msi_context[channel->channel]); - - /* Disable legacy interrupt */ - if (efx->legacy_irq) + if (EFX_INT_MODE_USE_MSI(efx)) { + /* Disable MSI/MSI-X interrupts */ + efx_for_each_channel(channel, efx) + free_irq(channel->irq, + &efx->msi_context[channel->channel]); + } else { + /* Disable legacy interrupt */ free_irq(efx->legacy_irq, efx); + } } /* Register dump */ diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index d1b4dca53a9..bcaa41af1e6 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x"); */ #define MII_DELAY 1 -#if SMC_DEBUG > 0 -#define DBG(n, dev, args...) \ - do { \ - if (SMC_DEBUG >= (n)) \ - netdev_dbg(dev, args); \ +#define DBG(n, dev, fmt, ...) \ + do { \ + if (SMC_DEBUG >= (n)) \ + netdev_dbg(dev, fmt, ##__VA_ARGS__); \ } while (0) -#define PRINTK(dev, args...) netdev_info(dev, args) -#else -#define DBG(n, dev, args...) do { } while (0) -#define PRINTK(dev, args...) netdev_dbg(dev, args) -#endif +#define PRINTK(dev, fmt, ...) \ + do { \ + if (SMC_DEBUG > 0) \ + netdev_info(dev, fmt, ##__VA_ARGS__); \ + else \ + netdev_dbg(dev, fmt, ##__VA_ARGS__); \ + } while (0) #if SMC_DEBUG > 3 static void PRINT_PKT(u_char *buf, int length) @@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length) pr_cont("\n"); } #else -#define PRINT_PKT(x...) 
do { } while (0) +static inline void PRINT_PKT(u_char *buf, int length) { } #endif @@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp) int timeout = 20; unsigned long cookie; - DBG(2, dev, "%s: %s\n", CARDNAME, __func__); + DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__); cookie = probe_irq_on(); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d940034acdd..0f4841d2e8d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev) if (ret) { pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); - goto phy_error; + return ret; } } @@ -1779,8 +1779,6 @@ init_error: dma_desc_error: if (priv->phydev) phy_disconnect(priv->phydev); -phy_error: - clk_disable_unprepare(priv->stmmac_clk); return ret; } diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index df8d383acf4..b9ac20f4265 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp) int i; for (i = 0; i < N_TX_RINGS; i++) - spin_lock(&cp->tx_lock[i]); + spin_lock_nested(&cp->tx_lock[i], i); } static inline void cas_lock_all(struct cas *cp) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 36aa109416c..c331b7ebc81 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); phyid = be32_to_cpup(parp+1); mdio = of_find_device_by_node(mdio_node); - - if (strncmp(mdio->name, "gpio", 4) == 0) { - /* GPIO bitbang MDIO driver attached */ - struct mii_bus *bus = dev_get_drvdata(&mdio->dev); - - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, bus->id, phyid); - } else { - /* davinci MDIO driver attached */ - snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), - PHY_ID_FMT, mdio->name, phyid); + of_node_put(mdio_node); + if (!mdio) { + pr_err("Missing mdio platform device\n"); + return -EINVAL; } + snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), + PHY_ID_FMT, mdio->name, phyid); mac_addr = of_get_mac_address(slave_node); if (mac_addr) diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 31e55fba7ca..7918d5132c1 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -382,6 +382,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) if (skb_is_gso(skb)) goto do_lso; + if ((skb->ip_summed == CHECKSUM_NONE) || + (skb->ip_summed == CHECKSUM_UNNECESSARY)) + goto do_send; + rndis_msg_size += NDIS_CSUM_PPI_SIZE; ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE, TCPIP_CHKSUM_PKTINFO); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 753a8c23d15..d53e299ae1d 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; - __u8 ip_summed = skb->ip_summed; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; - skb->ip_summed = CHECKSUM_UNNECESSARY; /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { 
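
The smc91x change a few hunks up is about more than style: with the old #if SMC_DEBUG > 0 variants, the DBG()/PRINTK()/PRINT_PKT() arguments were not even parsed in non-debug builds, so mistakes such as the DBG(2, dev, ...) use of a nonexistent dev in smc_findirq() (fixed just above) only surfaced when someone turned debugging on. Keeping one always-compiled definition lets the compiler type-check every call while the if (SMC_DEBUG >= n) branch folds away. Minimal form of the pattern, with invented names:

#include <linux/netdevice.h>

#define MY_DEBUG 0	/* compile-time verbosity knob */

/* ##__VA_ARGS__ (a GNU extension the kernel relies on) swallows the
 * trailing comma when only a format string is passed */
#define my_dbg(lvl, dev, fmt, ...)				\
	do {							\
		if (MY_DEBUG >= (lvl))				\
			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
	} while (0)

/* likewise, an empty static inline keeps call sites compiled */
static inline void my_print_pkt(unsigned char *buf, int length) { }
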
@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) } xmit_world: - skb->ip_summed = ip_summed; skb->dev = vlan->lowerdev; return dev_queue_xmit(skb); } @@ -461,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change) struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; - if (change & IFF_ALLMULTI) - dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + if (dev->flags & IFF_UP) { + if (change & IFF_ALLMULTI) + dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); + } } static void macvlan_set_mac_lists(struct net_device *dev) @@ -518,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key; #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) +static int macvlan_get_nest_level(struct net_device *dev) +{ + return ((struct macvlan_dev *)netdev_priv(dev))->nest_level; +} + static void macvlan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) @@ -528,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev, static void macvlan_set_lockdep_class(struct net_device *dev) { - lockdep_set_class(&dev->addr_list_lock, - &macvlan_netdev_addr_lock_key); + lockdep_set_class_and_subclass(&dev->addr_list_lock, + &macvlan_netdev_addr_lock_key, + macvlan_get_nest_level(dev)); netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); } @@ -724,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = { .ndo_fdb_add = macvlan_fdb_add, .ndo_fdb_del = macvlan_fdb_del, .ndo_fdb_dump = ndo_dflt_fdb_dump, + .ndo_get_lock_subclass = macvlan_get_nest_level, }; void macvlan_common_setup(struct net_device *dev) @@ -852,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, vlan->dev = dev; vlan->port = port; vlan->set_features = MACVLAN_FEATURES; + vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index ff111a89e17..3381c4f91a8 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb) segs = nskb; } } else { + /* If we receive a partial checksum and the tap side + * doesn't support checksum offload, compute the checksum. + * Note: it doesn't matter which checksum feature to + * check, we either support them all or none. 
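
Two of the hunks above are lockdep plumbing rather than behavioural fixes. cassini now takes its per-ring TX locks with spin_lock_nested(&cp->tx_lock[i], i): all the locks share one class, so lockdep needs the subclass (the ring index) to recognise the ordered acquisition as legal rather than recursive. macvlan solves the stacked-device flavour of the same problem by keying addr_list_lock on nesting depth via dev_get_nest_level() and the new ndo_get_lock_subclass hook. A sketch of the array case, with an invented structure; note that lockdep only supports a small number of subclasses (8), which a four-ring array fits:

#include <linux/spinlock.h>

#define MY_NRINGS 4	/* must stay below MAX_LOCKDEP_SUBCLASSES (8) */

struct my_card {
	spinlock_t ring_lock[MY_NRINGS];
};

static void my_lock_all_rings(struct my_card *cp)
{
	int i;

	for (i = 0; i < MY_NRINGS; i++)
		spin_lock_nested(&cp->ring_lock[i], i);
}

static void my_unlock_all_rings(struct my_card *cp)
{
	int i;

	/* release in reverse acquisition order */
	for (i = MY_NRINGS - 1; i >= 0; i--)
		spin_unlock(&cp->ring_lock[i]);
}
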
+ */ + if (skb->ip_summed == CHECKSUM_PARTIAL && + !(features & NETIF_F_ALL_CSUM) && + skb_checksum_help(skb)) + goto drop; skb_queue_tail(&q->sk.sk_receive_queue, skb); } diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 9c4defdec67..5f1a2250018 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev) if (pdev->dev.of_node) { pdata = mdio_gpio_of_get_data(pdev); bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio"); + if (bus_id < 0) { + dev_warn(&pdev->dev, "failed to get alias id\n"); + bus_id = 0; + } } else { pdata = dev_get_platdata(&pdev->dev); bus_id = pdev->id; diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 5ad971a55c5..d849684231c 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev, if (val1 != -1) newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0); - if (val2 != -1) + if (val2 != -2) newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4); - if (val3 != -1) + if (val3 != -3) newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8); - if (val4 != -1) + if (val4 != -4) newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12); return kszphy_extended_write(phydev, reg, newval); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1b6d09aef42..3bc079a67a3 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work) struct delayed_work *dwork = to_delayed_work(work); struct phy_device *phydev = container_of(dwork, struct phy_device, state_queue); - int needs_aneg = 0, do_suspend = 0; + bool needs_aneg = false, do_suspend = false, do_resume = false; int err = 0; mutex_lock(&phydev->lock); @@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work) case PHY_PENDING: break; case PHY_UP: - needs_aneg = 1; + needs_aneg = true; phydev->link_timeout = PHY_AN_TIMEOUT; @@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work) phydev->adjust_link(phydev->attached_dev); } else if (0 == phydev->link_timeout--) - needs_aneg = 1; + needs_aneg = true; break; case PHY_NOLINK: err = phy_read_status(phydev); @@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work) break; if (phydev->link) { + if (AUTONEG_ENABLE == phydev->autoneg) { + err = phy_aneg_done(phydev); + if (err < 0) + break; + + if (!err) { + phydev->state = PHY_AN; + phydev->link_timeout = PHY_AN_TIMEOUT; + break; + } + } phydev->state = PHY_RUNNING; netif_carrier_on(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev); @@ -780,7 +791,7 @@ void phy_state_machine(struct work_struct *work) netif_carrier_on(phydev->attached_dev); } else { if (0 == phydev->link_timeout--) - needs_aneg = 1; + needs_aneg = true; } phydev->adjust_link(phydev->attached_dev); @@ -816,7 +827,7 @@ void phy_state_machine(struct work_struct *work) phydev->link = 0; netif_carrier_off(phydev->attached_dev); phydev->adjust_link(phydev->attached_dev); - do_suspend = 1; + do_suspend = true; } break; case PHY_RESUMING: @@ -865,6 +876,7 @@ void phy_state_machine(struct work_struct *work) } phydev->adjust_link(phydev->attached_dev); } + do_resume = true; break; } @@ -872,9 +884,10 @@ void phy_state_machine(struct work_struct *work) if (needs_aneg) err = phy_start_aneg(phydev); - - if (do_suspend) + else if (do_suspend) phy_suspend(phydev); + else if (do_resume) + 
phy_resume(phydev); if (err < 0) phy_error(phydev); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 0ce60662429..4987a1c6dc5 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, err = phy_init_hw(phydev); if (err) phy_detach(phydev); - - phy_resume(phydev); + else + phy_resume(phydev); return err; } diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index cc70ecfc706..ad4a94e9ff5 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty) if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) return; - spin_lock(&sl->lock); + spin_lock_bh(&sl->lock); if (sl->xleft <= 0) { /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - spin_unlock(&sl->lock); + spin_unlock_bh(&sl->lock); sl_unlock(sl); return; } @@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty) actual = tty->ops->write(tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; - spin_unlock(&sl->lock); + spin_unlock_bh(&sl->lock); } static void sl_tx_timeout(struct net_device *dev) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 33008c1d1d6..767fe61b5ac 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2834,8 +2834,10 @@ static int team_device_event(struct notifier_block *unused, case NETDEV_UP: if (netif_carrier_ok(dev)) team_port_change_check(port, true); + break; case NETDEV_DOWN: team_port_change_check(port, false); + break; case NETDEV_CHANGE: if (netif_running(port->dev)) team_port_change_check(port, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index c9f3281506a..2e025ddcef2 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf) cdc_ncm_unbind(dev, intf); } +/* verify that the ethernet protocol is IPv4 or IPv6 */ +static bool is_ip_proto(__be16 proto) +{ + switch (proto) { + case htons(ETH_P_IP): + case htons(ETH_P_IPV6): + return true; + } + return false; +} static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { @@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb struct cdc_ncm_ctx *ctx = info->ctx; __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); u16 tci = 0; + bool is_ip; u8 *c; if (!ctx) @@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb if (skb->len <= ETH_HLEN) goto error; + /* Some applications using e.g. packet sockets will + * bypass the VLAN acceleration and create tagged + * ethernet frames directly. 
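
The two-line team driver fix above is the classic missing-break bug: NETDEV_UP fell through into the NETDEV_DOWN case, so a port coming up was immediately reported down again, and DOWN fell on into CHANGE. Reduced to a self-contained shape with invented my_* names:

#include <linux/netdevice.h>
#include <linux/notifier.h>

struct my_port {
	struct net_device *dev;
};

static void my_port_change_check(struct my_port *port, bool up)
{
	/* stub: propagate carrier state to the aggregated device */
}

static int my_device_event(unsigned long event, struct my_port *port)
{
	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(port->dev))
			my_port_change_check(port, true);
		break;	/* was missing: fell into NETDEV_DOWN */
	case NETDEV_DOWN:
		my_port_change_check(port, false);
		break;	/* was missing: fell into NETDEV_CHANGE */
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			my_port_change_check(port,
					     netif_carrier_ok(port->dev));
		break;
	}
	return NOTIFY_DONE;
}
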
We primarily look for + * the accelerated out-of-band tag, but fall back if + * required + */ + skb_reset_mac_header(skb); + if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN && + __vlan_get_tag(skb, &tci) == 0) { + is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); + skb_pull(skb, VLAN_ETH_HLEN); + } else { + is_ip = is_ip_proto(eth_hdr(skb)->h_proto); + skb_pull(skb, ETH_HLEN); + } + /* mapping VLANs to MBIM sessions: * no tag => IPS session <0> * 1 - 255 => IPS session <vlanid> * 256 - 511 => DSS session <vlanid - 256> * 512 - 4095 => unsupported, drop */ - vlan_get_tag(skb, &tci); - switch (tci & 0x0f00) { case 0x0000: /* VLAN ID 0 - 255 */ - /* verify that datagram is IPv4 or IPv6 */ - skb_reset_mac_header(skb); - switch (eth_hdr(skb)->h_proto) { - case htons(ETH_P_IP): - case htons(ETH_P_IPV6): - break; - default: + if (!is_ip) goto error; - } c = (u8 *)&sign; c[3] = tci; break; @@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb "unsupported tci=0x%04x\n", tci); goto error; } - skb_pull(skb, ETH_HLEN); } spin_lock_bh(&ctx->mtx); @@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) return; /* need to send the NA on the VLAN dev, if any */ - if (tci) + rcu_read_lock(); + if (tci) { netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q), tci); - else + if (!netdev) { + rcu_read_unlock(); + return; + } + } else { netdev = dev->net; - if (!netdev) - return; + } + dev_hold(netdev); + rcu_read_unlock(); in6_dev = in6_dev_get(netdev); if (!in6_dev) - return; + goto out; is_router = !!in6_dev->cnf.forwarding; in6_dev_put(in6_dev); @@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci) true /* solicited */, false /* override */, true /* inc_opt */); +out: + dev_put(netdev); } static bool is_neigh_solicit(u8 *buf, size_t len) diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 549dbac710e..9a2bd11943e 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -785,7 +785,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) skb_out->len > CDC_NCM_MIN_TX_PKT) memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, ctx->tx_max - skb_out->len); - else if ((skb_out->len % dev->maxpacket) == 0) + else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) *skb_put(skb_out, 1) = 0; /* force short packet */ /* set final frame length */ diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index e3458e3c44f..83208d4fdc5 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -669,6 +669,22 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x05c6, 0x920d, 5)}, {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */ + {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ + {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ + {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ + {QMI_FIXED_INTF(0x16d8, 0x6280, 0)}, /* CMOTech CHU-628 */ + {QMI_FIXED_INTF(0x16d8, 0x7001, 0)}, /* CMOTech CHU-720S */ + {QMI_FIXED_INTF(0x16d8, 0x7002, 0)}, /* CMOTech 7002 */ + {QMI_FIXED_INTF(0x16d8, 0x7003, 4)}, /* CMOTech CHU-629K */ + {QMI_FIXED_INTF(0x16d8, 0x7004, 3)}, /* CMOTech 7004 */ + {QMI_FIXED_INTF(0x16d8, 0x7006, 5)}, /* CMOTech CGU-629 */ + {QMI_FIXED_INTF(0x16d8, 0x700a, 4)}, /* CMOTech CHU-629S */ + {QMI_FIXED_INTF(0x16d8, 0x7211, 0)}, /* CMOTech CHU-720I */ + 
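
The do_neigh_solicit() change above (cdc_mbim) is a textbook RCU lookup-and-pin: resolve the VLAN device inside rcu_read_lock(), take a dev_hold() reference before dropping the lock so the device cannot disappear while the neighbour advertisement is built, and dev_put() when done. The same shape in isolation, using the 3.15-era helper name the hunk uses:

#include <linux/if_vlan.h>
#include <linux/netdevice.h>

/* Returns a held reference (caller must dev_put()), or NULL. */
static struct net_device *my_get_session_dev(struct net_device *lower,
					     u16 tci)
{
	struct net_device *netdev;

	rcu_read_lock();
	if (tci) {
		netdev = __vlan_find_dev_deep(lower, htons(ETH_P_8021Q), tci);
		if (!netdev) {
			rcu_read_unlock();
			return NULL;
		}
	} else {
		netdev = lower;
	}
	dev_hold(netdev);	/* pin before leaving the RCU section */
	rcu_read_unlock();

	return netdev;
}
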
{QMI_FIXED_INTF(0x16d8, 0x7212, 0)}, /* CMOTech 7212 */ + {QMI_FIXED_INTF(0x16d8, 0x7213, 0)}, /* CMOTech 7213 */ + {QMI_FIXED_INTF(0x16d8, 0x7251, 1)}, /* CMOTech 7251 */ + {QMI_FIXED_INTF(0x16d8, 0x7252, 1)}, /* CMOTech 7252 */ + {QMI_FIXED_INTF(0x16d8, 0x7253, 1)}, /* CMOTech 7253 */ {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, @@ -730,16 +746,28 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC73xx */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC73xx */ + {QMI_FIXED_INTF(0x1199, 0x68c0, 11)}, /* Sierra Wireless MC73xx */ {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ + {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ + {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */ {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */ + {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ + {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 7b687469199..8a852b5f215 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev, if (channels->rx_count || channels->tx_count || channels->other_count) return -EINVAL; - if (queue_pairs > vi->max_queue_pairs) + if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) return -EINVAL; get_online_cpus(); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 82355d5d155..4dbb2ed85b9 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -389,8 +389,8 @@ static inline size_t vxlan_nlmsg_size(void) + nla_total_size(sizeof(struct nda_cacheinfo)); } -static void vxlan_fdb_notify(struct vxlan_dev *vxlan, - struct vxlan_fdb *fdb, int type) +static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, + struct vxlan_rdst *rd, int type) { struct net *net = dev_net(vxlan->dev); struct sk_buff *skb; @@ -400,8 +400,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan, if (skb == NULL) goto errout; - err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, - first_remote_rtnl(fdb)); + err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd); if (err < 0) { /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); @@ -427,10 +426,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) .remote_vni = VXLAN_N_VID, }; - INIT_LIST_HEAD(&f.remotes); - list_add_rcu(&remote.list, &f.remotes); - - vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); } static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) @@ -438,11 +434,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) struct vxlan_fdb f = { .state = NUD_STALE, }; + struct vxlan_rdst remote = { }; - INIT_LIST_HEAD(&f.remotes); memcpy(f.eth_addr, eth_addr, ETH_ALEN); - vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); } /* Hash Ethernet address */ @@ -533,7 +529,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, - union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) + union vxlan_addr *ip, __be16 port, __u32 vni, + __u32 ifindex, struct vxlan_rdst **rdp) { struct vxlan_rdst *rd; @@ -551,6 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, list_add_tail_rcu(&rd->list, &f->remotes); + *rdp = rd; return 1; } @@ -690,6 +688,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, __be16 port, __u32 vni, __u32 ifindex, __u8 ndm_flags) { + struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; int notify = 0; @@ -726,7 +725,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, if ((flags & NLM_F_APPEND) && (is_multicast_ether_addr(f->eth_addr) || is_zero_ether_addr(f->eth_addr))) { - int rc = vxlan_fdb_append(f, ip, port, vni, ifindex); + int rc = vxlan_fdb_append(f, ip, port, vni, ifindex, + &rd); if (rc < 0) return rc; @@ -756,15 +756,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, INIT_LIST_HEAD(&f->remotes); memcpy(f->eth_addr, mac, ETH_ALEN); - vxlan_fdb_append(f, ip, port, vni, ifindex); + vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); ++vxlan->addrcnt; hlist_add_head_rcu(&f->hlist, vxlan_fdb_head(vxlan, mac)); } - if (notify) - vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); + 
if (notify) { + if (rd == NULL) + rd = first_remote_rtnl(f); + vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH); + } return 0; } @@ -785,7 +788,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) "delete %pM\n", f->eth_addr); --vxlan->addrcnt; - vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH); + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -919,6 +922,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], */ if (rd && !list_is_singular(&f->remotes)) { list_del_rcu(&rd->list); + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); kfree_rcu(rd, rcu); goto out; } @@ -993,7 +997,7 @@ static bool vxlan_snoop(struct net_device *dev, rdst->remote_ip = *src_ip; f->updated = jiffies; - vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH); + vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH); } else { /* learned new entry */ spin_lock(&vxlan->hash_lock); diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index a0398fe3eb2..be3eb2a8d60 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev) int irq; int ret = 0; struct ath_hw *ah; - struct ath_common *common; char hw_name[64]; if (!dev_get_platdata(&pdev->dev)) { @@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev) wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", hw_name, (unsigned long)mem, irq); - common = ath9k_hw_common(sc->sc_ah); - /* Will be cleared in ath9k_start() */ - set_bit(ATH_OP_INVALID, &common->op_flags); return 0; err_irq: diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 6d47783f2e5..ba502a2d199 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, ATH9K_ANI_RSSI_THR_LOW, ATH9K_ANI_RSSI_THR_HIGH); + if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL) + immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL; + if (!scan) aniState->ofdmNoiseImmunityLevel = immunityLevel; @@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel, BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW, ATH9K_ANI_RSSI_THR_HIGH); + if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL) + immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL; + if (ah->opmode == NL80211_IFTYPE_STATION && BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW && immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI) diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 44d74495c4d..3ba03dde421 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -251,7 +251,6 @@ struct ath_atx_tid { s8 bar_index; bool sched; - bool paused; bool active; }; diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c index d76e6e0120d..ffca918ff16 100644 --- a/drivers/net/wireless/ath/ath9k/debug_sta.c +++ b/drivers/net/wireless/ath/ath9k/debug_sta.c @@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf, ath_txq_lock(sc, txq); if (tid->active) { len += scnprintf(buf + len, size - len, - "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n", + "%3d%11d%10d%10d%10d%10d%9d%6d\n", tid->tidno, tid->seq_start, tid->seq_next, @@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user 
*user_buf, tid->baw_head, tid->baw_tail, tid->bar_index, - tid->sched, - tid->paused); + tid->sched); } ath_txq_unlock(sc, txq); } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index f46cd0250e4..5627917c5ff 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) if ((vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_MESH_POINT) && - bss_conf->enable_beacon) + bss_conf->enable_beacon) { priv->reconfig_beacon = true; + priv->rearm_ani = true; + } if (bss_conf->assoc) { priv->rearm_ani = true; @@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, ath9k_htc_ps_wakeup(priv); + ath9k_htc_stop_ani(priv); del_timer_sync(&priv->tx.cleanup_timer); ath9k_htc_tx_drain(priv); diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index cbbb02a6b13..36ae6490e55 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -783,6 +783,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, common = ath9k_hw_common(ah); ath9k_set_hw_capab(sc, hw); + /* Will be cleared in ath9k_start() */ + set_bit(ATH_OP_INVALID, &common->op_flags); + /* Initialize regulatory */ error = ath_regd_init(&common->regulatory, sc->hw->wiphy, ath9k_reg_notifier); diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 25304adece5..914dbc6b172 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct ath_softc *sc; struct ieee80211_hw *hw; - struct ath_common *common; u8 csz; u32 val; int ret = 0; @@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n", hw_name, (unsigned long)sc->mem, pdev->irq); - /* Will be cleared in ath9k_start() */ - common = ath9k_hw_common(sc->sc_ah); - set_bit(ATH_OP_INVALID, &common->op_flags); - return 0; err_init: diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 6c9accdb52e..19df969ec90 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -975,6 +975,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) u64 tsf = 0; unsigned long flags; dma_addr_t new_buf_addr; + unsigned int budget = 512; if (edma) dma_type = DMA_BIDIRECTIONAL; @@ -1113,15 +1114,17 @@ requeue_drop_frag: } requeue: list_add_tail(&bf->list, &sc->rx.rxbuf); - if (flush) - continue; if (edma) { ath_rx_edma_buf_link(sc, qtype); } else { ath_rx_buf_relink(sc, bf); - ath9k_hw_rxena(ah); + if (!flush) + ath9k_hw_rxena(ah); } + + if (!budget--) + break; } while (1); if (!(ah->imask & ATH9K_INT_RXEOL)) { diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 87cbec47fb4..66acb2cbd9d 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) { struct ath_atx_ac *ac = tid->ac; - if (tid->paused) - return; - if (tid->sched) return; @@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, ath_tx_tid_change_state(sc, txtid); txtid->active = true; - 
txtid->paused = true; *ssn = txtid->seq_start = txtid->seq_next; txtid->bar_index = -1; @@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) ath_txq_lock(sc, txq); txtid->active = false; - txtid->paused = false; ath_tx_flush_tid(sc, txtid); ath_tx_tid_change_state(sc, txtid); ath_txq_unlock_complete(sc, txq); @@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an) ath_txq_lock(sc, txq); ac->clear_ps_filter = true; - if (!tid->paused && ath_tid_has_buffered(tid)) { + if (ath_tid_has_buffered(tid)) { ath_tx_queue_tid(txq, tid); ath_txq_schedule(sc, txq); } @@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, ath_txq_lock(sc, txq); tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; - tid->paused = false; if (ath_tid_has_buffered(tid)) { ath_tx_queue_tid(txq, tid); @@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw, continue; tid = ATH_AN_2_TID(an, i); - if (tid->paused) - continue; ath_txq_lock(sc, tid->ac->txq); while (nframes > 0) { @@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) list_del(&tid->list); tid->sched = false; - if (tid->paused) - continue; - if (ath_tx_sched_aggr(sc, txq, tid, &stop)) sent = true; @@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->baw_size = WME_MAX_BA; tid->baw_head = tid->baw_tail = 0; tid->sched = false; - tid->paused = false; tid->active = false; __skb_queue_head_init(&tid->buf_q); __skb_queue_head_init(&tid->retry_q); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c index df130ef53d1..c7c9f15c0fe 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c @@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core, ci = core->chip; - /* if core is already in reset, just return */ + /* if core is already in reset, skip reset */ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL); if ((regdata & BCMA_RESET_CTL_RESET) != 0) - return; + goto in_reset_configure; /* configure reset */ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, @@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core, SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) != BCMA_RESET_CTL_RESET, 300); +in_reset_configure: /* in-reset configure */ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL, reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index afb3d15e38f..be1985296bd 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp) if (!err) { /* only set 2G bandwidth using bw_cap command */ band_bwcap.band = cpu_to_le32(WLC_BAND_2G); - band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT); + band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ); err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap, sizeof(band_bwcap)); } else { diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c index fa858d548d1..0489314425c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/coex.c @@ -611,14 +611,14 @@ int 
iwl_send_bt_init_conf(struct iwl_mvm *mvm) bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO); if (IWL_MVM_BT_COEX_CORUNNING) { - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 | - BT_VALID_CORUN_LUT_40); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 | + BT_VALID_CORUN_LUT_40); bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING); } if (IWL_MVM_BT_COEX_MPLUT) { bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT); - bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); + bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT); } if (mvm->cfg->bt_shared_single_ant) diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 9426905de6b..d73a89ecd78 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -183,9 +183,9 @@ enum iwl_scan_type { * this number of packets were received (typically 1) * @passive2active: is auto switching from passive to active during scan allowed * @rxchain_sel_flags: RXON_RX_CHAIN_* - * @max_out_time: in usecs, max out of serving channel time + * @max_out_time: in TUs, max out of serving channel time * @suspend_time: how long to pause scan when returning to service channel: - * bits 0-19: beacon interal in usecs (suspend before executing) + * bits 0-19: beacon interval in TUs (suspend before executing) * bits 20-23: reserved * bits 24-31: number of beacons (suspend between channels) * @rxon_flags: RXON_FLG_* @@ -383,8 +383,8 @@ enum scan_framework_client { * @quiet_plcp_th: quiet channel num of packets threshold * @good_CRC_th: passive to active promotion threshold * @rx_chain: RXON rx chain. - * @max_out_time: max uSec to be out of assoceated channel - * @suspend_time: pause scan this long when returning to service channel + * @max_out_time: max TUs to be out of associated channel + * @suspend_time: pause scan this many TUs when returning to service channel * @flags: RXON flags * @filter_flags: RXONfilter * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz. diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c index f0cebf12c7b..b41dc84e943 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); - ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); if (ret) IWL_ERR(mvm, "mcast filter cmd error. 
ret=%d\n", ret); } @@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) return; - ieee80211_iterate_active_interfaces( + ieee80211_iterate_active_interfaces_atomic( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_mc_iface_iterator, &iter_data); } @@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, mutex_lock(&mvm->mutex); + if (!iwl_mvm_is_idle(mvm)) { + ret = -EBUSY; + goto out; + } + switch (mvm->scan_status) { case IWL_MVM_SCAN_OS: IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n"); diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index d564233a65d..f1ec0986c3c 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) return mvmvif->low_latency; } +/* Assoc status */ +bool iwl_mvm_is_idle(struct iwl_mvm *mvm); + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c index 9f52c5b3f0e..e1c83889936 100644 --- a/drivers/net/wireless/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/iwlwifi/mvm/rs.c @@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband, return; } -#ifdef CPTCFG_MAC80211_DEBUGFS +#ifdef CONFIG_MAC80211_DEBUGFS /* Disable last tx check if we are debugging with fixed rate */ if (lq_sta->dbg_fixed_rate) { IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n"); diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index c91dc849885..c28de54c75d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, iwl_mvm_scan_condition_iterator, &global_bound); - /* - * Under low latency traffic passive scan is fragmented meaning - * that dwell on a particular channel will be fragmented. Each fragment - * dwell time is 20ms and fragments period is 105ms. Skipping to next - * channel will be delayed by the same period - 105ms. So suspend_time - * parameter describing both fragments and channels skipping periods is - * set to 105ms. This value is chosen so that overall passive scan - * duration will not be too long. Max_out_time in this case is set to - * 70ms, so for active scanning operating channel will be left for 70ms - * while for passive still for 20ms (fragment dwell). - */ - if (global_bound) { - if (!iwl_mvm_low_latency(mvm)) { - params->suspend_time = ieee80211_tu_to_usec(100); - params->max_out_time = ieee80211_tu_to_usec(600); - } else { - params->suspend_time = ieee80211_tu_to_usec(105); - /* P2P doesn't support fragmented passive scan, so - * configure max_out_time to be at least longest dwell - * time for passive scan. - */ - if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { - params->max_out_time = ieee80211_tu_to_usec(70); - params->passive_fragmented = true; - } else { - u32 passive_dwell; - /* - * Use band G so that passive channel dwell time - * will be assigned with maximum value. 
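Aside: the scan.c rewrite above stops converting dwell parameters with ieee80211_tu_to_usec(); after this patch suspend_time and max_out_time are handed to the firmware in TUs directly, matching the fw-api-scan.h comment changes. One TU is 1024 microseconds in 802.11, which is exactly what mac80211's ieee80211_tu_to_usec() computes, so the new defaults translate as in this standalone sketch (the helper below is local to the example):

#include <stdint.h>
#include <stdio.h>

/* 1 TU (time unit) = 1024 microseconds; mirrors ieee80211_tu_to_usec() */
static uint32_t tu_to_usec(uint32_t tu)
{
        return tu * 1024;
}

int main(void)
{
        /* the defaults the rewritten iwl_mvm_scan_calc_params() now uses */
        printf("suspend_time 100 TU = %u usec\n", (unsigned)tu_to_usec(100));
        printf("max_out_time 600 TU = %u usec\n", (unsigned)tu_to_usec(600));
        return 0;
}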
- */ - band = IEEE80211_BAND_2GHZ; - passive_dwell = iwl_mvm_get_passive_dwell(band); - params->max_out_time = - ieee80211_tu_to_usec(passive_dwell); - } - } + if (!global_bound) + goto not_bound; + + params->suspend_time = 100; + params->max_out_time = 600; + + if (iwl_mvm_low_latency(mvm)) { + params->suspend_time = 250; + params->max_out_time = 250; } +not_bound: + for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) { - if (params->passive_fragmented) - params->dwell[band].passive = 20; - else - params->dwell[band].passive = - iwl_mvm_get_passive_dwell(band); + params->dwell[band].passive = iwl_mvm_get_passive_dwell(band); params->dwell[band].active = iwl_mvm_get_active_dwell(band, n_ssids); } @@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm, int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; int head = 0; - int tail = band_2ghz + band_5ghz; + int tail = band_2ghz + band_5ghz - 1; u32 ssid_bitmap; int cmd_len; int ret; diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index d619851745a..2180902266a 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm) return result; } + +static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) +{ + bool *idle = _data; + + if (!vif->bss_conf.idle) + *idle = false; +} + +bool iwl_mvm_is_idle(struct iwl_mvm *mvm) +{ + bool idle = true; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_idle_iter, &idle); + + return idle; +} diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index dcfd6d866d0..2365553f1ef 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); + trans->dev = &pdev->dev; + trans_pcie->pci_dev = pdev; + iwl_disable_interrupts(trans); + err = pci_enable_msi(pdev); if (err) { dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); @@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } - trans->dev = &pdev->dev; - trans_pcie->pci_dev = pdev; trans->hw_rev = iwl_read32(trans, CSR_HW_REV); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), @@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_pci_disable_msi; } - trans_pcie->inta_mask = CSR_INI_SET_MASK; - if (iwl_pcie_alloc_ict(trans)) goto out_free_cmd_pool; @@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, goto out_free_ict; } + trans_pcie->inta_mask = CSR_INI_SET_MASK; + return trans; out_free_ict: diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c index ddeb5a709aa..a87ee9b6585 100644 --- a/drivers/net/wireless/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/rt2x00/rt2x00mac.c @@ -621,20 +621,18 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, bss_conf->bssid); /* - * Update the beacon. This is only required on USB devices. PCI - * devices fetch beacons periodically. 
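Backing up to the coex.c hunk above: building valid_bit_msk with plain assignment meant the MPLUT branch overwrote the corunning-LUT valid bits written a few lines earlier, so only the last feature's bits reached the firmware; '|=' accumulates them instead. A minimal userspace rendering of that bug class, with invented flag names:

#include <stdint.h>
#include <stdio.h>

#define VALID_FEAT_A (1u << 0) /* hypothetical valid bits */
#define VALID_FEAT_B (1u << 1)

int main(void)
{
        uint32_t clobbered = 0, accumulated = 0;

        /* first feature enabled */
        clobbered = VALID_FEAT_A;    /* '=' discards anything set before */
        accumulated |= VALID_FEAT_A; /* '|=' preserves earlier bits */

        /* second feature enabled */
        clobbered = VALID_FEAT_B;    /* the FEAT_A bit is now lost */
        accumulated |= VALID_FEAT_B;

        /* prints clobbered=0x2 accumulated=0x3 */
        printf("clobbered=0x%x accumulated=0x%x\n",
               (unsigned)clobbered, (unsigned)accumulated);
        return 0;
}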
- */ - if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev)) - rt2x00queue_update_beacon(rt2x00dev, vif); - - /* * Start/stop beaconing. */ if (changes & BSS_CHANGED_BEACON_ENABLED) { if (!bss_conf->enable_beacon && intf->enable_beacon) { - rt2x00queue_clear_beacon(rt2x00dev, vif); rt2x00dev->intf_beaconing--; intf->enable_beacon = false; + /* + * Clear beacon in the H/W for this vif. This is needed + * to disable beaconing on this particular interface + * and keep it running on other interfaces. + */ + rt2x00queue_clear_beacon(rt2x00dev, vif); if (rt2x00dev->intf_beaconing == 0) { /* @@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, rt2x00queue_stop_queue(rt2x00dev->bcn); mutex_unlock(&intf->beacon_skb_mutex); } - - } else if (bss_conf->enable_beacon && !intf->enable_beacon) { rt2x00dev->intf_beaconing++; intf->enable_beacon = true; + /* + * Upload beacon to the H/W. This is only required on + * USB devices. PCI devices fetch beacons periodically. + */ + if (rt2x00_is_usb(rt2x00dev)) + rt2x00queue_update_beacon(rt2x00dev, vif); if (rt2x00dev->intf_beaconing == 1) { /* diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c index 06ef47cd620..5b4c225396f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c @@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *psaddr; __le16 fc; u16 type, ufc; - bool match_bssid, packet_toself, packet_beacon, addr; + bool match_bssid, packet_toself, packet_beacon = false, addr; tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c index 68b5c7e92cf..07cb06da672 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c @@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw) err = _rtl92cu_init_mac(hw); if (err) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n"); - return err; + goto exit; } err = rtl92c_download_fw(hw); if (err) { diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c index 36b48be8329..2b3c78baa9f 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c +++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c @@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue) if (ieee80211_is_nullfunc(fc)) return QSLT_HIGH; + /* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use + * queue V0 at priority 7; however, the RTL8192SE appears to have + * that queue at priority 6 + */ + if (skb->priority == 7) + return QSLT_VO; return skb->priority; } diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 630a3fcf65b..0d4a285cbd7 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif, grant_ref_t rx_ring_ref); /* Check for SKBs from frontend and schedule backend processing */ -void xenvif_check_rx_xenvif(struct xenvif *vif); +void xenvif_napi_schedule_or_enable_events(struct xenvif *vif); /* Prevent the device from generating any further traffic. 
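The rt2x00 hunk above ties the per-vif beacon work (always clear the vif's beacon on disable, upload it on enable for USB parts) to the intf_beaconing refcount, so the shared beacon queue is started only by the first beaconing interface and stopped only by the last. A toy model of that counting scheme, all names invented:

#include <stdbool.h>
#include <stdio.h>

static int intf_beaconing; /* number of vifs currently beaconing */

static void beacon_enable(bool is_usb)
{
        intf_beaconing++;
        if (is_usb)
                printf("upload beacon for this vif (USB devices only)\n");
        if (intf_beaconing == 1)
                printf("first beaconing vif: start beacon queue\n");
}

static void beacon_disable(void)
{
        intf_beaconing--;
        printf("clear this vif's beacon in the hardware\n");
        if (intf_beaconing == 0)
                printf("last beaconing vif gone: stop beacon queue\n");
}

int main(void)
{
        beacon_enable(true);  /* vif A */
        beacon_enable(true);  /* vif B */
        beacon_disable();     /* vif A: queue keeps running for B */
        beacon_disable();     /* vif B: queue stops */
        return 0;
}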
*/ void xenvif_carrier_off(struct xenvif *vif); diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index ef05c5c49d4..20e9defa106 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget) work_done = xenvif_tx_action(vif, budget); if (work_done < budget) { - int more_to_do = 0; - unsigned long flags; - - /* It is necessary to disable IRQ before calling - * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might - * lose event from the frontend. - * - * Consider: - * RING_HAS_UNCONSUMED_REQUESTS - * <frontend generates event to trigger napi_schedule> - * __napi_complete - * - * This handler is still in scheduled state so the - * event has no effect at all. After __napi_complete - * this handler is descheduled and cannot get - * scheduled again. We lose event in this case and the ring - * will be completely stalled. - */ - - local_irq_save(flags); - - RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do); - if (!more_to_do) - __napi_complete(napi); - - local_irq_restore(flags); + napi_complete(napi); + xenvif_napi_schedule_or_enable_events(vif); } return work_done; @@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif) enable_irq(vif->tx_irq); if (vif->tx_irq != vif->rx_irq) enable_irq(vif->rx_irq); - xenvif_check_rx_xenvif(vif); + xenvif_napi_schedule_or_enable_events(vif); } static void xenvif_down(struct xenvif *vif) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 76665405c5a..7367208ee8c 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif, /* Find the containing VIF's structure from a pointer in pending_tx_info array */ -static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf) +static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf) { u16 pending_idx = ubuf->desc; struct pending_tx_info *temp = @@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, } /* + * Find the grant ref for a given frag in a chain of struct ubuf_info's + * skb: the skb itself + * i: the frag's number + * ubuf: a pointer to an element in the chain. It should not be NULL + * + * Returns a pointer to the element in the chain where the page was found. If + * not found, returns NULL. + * See the definition of callback_struct in common.h for more details about + * the chain. + */ +static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb, + const int i, + const struct ubuf_info *ubuf) +{ + struct xenvif *foreign_vif = ubuf_to_vif(ubuf); + + do { + u16 pending_idx = ubuf->desc; + + if (skb_shinfo(skb)->frags[i].page.p == + foreign_vif->mmap_pages[pending_idx]) + break; + ubuf = (struct ubuf_info *) ubuf->ctx; + } while (ubuf); + + return ubuf; +} + +/* * Prepare an SKB to be transmitted to the frontend. 
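The xenvif_poll() rewrite above depends on xenvif_napi_schedule_or_enable_events() re-examining the ring after napi_complete(), so an event that fires while the handler is completing reschedules polling instead of being lost. A deliberately single-threaded toy rendering of that final re-check (all names invented; the real code is interrupt driven):

#include <stdbool.h>
#include <stdio.h>

static int ring_pending; /* unconsumed requests on the shared ring */
static bool polling;     /* analogue of the NAPI scheduled state */

/* Analogue of the "schedule or enable events" helper: after leaving the
 * polling state, look at the ring once more; if requests slipped in while
 * we were completing, poll again rather than waiting for an event that
 * already fired. */
static void schedule_or_enable_events(void)
{
        if (ring_pending)
                polling = true; /* reschedule polling */
        else
                printf("ring idle: re-enable event notifications\n");
}

int main(void)
{
        polling = true;   /* poll loop ran and consumed everything... */
        polling = false;  /* ...so it completes */
        ring_pending = 1; /* ...but the frontend queued more meanwhile */

        /* this re-check closes the race the old code handled by disabling
         * IRQs around RING_FINAL_CHECK_FOR_REQUESTS() */
        schedule_or_enable_events();
        printf("polling=%d\n", polling); /* 1: no lost event */
        return 0;
}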
* * This function is responsible for allocating grant operations, meta @@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb, int head = 1; int old_meta_prod; int gso_type; - struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; - grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; - struct xenvif *foreign_vif = NULL; + const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; + const struct ubuf_info *const head_ubuf = ubuf; old_meta_prod = npo->meta_prod; @@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb, npo->copy_off = 0; npo->copy_gref = req->gref; - if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && - (ubuf->callback == &xenvif_zerocopy_callback)) { - int i = 0; - foreign_vif = ubuf_to_vif(ubuf); - - do { - u16 pending_idx = ubuf->desc; - foreign_grefs[i++] = - foreign_vif->pending_tx_info[pending_idx].req.gref; - ubuf = (struct ubuf_info *) ubuf->ctx; - } while (ubuf); - } - data = skb->data; while (data < skb_tail_pointer(skb)) { unsigned int offset = offset_in_page(data); @@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb, } for (i = 0; i < nr_frags; i++) { + /* This variable also signals whether foreign_gref has a real + * value or not. + */ + struct xenvif *foreign_vif = NULL; + grant_ref_t foreign_gref; + + if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) && + (ubuf->callback == &xenvif_zerocopy_callback)) { + const struct ubuf_info *const startpoint = ubuf; + + /* Ideally ubuf points to the chain element which + * belongs to this frag. Or if frags were removed from + * the beginning, then shortly before it. + */ + ubuf = xenvif_find_gref(skb, i, ubuf); + + /* Try again from the beginning of the list, if we + * haven't tried from there. This only makes sense in + * the unlikely event of reordering the original frags. + * For injected local pages it's an unnecessary second + * run. + */ + if (unlikely(!ubuf) && startpoint != head_ubuf) + ubuf = xenvif_find_gref(skb, i, head_ubuf); + + if (likely(ubuf)) { + u16 pending_idx = ubuf->desc; + + foreign_vif = ubuf_to_vif(ubuf); + foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref; + /* Just a safety measure. If this was the last + * element on the list, the for loop will + * iterate again if a local page were added to + * the end. Using head_ubuf here prevents the + * second search on the chain. Or the original + * frags changed order, but that's less likely. + * In any way, ubuf shouldn't be NULL. + */ + ubuf = ubuf->ctx ? + (struct ubuf_info *) ubuf->ctx : + head_ubuf; + } else + /* This frag was a local page, added to the + * array after the skb left netback. + */ + ubuf = head_ubuf; + } xenvif_gop_frag_copy(vif, skb, npo, skb_frag_page(&skb_shinfo(skb)->frags[i]), skb_frag_size(&skb_shinfo(skb)->frags[i]), skb_shinfo(skb)->frags[i].page_offset, &head, foreign_vif, - foreign_grefs[i]); + foreign_vif ? 
foreign_gref : UINT_MAX); } return npo->meta_prod - old_meta_prod; @@ -654,7 +716,7 @@ done: notify_remote_via_irq(vif->rx_irq); } -void xenvif_check_rx_xenvif(struct xenvif *vif) +void xenvif_napi_schedule_or_enable_events(struct xenvif *vif) { int more_to_do; @@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data) { struct xenvif *vif = (struct xenvif *)data; tx_add_credit(vif); - xenvif_check_rx_xenvif(vif); + xenvif_napi_schedule_or_enable_events(vif); } static void xenvif_tx_err(struct xenvif *vif, diff --git a/drivers/of/base.c b/drivers/of/base.c index 6d4ee22708c..32e969d9531 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -1831,6 +1831,10 @@ int of_update_property(struct device_node *np, struct property *newprop) if (!found) return -ENODEV; + /* At early boot, bail out and defer setup to of_init() */ + if (!of_kset) + return found ? 0 : -ENODEV; + /* Update the sysfs attribute */ sysfs_remove_bin_file(&np->kobj, &oldprop->attr); __of_add_property_sysfs(np, newprop); diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 9bcf2cf1935..5aeb8941135 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) memset(r, 0, sizeof(*r)); /* - * Get optional "interrupts-names" property to add a name + * Get optional "interrupt-names" property to add a name * to the resource. */ of_property_read_string_index(dev, "interrupt-names", index, @@ -380,6 +380,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) EXPORT_SYMBOL_GPL(of_irq_to_resource); /** + * of_irq_get - Decode a node's IRQ and return it as a Linux irq number + * @dev: pointer to device tree node + * @index: zero-based index of the irq + * + * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain + * is not yet created. 
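A driver sitting on top of this helper can propagate the deferral so its probe is retried once the interrupt controller's domain is registered. A hedged sketch of such a consumer (everything named foo_* is hypothetical):

#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
        int irq = of_irq_get(pdev->dev.of_node, 0);

        /* domain not there yet: let the driver core retry us later */
        if (irq == -EPROBE_DEFER)
                return irq;
        /* any other negative value is a genuine parse/mapping error */
        if (irq < 0)
                return irq;

        return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
                                dev_name(&pdev->dev), pdev);
}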
+ * + */ +int of_irq_get(struct device_node *dev, int index) +{ + int rc; + struct of_phandle_args oirq; + struct irq_domain *domain; + + rc = of_irq_parse_one(dev, index, &oirq); + if (rc) + return rc; + + domain = irq_find_host(oirq.np); + if (!domain) + return -EPROBE_DEFER; + + return irq_create_of_mapping(&oirq); +} + +/** * of_irq_count - Count the number of IRQs a node uses * @dev: pointer to device tree node */ diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 404d1daebef..e8376d646d9 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np, rc = of_address_to_resource(np, i, res); WARN_ON(rc); } - WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq); + if (of_irq_to_resource_table(np, res, num_irq) != num_irq) + pr_debug("not all legacy IRQ resources mapped for %s\n", + np->name); } dev->dev.of_node = of_node_get(np); @@ -204,12 +206,13 @@ static struct platform_device *of_platform_device_create_pdata( { struct platform_device *dev; - if (!of_device_is_available(np)) + if (!of_device_is_available(np) || + of_node_test_and_set_flag(np, OF_POPULATED)) return NULL; dev = of_device_alloc(np, bus_id, parent); if (!dev) - return NULL; + goto err_clear_flag; #if defined(CONFIG_MICROBLAZE) dev->archdata.dma_mask = 0xffffffffUL; @@ -227,10 +230,14 @@ static struct platform_device *of_platform_device_create_pdata( if (of_device_add(dev) != 0) { platform_device_put(dev); - return NULL; + goto err_clear_flag; } return dev; + +err_clear_flag: + of_node_clear_flag(np, OF_POPULATED); + return NULL; } /** @@ -262,14 +269,15 @@ static struct amba_device *of_amba_device_create(struct device_node *node, pr_debug("Creating amba device %s\n", node->full_name); - if (!of_device_is_available(node)) + if (!of_device_is_available(node) || + of_node_test_and_set_flag(node, OF_POPULATED)) return NULL; dev = amba_device_alloc(NULL, 0, 0); if (!dev) { pr_err("%s(): amba_device_alloc() failed for %s\n", __func__, node->full_name); - return NULL; + goto err_clear_flag; } /* setup generic device info */ @@ -309,6 +317,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node, err_free: amba_device_put(dev); +err_clear_flag: + of_node_clear_flag(node, OF_POPULATED); return NULL; } #else /* CONFIG_ARM_AMBA */ @@ -485,4 +495,60 @@ int of_platform_populate(struct device_node *root, return rc; } EXPORT_SYMBOL_GPL(of_platform_populate); + +static int of_platform_device_destroy(struct device *dev, void *data) +{ + bool *children_left = data; + + /* Do not touch devices not populated from the device tree */ + if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED)) { + *children_left = true; + return 0; + } + + /* Recurse, but don't touch this device if it has any children left */ + if (of_platform_depopulate(dev) != 0) { + *children_left = true; + return 0; + } + + if (dev->bus == &platform_bus_type) + platform_device_unregister(to_platform_device(dev)); +#ifdef CONFIG_ARM_AMBA + else if (dev->bus == &amba_bustype) + amba_device_unregister(to_amba_device(dev)); +#endif + else { + *children_left = true; + return 0; + } + + of_node_clear_flag(dev->of_node, OF_POPULATED); + + return 0; +} + +/** + * of_platform_depopulate() - Remove devices populated from device tree + * @parent: device whose children will be removed + * + * Complementary to of_platform_populate(), this function removes children + * of the given device (and, recursively, their children) that have been + * 
created from their respective device tree nodes (and only those, + * leaving others - eg. manually created - unharmed). + * + * Returns 0 when all children devices have been removed or + * -EBUSY when some children remained. + */ +int of_platform_depopulate(struct device *parent) +{ + bool children_left = false; + + device_for_each_child(parent, &children_left, + of_platform_device_destroy); + + return children_left ? -EBUSY : 0; +} +EXPORT_SYMBOL_GPL(of_platform_depopulate); + #endif /* CONFIG_OF_ADDRESS */ diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c index ae445007050..fe70b86bcff 100644 --- a/drivers/of/selftest.c +++ b/drivers/of/selftest.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_irq.h> +#include <linux/of_platform.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void) } } +static void __init of_selftest_platform_populate(void) +{ + int irq; + struct device_node *np; + struct platform_device *pdev; + + np = of_find_node_by_path("/testcase-data"); + of_platform_populate(np, of_default_bus_match_table, NULL, NULL); + + /* Test that a missing irq domain returns -EPROBE_DEFER */ + np = of_find_node_by_path("/testcase-data/testcase-device1"); + pdev = of_find_device_by_node(np); + if (!pdev) + selftest(0, "device 1 creation failed\n"); + irq = platform_get_irq(pdev, 0); + if (irq != -EPROBE_DEFER) + selftest(0, "device deferred probe failed - %d\n", irq); + + /* Test that a parsing failure does not return -EPROBE_DEFER */ + np = of_find_node_by_path("/testcase-data/testcase-device2"); + pdev = of_find_device_by_node(np); + if (!pdev) + selftest(0, "device 2 creation failed\n"); + irq = platform_get_irq(pdev, 0); + if (irq >= 0 || irq == -EPROBE_DEFER) + selftest(0, "device parsing error failed - %d\n", irq); + + selftest(1, "passed"); +} + static int __init of_selftest(void) { struct device_node *np; @@ -445,6 +476,7 @@ static int __init of_selftest(void) of_selftest_parse_interrupts(); of_selftest_parse_interrupts_extended(); of_selftest_match_node(); + of_selftest_platform_populate(); pr_info("end of selftest - %i passed, %i failed\n", selftest_results.passed, selftest_results.failed); return 0; diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi index c843720bd3e..da4695f6035 100644 --- a/drivers/of/testcase-data/tests-interrupts.dtsi +++ b/drivers/of/testcase-data/tests-interrupts.dtsi @@ -54,5 +54,18 @@ <&test_intmap1 1 2>; }; }; + + testcase-device1 { + compatible = "testcase-device"; + interrupt-parent = <&test_intc0>; + interrupts = <1>; + }; + + testcase-device2 { + compatible = "testcase-device"; + interrupt-parent = <&test_intc2>; + interrupts = <1>; /* invalid specifier - too short */ + }; }; + }; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 7f8b78c0887..8c148f39e8d 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -148,7 +148,7 @@ static noinline void pci_wait_cfg(struct pci_dev *dev) int pci_user_read_config_##size \ (struct pci_dev *dev, int pos, type *val) \ { \ - int ret = 0; \ + int ret = PCIBIOS_SUCCESSFUL; \ u32 data = -1; \ if (PCI_##size##_BAD) \ return -EINVAL; \ @@ -159,9 +159,7 @@ int pci_user_read_config_##size \ pos, sizeof(type), &data); \ raw_spin_unlock_irq(&pci_lock); \ *val = (type)data; \ - if (ret > 0) \ - ret = -EINVAL; \ - return ret; \ + return pcibios_err_to_errno(ret); \ } \ 
EXPORT_SYMBOL_GPL(pci_user_read_config_##size); @@ -170,7 +168,7 @@ EXPORT_SYMBOL_GPL(pci_user_read_config_##size); int pci_user_write_config_##size \ (struct pci_dev *dev, int pos, type val) \ { \ - int ret = -EIO; \ + int ret = PCIBIOS_SUCCESSFUL; \ if (PCI_##size##_BAD) \ return -EINVAL; \ raw_spin_lock_irq(&pci_lock); \ @@ -179,9 +177,7 @@ int pci_user_write_config_##size \ ret = dev->bus->ops->write(dev->bus, dev->devfn, \ pos, sizeof(type), val); \ raw_spin_unlock_irq(&pci_lock); \ - if (ret > 0) \ - ret = -EINVAL; \ - return ret; \ + return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_write_config_##size); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index fb8aed307c2..447d393725e 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -13,7 +13,6 @@ #include <linux/errno.h> #include <linux/ioport.h> #include <linux/proc_fs.h> -#include <linux/init.h> #include <linux/slab.h> #include "pci.h" @@ -236,7 +235,7 @@ void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } * * This adds add sysfs entries and start device drivers */ -int pci_bus_add_device(struct pci_dev *dev) +void pci_bus_add_device(struct pci_dev *dev) { int retval; @@ -253,8 +252,6 @@ int pci_bus_add_device(struct pci_dev *dev) WARN_ON(retval < 0); dev->is_added = 1; - - return 0; } /** @@ -267,16 +264,12 @@ void pci_bus_add_devices(const struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child; - int retval; list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip already-added devices */ if (dev->is_added) continue; - retval = pci_bus_add_device(dev); - if (retval) - dev_err(&dev->dev, "Error adding device (%d)\n", - retval); + pci_bus_add_device(dev); } list_for_each_entry(dev, &bus->devices, bus_list) { diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c index 47aaf22d814..0e5f3c95af5 100644 --- a/drivers/pci/host-bridge.c +++ b/drivers/pci/host-bridge.c @@ -3,7 +3,6 @@ */ #include <linux/kernel.h> -#include <linux/init.h> #include <linux/pci.h> #include <linux/module.h> diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index a6f67ec8882..21df477be0c 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig @@ -33,4 +33,17 @@ config PCI_RCAR_GEN2 There are 3 internal PCI controllers available with a single built-in EHCI/OHCI host controller present on each one. +config PCI_RCAR_GEN2_PCIE + bool "Renesas R-Car PCIe controller" + depends on ARCH_SHMOBILE || (ARM && COMPILE_TEST) + help + Say Y here if you want PCIe controller support on R-Car Gen2 SoCs. + +config PCI_HOST_GENERIC + bool "Generic PCI host controller" + depends on ARM && OF + help + Say Y here if you want to support a simple generic PCI host + controller, such as the one emulated by kvmtool. 
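Looking back at the pci/access.c change above: returning pcibios_err_to_errno(ret) preserves the specific PCIBIOS failure instead of collapsing every positive status code to -EINVAL. The userspace sketch below paraphrases that mapping for one code; the constant matches the classic PCI BIOS value, but the full kernel table lives in include/linux/pci.h and is not quoted verbatim here:

#include <errno.h>
#include <stdio.h>

#define PCIBIOS_SUCCESSFUL       0x00
#define PCIBIOS_DEVICE_NOT_FOUND 0x86

static int pcibios_err_to_errno_demo(int err)
{
        if (err <= PCIBIOS_SUCCESSFUL)
                return err; /* success, or already a negative errno */
        switch (err) {
        case PCIBIOS_DEVICE_NOT_FOUND:
                return -ENODEV; /* now distinguishable from -EINVAL */
        default:
                return -ERANGE; /* unknown positive status */
        }
}

int main(void)
{
        /* prints -ENODEV's value rather than a blanket -EINVAL */
        printf("%d\n", pcibios_err_to_errno_demo(PCIBIOS_DEVICE_NOT_FOUND));
        return 0;
}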
+ + endmenu diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index 13fb3333aa0..611ba4b48c9 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile @@ -4,3 +4,5 @@ obj-$(CONFIG_PCI_IMX6) += pci-imx6.o obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o +obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o +obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c index 3de6bfbbe8e..1632661c5b7 100644 --- a/drivers/pci/host/pci-exynos.c +++ b/drivers/pci/host/pci-exynos.c @@ -415,9 +415,7 @@ static irqreturn_t exynos_pcie_msi_irq_handler(int irq, void *arg) { struct pcie_port *pp = arg; - dw_handle_msi_irq(pp); - - return IRQ_HANDLED; + return dw_handle_msi_irq(pp); } static void exynos_pcie_msi_init(struct pcie_port *pp) @@ -511,7 +509,8 @@ static struct pcie_host_ops exynos_pcie_host_ops = { .host_init = exynos_pcie_host_init, }; -static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) +static int __init add_pcie_port(struct pcie_port *pp, + struct platform_device *pdev) { int ret; @@ -568,10 +567,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev) exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie), GFP_KERNEL); - if (!exynos_pcie) { - dev_err(&pdev->dev, "no memory for exynos pcie\n"); + if (!exynos_pcie) return -ENOMEM; - } pp = &exynos_pcie->pp; diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c new file mode 100644 index 00000000000..44fe6aa6a43 --- /dev/null +++ b/drivers/pci/host/pci-host-generic.c @@ -0,0 +1,388 @@ +/* + * Simple, generic PCI host controller driver targeting firmware-initialised + * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
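The generic host driver that follows supports two configuration-space layouts: CAM, with 256 bytes of config space per function and a bus_shift of 16, and ECAM, with 4 KB per function and a bus_shift of 20. The offset arithmetic behind its map_bus callbacks can be sanity-checked in isolation:

#include <stdint.h>
#include <stdio.h>

/* devfn packs device (5 bits) and function (3 bits), as in the kernel */
#define PCI_DEVFN(dev, fn) ((((dev) & 0x1f) << 3) | ((fn) & 0x07))

/* CAM: 8 bits of config space per function, bus_shift = 16 */
static uint32_t cam_offset(uint8_t bus, uint8_t devfn, uint16_t where)
{
        return ((uint32_t)bus << 16) | ((uint32_t)devfn << 8) | where;
}

/* ECAM: 12 bits of config space per function, bus_shift = 20 */
static uint32_t ecam_offset(uint8_t bus, uint8_t devfn, uint16_t where)
{
        return ((uint32_t)bus << 20) | ((uint32_t)devfn << 12) | where;
}

int main(void)
{
        uint8_t devfn = PCI_DEVFN(3, 0);

        printf("CAM  bus 1 dev 3 fn 0 reg 0x10 -> 0x%06x\n",
               (unsigned)cam_offset(1, devfn, 0x10));
        printf("ECAM bus 1 dev 3 fn 0 reg 0x10 -> 0x%06x\n",
               (unsigned)ecam_offset(1, devfn, 0x10));
        return 0;
}

In the driver itself the bus number selects a per-bus mapped window, so map_bus only adds the (devfn << 8 | where) or (devfn << 12 | where) part; bus_shift sizes those windows.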
+ * + * Copyright (C) 2014 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/platform_device.h> + +struct gen_pci_cfg_bus_ops { + u32 bus_shift; + void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int); +}; + +struct gen_pci_cfg_windows { + struct resource res; + struct resource bus_range; + void __iomem **win; + + const struct gen_pci_cfg_bus_ops *ops; +}; + +struct gen_pci { + struct pci_host_bridge host; + struct gen_pci_cfg_windows cfg; + struct list_head resources; +}; + +static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, + unsigned int devfn, + int where) +{ + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + resource_size_t idx = bus->number - pci->cfg.bus_range.start; + + return pci->cfg.win[idx] + ((devfn << 8) | where); +} + +static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = { + .bus_shift = 16, + .map_bus = gen_pci_map_cfg_bus_cam, +}; + +static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, + unsigned int devfn, + int where) +{ + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + resource_size_t idx = bus->number - pci->cfg.bus_range.start; + + return pci->cfg.win[idx] + ((devfn << 12) | where); +} + +static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { + .bus_shift = 20, + .map_bus = gen_pci_map_cfg_bus_ecam, +}; + +static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + void __iomem *addr; + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + + addr = pci->cfg.ops->map_bus(bus, devfn, where); + + switch (size) { + case 1: + *val = readb(addr); + break; + case 2: + *val = readw(addr); + break; + default: + *val = readl(addr); + } + + return PCIBIOS_SUCCESSFUL; +} + +static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + void __iomem *addr; + struct pci_sys_data *sys = bus->sysdata; + struct gen_pci *pci = sys->private_data; + + addr = pci->cfg.ops->map_bus(bus, devfn, where); + + switch (size) { + case 1: + writeb(val, addr); + break; + case 2: + writew(val, addr); + break; + default: + writel(val, addr); + } + + return PCIBIOS_SUCCESSFUL; +} + +static struct pci_ops gen_pci_ops = { + .read = gen_pci_config_read, + .write = gen_pci_config_write, +}; + +static const struct of_device_id gen_pci_of_match[] = { + { .compatible = "pci-host-cam-generic", + .data = &gen_pci_cfg_cam_bus_ops }, + + { .compatible = "pci-host-ecam-generic", + .data = &gen_pci_cfg_ecam_bus_ops }, + + { }, +}; +MODULE_DEVICE_TABLE(of, gen_pci_of_match); + +static int gen_pci_calc_io_offset(struct device *dev, + struct of_pci_range *range, + struct resource *res, + resource_size_t *offset) +{ + static atomic_t wins = ATOMIC_INIT(0); + int err, idx, max_win; + unsigned int window; + + if (!PAGE_ALIGNED(range->cpu_addr)) + return -EINVAL; + + max_win = (IO_SPACE_LIMIT + 1) / SZ_64K; + idx = atomic_inc_return(&wins); + if (idx > max_win) + return -ENOSPC; + + window = (idx - 1) * SZ_64K; + err = pci_ioremap_io(window, range->cpu_addr); + if (err) + return err; + + of_pci_range_to_resource(range, dev->of_node, res); + res->start = window; + res->end = res->start + range->size - 1; + *offset = window - range->pci_addr; + return 0; +} + +static int gen_pci_calc_mem_offset(struct device *dev, + struct 
of_pci_range *range, + struct resource *res, + resource_size_t *offset) +{ + of_pci_range_to_resource(range, dev->of_node, res); + *offset = range->cpu_addr - range->pci_addr; + return 0; +} + +static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) +{ + struct pci_host_bridge_window *win; + + list_for_each_entry(win, &pci->resources, list) + release_resource(win->res); + + pci_free_resource_list(&pci->resources); +} + +static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int err, res_valid = 0; + struct device *dev = pci->host.dev.parent; + struct device_node *np = dev->of_node; + + if (of_pci_range_parser_init(&parser, np)) { + dev_err(dev, "missing \"ranges\" property\n"); + return -EINVAL; + } + + for_each_of_pci_range(&parser, &range) { + struct resource *parent, *res; + resource_size_t offset; + u32 restype = range.flags & IORESOURCE_TYPE_BITS; + + res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL); + if (!res) { + err = -ENOMEM; + goto out_release_res; + } + + switch (restype) { + case IORESOURCE_IO: + parent = &ioport_resource; + err = gen_pci_calc_io_offset(dev, &range, res, &offset); + break; + case IORESOURCE_MEM: + parent = &iomem_resource; + err = gen_pci_calc_mem_offset(dev, &range, res, &offset); + res_valid |= !(res->flags & IORESOURCE_PREFETCH || err); + break; + default: + err = -EINVAL; + continue; + } + + if (err) { + dev_warn(dev, + "error %d: failed to add resource [type 0x%x, %lld bytes]\n", + err, restype, range.size); + continue; + } + + err = request_resource(parent, res); + if (err) + goto out_release_res; + + pci_add_resource_offset(&pci->resources, res, offset); + } + + if (!res_valid) { + dev_err(dev, "non-prefetchable memory resource required\n"); + err = -EINVAL; + goto out_release_res; + } + + return 0; + +out_release_res: + gen_pci_release_of_pci_ranges(pci); + return err; +} + +static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) +{ + int err; + u8 bus_max; + resource_size_t busn; + struct resource *bus_range; + struct device *dev = pci->host.dev.parent; + struct device_node *np = dev->of_node; + + if (of_pci_parse_bus_range(np, &pci->cfg.bus_range)) + pci->cfg.bus_range = (struct resource) { + .name = np->name, + .start = 0, + .end = 0xff, + .flags = IORESOURCE_BUS, + }; + + err = of_address_to_resource(np, 0, &pci->cfg.res); + if (err) { + dev_err(dev, "missing \"reg\" property\n"); + return err; + } + + pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range), + sizeof(*pci->cfg.win), GFP_KERNEL); + if (!pci->cfg.win) + return -ENOMEM; + + /* Limit the bus-range to fit within reg */ + bus_max = pci->cfg.bus_range.start + + (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; + pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end, + bus_max); + + /* Map our Configuration Space windows */ + if (!devm_request_mem_region(dev, pci->cfg.res.start, + resource_size(&pci->cfg.res), + "Configuration Space")) + return -ENOMEM; + + bus_range = &pci->cfg.bus_range; + for (busn = bus_range->start; busn <= bus_range->end; ++busn) { + u32 idx = busn - bus_range->start; + u32 sz = 1 << pci->cfg.ops->bus_shift; + + pci->cfg.win[idx] = devm_ioremap(dev, + pci->cfg.res.start + busn * sz, + sz); + if (!pci->cfg.win[idx]) + return -ENOMEM; + } + + /* Register bus resource */ + pci_add_resource(&pci->resources, bus_range); + return 0; +} + +static int gen_pci_setup(int nr, struct pci_sys_data *sys) +{ + struct gen_pci *pci = 
sys->private_data; + list_splice_init(&pci->resources, &sys->resources); + return 1; +} + +static int gen_pci_probe(struct platform_device *pdev) +{ + int err; + const char *type; + const struct of_device_id *of_id; + const int *prop; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); + struct hw_pci hw = { + .nr_controllers = 1, + .private_data = (void **)&pci, + .setup = gen_pci_setup, + .map_irq = of_irq_parse_and_map_pci, + .ops = &gen_pci_ops, + }; + + if (!pci) + return -ENOMEM; + + type = of_get_property(np, "device_type", NULL); + if (!type || strcmp(type, "pci")) { + dev_err(dev, "invalid \"device_type\" %s\n", type); + return -EINVAL; + } + + prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL); + if (prop) { + if (*prop) + pci_add_flags(PCI_PROBE_ONLY); + else + pci_clear_flags(PCI_PROBE_ONLY); + } + + of_id = of_match_node(gen_pci_of_match, np); + pci->cfg.ops = of_id->data; + pci->host.dev.parent = dev; + INIT_LIST_HEAD(&pci->host.windows); + INIT_LIST_HEAD(&pci->resources); + + /* Parse our PCI ranges and request their resources */ + err = gen_pci_parse_request_of_pci_ranges(pci); + if (err) + return err; + + /* Parse and map our Configuration Space windows */ + err = gen_pci_parse_map_cfg_windows(pci); + if (err) { + gen_pci_release_of_pci_ranges(pci); + return err; + } + + pci_common_init_dev(dev, &hw); + return 0; +} + +static struct platform_driver gen_pci_driver = { + .driver = { + .name = "pci-host-generic", + .owner = THIS_MODULE, + .of_match_table = gen_pci_of_match, + }, + .probe = gen_pci_probe, +}; +module_platform_driver(gen_pci_driver); + +MODULE_DESCRIPTION("Generic PCI host driver"); +MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c index ee082509b0b..a5645ae4aef 100644 --- a/drivers/pci/host/pci-imx6.c +++ b/drivers/pci/host/pci-imx6.c @@ -25,6 +25,7 @@ #include <linux/resource.h> #include <linux/signal.h> #include <linux/types.h> +#include <linux/interrupt.h> #include "pcie-designware.h" @@ -32,13 +33,9 @@ struct imx6_pcie { int reset_gpio; - int power_on_gpio; - int wake_up_gpio; - int disable_gpio; - struct clk *lvds_gate; - struct clk *sata_ref_100m; - struct clk *pcie_ref_125m; - struct clk *pcie_axi; + struct clk *pcie_bus; + struct clk *pcie_phy; + struct clk *pcie; struct pcie_port pp; struct regmap *iomuxc_gpr; void __iomem *mem_base; @@ -231,36 +228,27 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); int ret; - if (gpio_is_valid(imx6_pcie->power_on_gpio)) - gpio_set_value(imx6_pcie->power_on_gpio, 1); - regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); - ret = clk_prepare_enable(imx6_pcie->sata_ref_100m); - if (ret) { - dev_err(pp->dev, "unable to enable sata_ref_100m\n"); - goto err_sata_ref; - } - - ret = clk_prepare_enable(imx6_pcie->pcie_ref_125m); + ret = clk_prepare_enable(imx6_pcie->pcie_phy); if (ret) { - dev_err(pp->dev, "unable to enable pcie_ref_125m\n"); - goto err_pcie_ref; + dev_err(pp->dev, "unable to enable pcie_phy clock\n"); + goto err_pcie_phy; } - ret = clk_prepare_enable(imx6_pcie->lvds_gate); + ret = clk_prepare_enable(imx6_pcie->pcie_bus); if (ret) { - dev_err(pp->dev, "unable to enable lvds_gate\n"); - goto err_lvds_gate; + 
dev_err(pp->dev, "unable to enable pcie_bus clock\n"); + goto err_pcie_bus; } - ret = clk_prepare_enable(imx6_pcie->pcie_axi); + ret = clk_prepare_enable(imx6_pcie->pcie); if (ret) { - dev_err(pp->dev, "unable to enable pcie_axi\n"); - goto err_pcie_axi; + dev_err(pp->dev, "unable to enable pcie clock\n"); + goto err_pcie; } /* allow the clocks to stabilize */ @@ -274,13 +262,11 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp) } return 0; -err_pcie_axi: - clk_disable_unprepare(imx6_pcie->lvds_gate); -err_lvds_gate: - clk_disable_unprepare(imx6_pcie->pcie_ref_125m); -err_pcie_ref: - clk_disable_unprepare(imx6_pcie->sata_ref_100m); -err_sata_ref: +err_pcie: + clk_disable_unprepare(imx6_pcie->pcie_bus); +err_pcie_bus: + clk_disable_unprepare(imx6_pcie->pcie_phy); +err_pcie_phy: return ret; } @@ -329,6 +315,13 @@ static int imx6_pcie_wait_for_link(struct pcie_port *pp) return 0; } +static irqreturn_t imx6_pcie_msi_handler(int irq, void *arg) +{ + struct pcie_port *pp = arg; + + return dw_handle_msi_irq(pp); +} + static int imx6_pcie_start_link(struct pcie_port *pp) { struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp); @@ -403,6 +396,9 @@ static void imx6_pcie_host_init(struct pcie_port *pp) dw_pcie_setup_rc(pp); imx6_pcie_start_link(pp); + + if (IS_ENABLED(CONFIG_PCI_MSI)) + dw_pcie_msi_init(pp); } static void imx6_pcie_reset_phy(struct pcie_port *pp) @@ -487,15 +483,25 @@ static struct pcie_host_ops imx6_pcie_host_ops = { .host_init = imx6_pcie_host_init, }; -static int imx6_add_pcie_port(struct pcie_port *pp, +static int __init imx6_add_pcie_port(struct pcie_port *pp, struct platform_device *pdev) { int ret; - pp->irq = platform_get_irq(pdev, 0); - if (!pp->irq) { - dev_err(&pdev->dev, "failed to get irq\n"); - return -ENODEV; + if (IS_ENABLED(CONFIG_PCI_MSI)) { + pp->msi_irq = platform_get_irq_byname(pdev, "msi"); + if (pp->msi_irq <= 0) { + dev_err(&pdev->dev, "failed to get MSI irq\n"); + return -ENODEV; + } + + ret = devm_request_irq(&pdev->dev, pp->msi_irq, + imx6_pcie_msi_handler, + IRQF_SHARED, "mx6-pcie-msi", pp); + if (ret) { + dev_err(&pdev->dev, "failed to request MSI irq\n"); + return -ENODEV; + } } pp->root_bus_nr = -1; @@ -546,69 +552,26 @@ static int __init imx6_pcie_probe(struct platform_device *pdev) } } - imx6_pcie->power_on_gpio = of_get_named_gpio(np, "power-on-gpio", 0); - if (gpio_is_valid(imx6_pcie->power_on_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, - imx6_pcie->power_on_gpio, - GPIOF_OUT_INIT_LOW, - "PCIe power enable"); - if (ret) { - dev_err(&pdev->dev, "unable to get power-on gpio\n"); - return ret; - } - } - - imx6_pcie->wake_up_gpio = of_get_named_gpio(np, "wake-up-gpio", 0); - if (gpio_is_valid(imx6_pcie->wake_up_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, - imx6_pcie->wake_up_gpio, - GPIOF_IN, - "PCIe wake up"); - if (ret) { - dev_err(&pdev->dev, "unable to get wake-up gpio\n"); - return ret; - } - } - - imx6_pcie->disable_gpio = of_get_named_gpio(np, "disable-gpio", 0); - if (gpio_is_valid(imx6_pcie->disable_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, - imx6_pcie->disable_gpio, - GPIOF_OUT_INIT_HIGH, - "PCIe disable endpoint"); - if (ret) { - dev_err(&pdev->dev, "unable to get disable-ep gpio\n"); - return ret; - } - } - /* Fetch clocks */ - imx6_pcie->lvds_gate = devm_clk_get(&pdev->dev, "lvds_gate"); - if (IS_ERR(imx6_pcie->lvds_gate)) { - dev_err(&pdev->dev, - "lvds_gate clock select missing or invalid\n"); - return PTR_ERR(imx6_pcie->lvds_gate); - } - - imx6_pcie->sata_ref_100m = devm_clk_get(&pdev->dev, 
"sata_ref_100m"); - if (IS_ERR(imx6_pcie->sata_ref_100m)) { + imx6_pcie->pcie_phy = devm_clk_get(&pdev->dev, "pcie_phy"); + if (IS_ERR(imx6_pcie->pcie_phy)) { dev_err(&pdev->dev, - "sata_ref_100m clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->sata_ref_100m); + "pcie_phy clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_phy); } - imx6_pcie->pcie_ref_125m = devm_clk_get(&pdev->dev, "pcie_ref_125m"); - if (IS_ERR(imx6_pcie->pcie_ref_125m)) { + imx6_pcie->pcie_bus = devm_clk_get(&pdev->dev, "pcie_bus"); + if (IS_ERR(imx6_pcie->pcie_bus)) { dev_err(&pdev->dev, - "pcie_ref_125m clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->pcie_ref_125m); + "pcie_bus clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie_bus); } - imx6_pcie->pcie_axi = devm_clk_get(&pdev->dev, "pcie_axi"); - if (IS_ERR(imx6_pcie->pcie_axi)) { + imx6_pcie->pcie = devm_clk_get(&pdev->dev, "pcie"); + if (IS_ERR(imx6_pcie->pcie)) { dev_err(&pdev->dev, - "pcie_axi clock source missing or invalid\n"); - return PTR_ERR(imx6_pcie->pcie_axi); + "pcie clock source missing or invalid\n"); + return PTR_ERR(imx6_pcie->pcie); } /* Grab GPR config register range */ diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index d3d1cfd51e0..e384e253459 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -293,6 +293,58 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, return PCIBIOS_SUCCESSFUL; } +/* + * Remove windows, starting from the largest ones to the smallest + * ones. + */ +static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, + phys_addr_t base, size_t size) +{ + while (size) { + size_t sz = 1 << (fls(size) - 1); + + mvebu_mbus_del_window(base, sz); + base += sz; + size -= sz; + } +} + +/* + * MBus windows can only have a power of two size, but PCI BARs do not + * have this constraint. Therefore, we have to split the PCI BAR into + * areas each having a power of two size. We start from the largest + * one (i.e highest order bit set in the size). 
+ */ +static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, + unsigned int target, unsigned int attribute, + phys_addr_t base, size_t size, + phys_addr_t remap) +{ + size_t size_mapped = 0; + + while (size) { + size_t sz = 1 << (fls(size) - 1); + int ret; + + ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, + sz, remap); + if (ret) { + dev_err(&port->pcie->pdev->dev, + "Could not create MBus window at 0x%x, size 0x%x: %d\n", + base, sz, ret); + mvebu_pcie_del_windows(port, base - size_mapped, + size_mapped); + return; + } + + size -= sz; + size_mapped += sz; + base += sz; + if (remap != MVEBU_MBUS_NO_REMAP) + remap += sz; + } +} + static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) { phys_addr_t iobase; @@ -304,8 +356,8 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) /* If a window was configured, remove it */ if (port->iowin_base) { - mvebu_mbus_del_window(port->iowin_base, - port->iowin_size); + mvebu_pcie_del_windows(port, port->iowin_base, + port->iowin_size); port->iowin_base = 0; port->iowin_size = 0; } @@ -331,11 +383,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) port->iowin_base = port->pcie->io.start + iobase; port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | (port->bridge.iolimitupper << 16)) - - iobase); + iobase) + 1; - mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr, - port->iowin_base, port->iowin_size, - iobase); + mvebu_pcie_add_windows(port, port->io_target, port->io_attr, + port->iowin_base, port->iowin_size, + iobase); } static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) @@ -346,8 +398,8 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) /* If a window was configured, remove it */ if (port->memwin_base) { - mvebu_mbus_del_window(port->memwin_base, - port->memwin_size); + mvebu_pcie_del_windows(port, port->memwin_base, + port->memwin_size); port->memwin_base = 0; port->memwin_size = 0; } @@ -364,10 +416,11 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16); port->memwin_size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - - port->memwin_base; + port->memwin_base + 1; - mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr, - port->memwin_base, port->memwin_size); + mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr, + port->memwin_base, port->memwin_size, + MVEBU_MBUS_NO_REMAP); } /* @@ -743,14 +796,21 @@ static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, /* * On the PCI-to-PCI bridge side, the I/O windows must have at - * least a 64 KB size and be aligned on their size, and the - * memory windows must have at least a 1 MB size and be - * aligned on their size + * least a 64 KB size and the memory windows must have at + * least a 1 MB size. Moreover, MBus windows need to have a + * base address aligned on their size, and their size must be + * a power of two. This means that if the BAR doesn't have a + * power of two size, several MBus windows will actually be + * created. We need to ensure that the biggest MBus window + * (which will be the first one) is aligned on its size, which + * explains the rounddown_pow_of_two() being done here. 
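To make the alignment rule concrete: the first (largest) of those windows is rounddown_pow_of_two(size) bytes, so the resource start is rounded up to that granularity, floored at 64 KB for I/O and 1 MB for memory windows. A small sketch under those assumptions (helper names are local to the example):

#include <stdint.h>
#include <stdio.h>

#define SZ_1M 0x00100000ull

static uint64_t rounddown_pow2(uint64_t n)
{
        while (n & (n - 1))
                n &= n - 1; /* clear low bits until one (the highest) remains */
        return n;
}

static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
        return (x + align - 1) & ~(align - 1); /* align must be a power of two */
}

int main(void)
{
        uint64_t start = 0xe0180000, size = 0x00500000; /* 5 MB, example */
        uint64_t align = rounddown_pow2(size);          /* 4 MB */

        if (align < SZ_1M) /* memory-window floor */
                align = SZ_1M;
        printf("aligned start: 0x%llx\n",
               (unsigned long long)round_up_pow2(start, align));
        return 0; /* prints 0xe0400000 */
}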
*/ if (res->flags & IORESOURCE_IO) - return round_up(start, max_t(resource_size_t, SZ_64K, size)); + return round_up(start, max_t(resource_size_t, SZ_64K, + rounddown_pow_of_two(size))); else if (res->flags & IORESOURCE_MEM) - return round_up(start, max_t(resource_size_t, SZ_1M, size)); + return round_up(start, max_t(resource_size_t, SZ_1M, + rounddown_pow_of_two(size))); else return start; } diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 4fe349dcaf5..3ef854f5a5b 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c @@ -99,6 +99,7 @@ struct rcar_pci_priv { struct resource io_res; struct resource mem_res; struct resource *cfg_res; + unsigned busnr; int irq; unsigned long window_size; }; @@ -318,8 +319,8 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) pci_add_resource(&sys->resources, &priv->io_res); pci_add_resource(&sys->resources, &priv->mem_res); - /* Setup bus number based on platform device id */ - sys->busnr = to_platform_device(priv->dev)->id; + /* Setup bus number based on platform device id / of bus-range */ + sys->busnr = priv->busnr; return 1; } @@ -372,6 +373,23 @@ static int rcar_pci_probe(struct platform_device *pdev) priv->window_size = SZ_1G; + if (pdev->dev.of_node) { + struct resource busnr; + int ret; + + ret = of_pci_parse_bus_range(pdev->dev.of_node, &busnr); + if (ret < 0) { + dev_err(&pdev->dev, "failed to parse bus-range\n"); + return ret; + } + + priv->busnr = busnr.start; + if (busnr.end != busnr.start) + dev_warn(&pdev->dev, "only one bus number supported\n"); + } else { + priv->busnr = pdev->id; + } + hw_private[0] = priv; memset(&hw, 0, sizeof(hw)); hw.nr_controllers = ARRAY_SIZE(hw_private); @@ -383,11 +401,20 @@ static int rcar_pci_probe(struct platform_device *pdev) return 0; } +static struct of_device_id rcar_pci_of_match[] = { + { .compatible = "renesas,pci-r8a7790", }, + { .compatible = "renesas,pci-r8a7791", }, + { }, +}; + +MODULE_DEVICE_TABLE(of, rcar_pci_of_match); + static struct platform_driver rcar_pci_driver = { .driver = { .name = "pci-rcar-gen2", .owner = THIS_MODULE, .suppress_bind_attrs = true, + .of_match_table = rcar_pci_of_match, }, .probe = rcar_pci_probe, }; diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c index c4e37329447..e3bf9e6d5d9 100644 --- a/drivers/pci/host/pcie-designware.c +++ b/drivers/pci/host/pcie-designware.c @@ -156,15 +156,17 @@ static struct irq_chip dw_msi_irq_chip = { }; /* MSI int handler */ -void dw_handle_msi_irq(struct pcie_port *pp) +irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) { unsigned long val; int i, pos, irq; + irqreturn_t ret = IRQ_NONE; for (i = 0; i < MAX_MSI_CTRLS; i++) { dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, (u32 *)&val); if (val) { + ret = IRQ_HANDLED; pos = 0; while ((pos = find_next_bit(&val, 32, pos)) != 32) { irq = irq_find_mapping(pp->irq_domain, @@ -177,6 +179,8 @@ void dw_handle_msi_irq(struct pcie_port *pp) } } } + + return ret; } void dw_pcie_msi_init(struct pcie_port *pp) diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h index 3063b3594d8..a169d22d517 100644 --- a/drivers/pci/host/pcie-designware.h +++ b/drivers/pci/host/pcie-designware.h @@ -68,7 +68,7 @@ struct pcie_host_ops { int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val); -void dw_handle_msi_irq(struct pcie_port *pp); +irqreturn_t dw_handle_msi_irq(struct 
pcie_port *pp); void dw_pcie_msi_init(struct pcie_port *pp); int dw_pcie_link_up(struct pcie_port *pp); void dw_pcie_setup_rc(struct pcie_port *pp); diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c new file mode 100644 index 00000000000..8e06124aa80 --- /dev/null +++ b/drivers/pci/host/pcie-rcar.c @@ -0,0 +1,1008 @@ +/* + * PCIe driver for Renesas R-Car SoCs + * Copyright (C) 2014 Renesas Electronics Europe Ltd + * + * Based on: + * arch/sh/drivers/pci/pcie-sh7786.c + * arch/sh/drivers/pci/ops-sh7786.c + * Copyright (C) 2009 - 2011 Paul Mundt + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/msi.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#define DRV_NAME "rcar-pcie" + +#define PCIECAR 0x000010 +#define PCIECCTLR 0x000018 +#define CONFIG_SEND_ENABLE (1 << 31) +#define TYPE0 (0 << 8) +#define TYPE1 (1 << 8) +#define PCIECDR 0x000020 +#define PCIEMSR 0x000028 +#define PCIEINTXR 0x000400 +#define PCIEMSITXR 0x000840 + +/* Transfer control */ +#define PCIETCTLR 0x02000 +#define CFINIT 1 +#define PCIETSTR 0x02004 +#define DATA_LINK_ACTIVE 1 +#define PCIEERRFR 0x02020 +#define UNSUPPORTED_REQUEST (1 << 4) +#define PCIEMSIFR 0x02044 +#define PCIEMSIALR 0x02048 +#define MSIFE 1 +#define PCIEMSIAUR 0x0204c +#define PCIEMSIIER 0x02050 + +/* root port address */ +#define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) + +/* local address reg & mask */ +#define PCIELAR(x) (0x02200 + ((x) * 0x20)) +#define PCIELAMR(x) (0x02208 + ((x) * 0x20)) +#define LAM_PREFETCH (1 << 3) +#define LAM_64BIT (1 << 2) +#define LAR_ENABLE (1 << 1) + +/* PCIe address reg & mask */ +#define PCIEPARL(x) (0x03400 + ((x) * 0x20)) +#define PCIEPARH(x) (0x03404 + ((x) * 0x20)) +#define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) +#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) +#define PAR_ENABLE (1 << 31) +#define IO_SPACE (1 << 8) + +/* Configuration */ +#define PCICONF(x) (0x010000 + ((x) * 0x4)) +#define PMCAP(x) (0x010040 + ((x) * 0x4)) +#define EXPCAP(x) (0x010070 + ((x) * 0x4)) +#define VCCAP(x) (0x010100 + ((x) * 0x4)) + +/* link layer */ +#define IDSETR1 0x011004 +#define TLCTLR 0x011048 +#define MACSR 0x011054 +#define MACCTLR 0x011058 +#define SCRAMBLE_DISABLE (1 << 27) + +/* R-Car H1 PHY */ +#define H1_PCIEPHYADRR 0x04000c +#define WRITE_CMD (1 << 16) +#define PHY_ACK (1 << 24) +#define RATE_POS 12 +#define LANE_POS 8 +#define ADR_POS 0 +#define H1_PCIEPHYDOUTR 0x040014 +#define H1_PCIEPHYSR 0x040018 + +#define INT_PCI_MSI_NR 32 + +#define RCONF(x) (PCICONF(0)+(x)) +#define RPMCAP(x) (PMCAP(0)+(x)) +#define REXPCAP(x) (EXPCAP(0)+(x)) +#define RVCCAP(x) (VCCAP(0)+(x)) + +#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) +#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) +#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) + +#define PCI_MAX_RESOURCES 4 +#define MAX_NR_INBOUND_MAPS 6 + +struct rcar_msi { + DECLARE_BITMAP(used, INT_PCI_MSI_NR); + struct irq_domain *domain; + struct msi_chip chip; + unsigned long pages; + struct mutex lock; + int irq1; + int irq2; +}; + +static inline struct rcar_msi *to_rcar_msi(struct 
msi_chip *chip) +{ + return container_of(chip, struct rcar_msi, chip); +} + +/* Structure representing the PCIe interface */ +struct rcar_pcie { + struct device *dev; + void __iomem *base; + struct resource res[PCI_MAX_RESOURCES]; + struct resource busn; + int root_bus_nr; + struct clk *clk; + struct clk *bus_clk; + struct rcar_msi msi; +}; + +static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys) +{ + return sys->private_data; +} + +static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val, + unsigned long reg) +{ + writel(val, pcie->base + reg); +} + +static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg) +{ + return readl(pcie->base + reg); +} + +enum { + PCI_ACCESS_READ, + PCI_ACCESS_WRITE, +}; + +static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) +{ + int shift = 8 * (where & 3); + u32 val = pci_read_reg(pcie, where & ~3); + + val &= ~(mask << shift); + val |= data << shift; + pci_write_reg(pcie, val, where & ~3); +} + +static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) +{ + int shift = 8 * (where & 3); + u32 val = pci_read_reg(pcie, where & ~3); + + return val >> shift; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rcar_pcie_config_access(struct rcar_pcie *pcie, + unsigned char access_type, struct pci_bus *bus, + unsigned int devfn, int where, u32 *data) +{ + int dev, func, reg, index; + + dev = PCI_SLOT(devfn); + func = PCI_FUNC(devfn); + reg = where & ~3; + index = reg / 4; + + /* + * While each channel has its own memory-mapped extended config + * space, it's generally only accessible when in endpoint mode. + * When in root complex mode, the controller is unable to target + * itself with either type 0 or type 1 accesses, and indeed, any + * controller-initiated target transfer to its own config space + * results in a completer abort. + * + * Each channel effectively only supports a single device, but as + * the same channel <-> device access works for any PCI_SLOT() + * value, we cheat a bit here and bind the controller's config + * space to devfn 0 in order to enable self-enumeration. In this + * case the regular ECAR/ECDR path is sidelined and the mangled + * config access itself is initiated as an internal bus transaction.
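+ *
+ * Editorial illustration, not part of the original patch: a config
+ * read of register 0x10 on bus 2, devfn 0 programs PCIECAR with
+ *
+ *	PCIE_CONF_BUS(2) | PCIE_CONF_DEV(0) | PCIE_CONF_FUNC(0) | 0x10
+ *		= (2 << 24) | 0x10 = 0x02000010
+ *
+ * and, since bus 2 sits behind a bridge rather than directly below
+ * the root bus, the access is issued with CONFIG_SEND_ENABLE | TYPE1
+ * in PCIECCTLR; devices on the root port's secondary bus use TYPE0.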
+ */ + if (pci_is_root_bus(bus)) { + if (dev != 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (access_type == PCI_ACCESS_READ) { + *data = pci_read_reg(pcie, PCICONF(index)); + } else { + /* Keep an eye out for changes to the root bus number */ + if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) + pcie->root_bus_nr = *data & 0xff; + + pci_write_reg(pcie, *data, PCICONF(index)); + } + + return PCIBIOS_SUCCESSFUL; + } + + if (pcie->root_bus_nr < 0) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Clear errors */ + pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); + + /* Set the PIO address */ + pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) | + PCIE_CONF_FUNC(func) | reg, PCIECAR); + + /* Enable the configuration access */ + if (bus->parent->number == pcie->root_bus_nr) + pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); + else + pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); + + /* Check for errors */ + if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) + return PCIBIOS_DEVICE_NOT_FOUND; + + /* Check for master and target aborts */ + if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & + (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) + return PCIBIOS_DEVICE_NOT_FOUND; + + if (access_type == PCI_ACCESS_READ) + *data = pci_read_reg(pcie, PCIECDR); + else + pci_write_reg(pcie, *data, PCIECDR); + + /* Disable the configuration access */ + pci_write_reg(pcie, 0, PCIECCTLR); + + return PCIBIOS_SUCCESSFUL; +} + +static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + int ret; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ, + bus, devfn, where, val); + if (ret != PCIBIOS_SUCCESSFUL) { + *val = 0xffffffff; + return ret; + } + + if (size == 1) + *val = (*val >> (8 * (where & 3))) & 0xff; + else if (size == 2) + *val = (*val >> (8 * (where & 2))) & 0xffff; + + dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x " + "where=0x%04x size=%d val=0x%08lx\n", bus->number, + devfn, where, size, (unsigned long)*val); + + return ret; +} + +/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ +static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + int shift, ret; + u32 data; + + if ((size == 2) && (where & 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + else if ((size == 4) && (where & 3)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ, + bus, devfn, where, &data); + if (ret != PCIBIOS_SUCCESSFUL) + return ret; + + dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x " + "where=0x%04x size=%d val=0x%08lx\n", bus->number, + devfn, where, size, (unsigned long)val); + + if (size == 1) { + shift = 8 * (where & 3); + data &= ~(0xff << shift); + data |= ((val & 0xff) << shift); + } else if (size == 2) { + shift = 8 * (where & 2); + data &= ~(0xffff << shift); + data |= ((val & 0xffff) << shift); + } else + data = val; + + ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE, + bus, devfn, where, &data); + + return ret; +} + +static struct pci_ops rcar_pcie_ops = { + .read = rcar_pcie_read_conf, + .write = rcar_pcie_write_conf, +}; + +static void rcar_pcie_setup_window(int win, struct resource *res, + struct 
rcar_pcie *pcie) +{ + /* Setup PCIe address space mappings for each resource */ + resource_size_t size; + u32 mask; + + pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); + + /* + * The PAMR mask is calculated in units of 128Bytes, which + * keeps things pretty simple. + */ + size = resource_size(res); + mask = (roundup_pow_of_two(size) / SZ_128) - 1; + pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); + + pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win)); + pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win)); + + /* First resource is for IO */ + mask = PAR_ENABLE; + if (res->flags & IORESOURCE_IO) + mask |= IO_SPACE; + + pci_write_reg(pcie, mask, PCIEPTCTLR(win)); +} + +static int rcar_pcie_setup(int nr, struct pci_sys_data *sys) +{ + struct rcar_pcie *pcie = sys_to_pcie(sys); + struct resource *res; + int i; + + pcie->root_bus_nr = -1; + + /* Setup PCI resources */ + for (i = 0; i < PCI_MAX_RESOURCES; i++) { + + res = &pcie->res[i]; + if (!res->flags) + continue; + + rcar_pcie_setup_window(i, res, pcie); + + if (res->flags & IORESOURCE_IO) + pci_ioremap_io(nr * SZ_64K, res->start); + else + pci_add_resource(&sys->resources, res); + } + pci_add_resource(&sys->resources, &pcie->busn); + + return 1; +} + +static void rcar_pcie_add_bus(struct pci_bus *bus) +{ + if (IS_ENABLED(CONFIG_PCI_MSI)) { + struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata); + + bus->msi = &pcie->msi.chip; + } +} + +struct hw_pci rcar_pci = { + .setup = rcar_pcie_setup, + .map_irq = of_irq_parse_and_map_pci, + .ops = &rcar_pcie_ops, + .add_bus = rcar_pcie_add_bus, +}; + +static void rcar_pcie_enable(struct rcar_pcie *pcie) +{ + struct platform_device *pdev = to_platform_device(pcie->dev); + + rcar_pci.nr_controllers = 1; + rcar_pci.private_data = (void **)&pcie; + + pci_common_init_dev(&pdev->dev, &rcar_pci); +#ifdef CONFIG_PCI_DOMAINS + rcar_pci.domain++; +#endif +} + +static int phy_wait_for_ack(struct rcar_pcie *pcie) +{ + unsigned int timeout = 100; + + while (timeout--) { + if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) + return 0; + + udelay(100); + } + + dev_err(pcie->dev, "Access to PCIe phy timed out\n"); + + return -ETIMEDOUT; +} + +static void phy_write_reg(struct rcar_pcie *pcie, + unsigned int rate, unsigned int addr, + unsigned int lane, unsigned int data) +{ + unsigned long phyaddr; + + phyaddr = WRITE_CMD | + ((rate & 1) << RATE_POS) | + ((lane & 0xf) << LANE_POS) | + ((addr & 0xff) << ADR_POS); + + /* Set write data */ + pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); + pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); + + /* Ignore errors as they will be dealt with if the data link is down */ + phy_wait_for_ack(pcie); + + /* Clear command */ + pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); + pci_write_reg(pcie, 0, H1_PCIEPHYADRR); + + /* Ignore errors as they will be dealt with if the data link is down */ + phy_wait_for_ack(pcie); +} + +static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) +{ + unsigned int timeout = 10; + + while (timeout--) { + if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) + return 0; + + msleep(5); + } + + return -ETIMEDOUT; +} + +static int rcar_pcie_hw_init(struct rcar_pcie *pcie) +{ + int err; + + /* Begin initialization */ + pci_write_reg(pcie, 0, PCIETCTLR); + + /* Set mode */ + pci_write_reg(pcie, 1, PCIEMSR); + + /* + * Initial header for port config space is type 1, set the device + * class to match. Hardware takes care of propagating the IDSETR + * settings, so there is no need to bother with a quirk. 
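+ *
+ * Editorial illustration, not part of the original patch: the write
+ * below is PCI_CLASS_BRIDGE_PCI (0x0604) shifted into the upper
+ * 16 bits, i.e. IDSETR1 = 0x06040000, so the port enumerates with a
+ * PCI-to-PCI bridge class code instead of the reset default.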
+ */ + pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); + + /* + * Setup Secondary Bus Number & Subordinate Bus Number, even though + * they aren't used, to avoid bridge being detected as broken. + */ + rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); + rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); + + /* Initialize default capabilities. */ + rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP); + rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), + PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); + rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, + PCI_HEADER_TYPE_BRIDGE); + + /* Enable data link layer active state reporting */ + rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC); + + /* Write out the physical slot number = 0 */ + rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); + + /* Set the completion timer timeout to the maximum 50ms. */ + rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50); + + /* Terminate list of capabilities (Next Capability Offset=0) */ + rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0); + + /* Enable MAC data scrambling. */ + rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0); + + /* Enable MSI */ + if (IS_ENABLED(CONFIG_PCI_MSI)) + pci_write_reg(pcie, 0x101f0000, PCIEMSITXR); + + /* Finish initialization - establish a PCI Express link */ + pci_write_reg(pcie, CFINIT, PCIETCTLR); + + /* This will timeout if we don't have a link. */ + err = rcar_pcie_wait_for_dl(pcie); + if (err) + return err; + + /* Enable INTx interrupts */ + rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); + + /* Enable slave Bus Mastering */ + rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK, + PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST); + + wmb(); + + return 0; +} + +static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie) +{ + unsigned int timeout = 10; + + /* Initialize the phy */ + phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); + phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); + phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); + phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); + phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); + phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); + phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); + phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); + phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); + phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); + phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); + phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); + + phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); + phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); + phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); + + while (timeout--) { + if (pci_read_reg(pcie, H1_PCIEPHYSR)) + return rcar_pcie_hw_init(pcie); + + msleep(5); + } + + return -ETIMEDOUT; +} + +static int rcar_msi_alloc(struct rcar_msi *chip) +{ + int msi; + + mutex_lock(&chip->lock); + + msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); + if (msi < INT_PCI_MSI_NR) + set_bit(msi, chip->used); + else + msi = -ENOSPC; + + mutex_unlock(&chip->lock); + + return msi; +} + +static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) +{ + mutex_lock(&chip->lock); + clear_bit(irq, chip->used); + mutex_unlock(&chip->lock); +} + +static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) +{ + struct rcar_pcie *pcie = data; + struct rcar_msi *msi = &pcie->msi; + unsigned long reg; + + reg = pci_read_reg(pcie, PCIEMSIFR); + + /* MSI & INTx share an interrupt - we only handle MSI here */ + if (!reg) + return IRQ_NONE; + + while (reg) { + unsigned int index = 
find_first_bit(&reg, 32); + unsigned int irq; + + /* clear the interrupt */ + pci_write_reg(pcie, 1 << index, PCIEMSIFR); + + irq = irq_find_mapping(msi->domain, index); + if (irq) { + if (test_bit(index, msi->used)) + generic_handle_irq(irq); + else + dev_info(pcie->dev, "unhandled MSI\n"); + } else { + /* Unknown MSI, just clear it */ + dev_dbg(pcie->dev, "unexpected MSI\n"); + } + + /* see if there's any more pending in this vector */ + reg = pci_read_reg(pcie, PCIEMSIFR); + } + + return IRQ_HANDLED; +} + +static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, + struct msi_desc *desc) +{ + struct rcar_msi *msi = to_rcar_msi(chip); + struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); + struct msi_msg msg; + unsigned int irq; + int hwirq; + + hwirq = rcar_msi_alloc(msi); + if (hwirq < 0) + return hwirq; + + irq = irq_create_mapping(msi->domain, hwirq); + if (!irq) { + rcar_msi_free(msi, hwirq); + return -EINVAL; + } + + irq_set_msi_desc(irq, desc); + + msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; + msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR); + msg.data = hwirq; + + write_msi_msg(irq, &msg); + + return 0; +} + +static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) +{ + struct rcar_msi *msi = to_rcar_msi(chip); + struct irq_data *d = irq_get_irq_data(irq); + + rcar_msi_free(msi, d->hwirq); +} + +static struct irq_chip rcar_msi_irq_chip = { + .name = "R-Car PCIe MSI", + .irq_enable = unmask_msi_irq, + .irq_disable = mask_msi_irq, + .irq_mask = mask_msi_irq, + .irq_unmask = unmask_msi_irq, +}; + +static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, + irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); + irq_set_chip_data(irq, domain->host_data); + set_irq_flags(irq, IRQF_VALID); + + return 0; +} + +static const struct irq_domain_ops msi_domain_ops = { + .map = rcar_msi_map, +}; + +static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) +{ + struct platform_device *pdev = to_platform_device(pcie->dev); + struct rcar_msi *msi = &pcie->msi; + unsigned long base; + int err; + + mutex_init(&msi->lock); + + msi->chip.dev = pcie->dev; + msi->chip.setup_irq = rcar_msi_setup_irq; + msi->chip.teardown_irq = rcar_msi_teardown_irq; + + msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR, + &msi_domain_ops, &msi->chip); + if (!msi->domain) { + dev_err(&pdev->dev, "failed to create IRQ domain\n"); + return -ENOMEM; + } + + /* Two irqs are for MSI, but they are also used for non-MSI irqs */ + err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq, + IRQF_SHARED, rcar_msi_irq_chip.name, pcie); + if (err < 0) { + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); + goto err; + } + + err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq, + IRQF_SHARED, rcar_msi_irq_chip.name, pcie); + if (err < 0) { + dev_err(&pdev->dev, "failed to request IRQ: %d\n", err); + goto err; + } + + /* setup MSI data target */ + msi->pages = __get_free_pages(GFP_KERNEL, 0); + base = virt_to_phys((void *)msi->pages); + + pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); + pci_write_reg(pcie, 0, PCIEMSIAUR); + + /* enable all MSI interrupts */ + pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); + + return 0; + +err: + irq_domain_remove(msi->domain); + return err; +} + +static int rcar_pcie_get_resources(struct platform_device *pdev, + struct rcar_pcie *pcie) +{ + struct resource res; + int err, i; + + err = of_address_to_resource(pdev->dev.of_node, 0, &res); +
if (err) + return err; + + pcie->clk = devm_clk_get(&pdev->dev, "pcie"); + if (IS_ERR(pcie->clk)) { + dev_err(pcie->dev, "cannot get platform clock\n"); + return PTR_ERR(pcie->clk); + } + err = clk_prepare_enable(pcie->clk); + if (err) + return err; + + pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus"); + if (IS_ERR(pcie->bus_clk)) { + dev_err(pcie->dev, "cannot get pcie bus clock\n"); + err = PTR_ERR(pcie->bus_clk); + goto fail_clk; + } + err = clk_prepare_enable(pcie->bus_clk); + if (err) + goto fail_clk; + + i = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!i) { + dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); + err = -ENOENT; + goto err_map_reg; + } + pcie->msi.irq1 = i; + + i = irq_of_parse_and_map(pdev->dev.of_node, 1); + if (!i) { + dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n"); + err = -ENOENT; + goto err_map_reg; + } + pcie->msi.irq2 = i; + + pcie->base = devm_ioremap_resource(&pdev->dev, &res); + if (IS_ERR(pcie->base)) { + err = PTR_ERR(pcie->base); + goto err_map_reg; + } + + return 0; + +err_map_reg: + clk_disable_unprepare(pcie->bus_clk); +fail_clk: + clk_disable_unprepare(pcie->clk); + + return err; +} + +static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, + struct of_pci_range *range, + int *index) +{ + u64 restype = range->flags; + u64 cpu_addr = range->cpu_addr; + u64 cpu_end = range->cpu_addr + range->size; + u64 pci_addr = range->pci_addr; + u32 flags = LAM_64BIT | LAR_ENABLE; + u64 mask; + u64 size; + int idx = *index; + + if (restype & IORESOURCE_PREFETCH) + flags |= LAM_PREFETCH; + + /* + * If the size of the range is larger than the alignment of the start + * address, we have to use multiple entries to perform the mapping. + */ + if (cpu_addr > 0) { + unsigned long nr_zeros = __ffs64(cpu_addr); + u64 alignment = 1ULL << nr_zeros; + size = min(range->size, alignment); + } else { + size = range->size; + } + /* Hardware supports max 4GiB inbound region */ + size = min(size, 1ULL << 32); + + mask = roundup_pow_of_two(size) - 1; + mask &= ~0xf; + + while (cpu_addr < cpu_end) { + /* + * Set up 64-bit inbound regions as the range parser doesn't + * distinguish between 32 and 64-bit types.
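+ *
+ * Editorial illustration, not part of the original patch: a 1 GB
+ * dma-range at CPU address 0x40000000 fits in one mapping, since
+ * the address is aligned on the size; a 2 GB range at the same
+ * address is emitted as two 1 GB mappings, each taking a pair of
+ * PCIEPRAR/PCIELAR/PCIELAMR slots, because each mapping's base
+ * must be aligned on its own power-of-two size.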
+ */ + if (idx + 1 >= MAX_NR_INBOUND_MAPS) { + dev_err(pcie->dev, "Failed to map inbound regions!\n"); + return -EINVAL; + } + + pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx)); + pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); + pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx)); + + pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1)); + pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1)); + pci_write_reg(pcie, 0, PCIELAMR(idx+1)); + + pci_addr += size; + cpu_addr += size; + idx += 2; + } + *index = idx; + + return 0; +} + +static int pci_dma_range_parser_init(struct of_pci_range_parser *parser, + struct device_node *node) +{ + const int na = 3, ns = 2; + int rlen; + + parser->node = node; + parser->pna = of_n_addr_cells(node); + parser->np = parser->pna + na + ns; + + parser->range = of_get_property(node, "dma-ranges", &rlen); + if (!parser->range) + return -ENOENT; + + parser->end = parser->range + rlen / sizeof(__be32); + return 0; +} + +static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, + struct device_node *np) +{ + struct of_pci_range range; + struct of_pci_range_parser parser; + int index = 0; + int err; + + if (pci_dma_range_parser_init(&parser, np)) + return -EINVAL; + + /* Get the dma-ranges from DT */ + for_each_of_pci_range(&parser, &range) { + u64 end = range.cpu_addr + range.size - 1; + dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", + range.flags, range.cpu_addr, end, range.pci_addr); + + err = rcar_pcie_inbound_ranges(pcie, &range, &index); + if (err) + return err; + } + + return 0; +} + +static const struct of_device_id rcar_pcie_of_match[] = { + { .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 }, + { .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init }, + { .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init }, + {}, +}; +MODULE_DEVICE_TABLE(of, rcar_pcie_of_match); + +static int rcar_pcie_probe(struct platform_device *pdev) +{ + struct rcar_pcie *pcie; + unsigned int data; + struct of_pci_range range; + struct of_pci_range_parser parser; + const struct of_device_id *of_id; + int err, win = 0; + int (*hw_init_fn)(struct rcar_pcie *); + + pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = &pdev->dev; + platform_set_drvdata(pdev, pcie); + + /* Get the bus range */ + if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) { + dev_err(&pdev->dev, "failed to parse bus-range property\n"); + return -EINVAL; + } + + if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) { + dev_err(&pdev->dev, "missing ranges property\n"); + return -EINVAL; + } + + err = rcar_pcie_get_resources(pdev, pcie); + if (err < 0) { + dev_err(&pdev->dev, "failed to request resources: %d\n", err); + return err; + } + + for_each_of_pci_range(&parser, &range) { + if (win >= PCI_MAX_RESOURCES) + break; + + of_pci_range_to_resource(&range, pdev->dev.of_node, + &pcie->res[win++]); + } + + err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node); + if (err) + return err; + + if (IS_ENABLED(CONFIG_PCI_MSI)) { + err = rcar_pcie_enable_msi(pcie); + if (err < 0) { + dev_err(&pdev->dev, + "failed to enable MSI support: %d\n", + err); + return err; + } + } + + of_id = of_match_device(rcar_pcie_of_match, pcie->dev); + if (!of_id || !of_id->data) + return -EINVAL; + hw_init_fn = of_id->data; + + /* Failure to get a link might just be that no cards are inserted */ + err = hw_init_fn(pcie); + if (err) { + dev_info(&pdev->dev,
"PCIe link down\n"); + return 0; + } + + data = pci_read_reg(pcie, MACSR); + dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); + + rcar_pcie_enable(pcie); + + return 0; +} + +static struct platform_driver rcar_pcie_driver = { + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = rcar_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = rcar_pcie_probe, +}; +module_platform_driver(rcar_pcie_driver); + +MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>"); +MODULE_DESCRIPTION("Renesas R-Car PCIe driver"); +MODULE_LICENSE("GPLv2"); diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c index 6258dc260d9..c68366cee6b 100644 --- a/drivers/pci/hotplug-pci.c +++ b/drivers/pci/hotplug-pci.c @@ -4,7 +4,7 @@ #include <linux/export.h> #include "pci.h" -int __ref pci_hp_add_bridge(struct pci_dev *dev) +int pci_hp_add_bridge(struct pci_dev *dev) { struct pci_bus *parent = dev->bus; int pass, busnr, start = parent->busn_res.start; diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index bccc27ee103..75e17833021 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -41,7 +41,6 @@ #define pr_fmt(fmt) "acpiphp_glue: " fmt -#include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> @@ -501,7 +500,7 @@ static int acpiphp_rescan_slot(struct acpiphp_slot *slot) * This function should be called per *physical slot*, * not per each slot object in ACPI namespace. */ -static void __ref enable_slot(struct acpiphp_slot *slot) +static void enable_slot(struct acpiphp_slot *slot) { struct pci_dev *dev; struct pci_bus *bus = slot->bus; @@ -516,8 +515,7 @@ static void __ref enable_slot(struct acpiphp_slot *slot) if (PCI_SLOT(dev->devfn) != slot->device) continue; - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { + if (pci_is_bridge(dev)) { max = pci_scan_bridge(bus, dev, max, pass); if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index 8c146485176..f6ef64c2ccb 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -250,7 +250,7 @@ int cpci_led_off(struct slot* slot) * Device configuration functions */ -int __ref cpci_configure_slot(struct slot *slot) +int cpci_configure_slot(struct slot *slot) { struct pci_dev *dev; struct pci_bus *parent; @@ -289,8 +289,7 @@ int __ref cpci_configure_slot(struct slot *slot) list_for_each_entry(dev, &parent->devices, bus_list) if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) continue; - if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || - (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) + if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index 11845b79679..f593585f278 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -709,7 +709,8 @@ static struct pci_resource *get_max_resource(struct pci_resource **head, u32 siz temp = temp->next; } - temp->next = max->next; + if (temp) + temp->next = max->next; } max->next = NULL; diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c index 76ba8a1c774..9600a392eaa 100644 --- a/drivers/pci/hotplug/cpqphp_nvram.c +++ b/drivers/pci/hotplug/cpqphp_nvram.c @@ -34,7 +34,6 @@ #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> -#include 
<linux/init.h> #include <asm/uaccess.h> #include "cpqphp.h" #include "cpqphp_nvram.h" diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 8a66866b8cf..8e9012dca45 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -127,7 +127,7 @@ struct controller { #define HP_SUPR_RM(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_HPS) #define EMI(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_EIP) #define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS) -#define PSN(ctrl) ((ctrl)->slot_cap >> 19) +#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19) int pciehp_sysfs_enable_slot(struct slot *slot); int pciehp_sysfs_disable_slot(struct slot *slot); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index d7d058fa19a..1463412cf7f 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -159,6 +159,8 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status); if (slot_status & PCI_EXP_SLTSTA_CC) { + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, + PCI_EXP_SLTSTA_CC); if (!ctrl->no_cmd_complete) { /* * After 1 sec and CMD_COMPLETED still not set, just diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 1b533060ce6..b6cb1df6709 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -62,8 +62,7 @@ int pciehp_configure_device(struct slot *p_slot) } list_for_each_entry(dev, &parent->devices, bus_list) - if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || - (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) + if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); pci_assign_unassigned_bridge_resources(bridge); diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c index 16f92035231..e246a10a0d2 100644 --- a/drivers/pci/hotplug/pcihp_slot.c +++ b/drivers/pci/hotplug/pcihp_slot.c @@ -160,8 +160,7 @@ void pci_configure_slot(struct pci_dev *dev) (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI))) return; - if (dev->bus) - pcie_bus_configure_settings(dev->bus); + pcie_bus_configure_settings(dev->bus); memset(&hpp, 0, sizeof(hpp)); ret = pci_get_hp_params(dev, &hpp); diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 4fcdeedda31..7660232ef46 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -157,8 +157,7 @@ static void dlpar_pci_add_bus(struct device_node *dn) } /* Scan below the new bridge */ - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) of_scan_pci_bridge(dev); /* Map IO space for child bus, which may or may not succeed */ diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 4796c15fba9..984d708552f 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -223,16 +223,16 @@ int rpaphp_get_drc_props(struct device_node *dn, int *drc_index, type_tmp = (char *) &types[1]; /* Iterate through parent properties, looking for my-drc-index */ - for (i = 0; i < indexes[0]; i++) { + for (i = 0; i < be32_to_cpu(indexes[0]); i++) { if ((unsigned int) indexes[i + 1] == *my_index) { if (drc_name) *drc_name = name_tmp; if (drc_type) *drc_type = type_tmp; if (drc_index) - *drc_index = *my_index; + *drc_index = be32_to_cpu(*my_index); if (drc_power_domain) - *drc_power_domain = domains[i+1]; + *drc_power_domain = be32_to_cpu(domains[i+1]); 
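/*
 * Editor's note, not part of this patch: OF property cells are
 * big-endian, so on a little-endian kernel a raw cell holding 5
 * reads back as 0x05000000 until converted:
 *
 *	int n = be32_to_cpu(indexes[0]);	// entry count
 *
 * On big-endian PowerPC the raw and converted values coincide,
 * which is why the missing be32_to_cpu() calls had gone unnoticed.
 */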
return 0; } name_tmp += (strlen(name_tmp) + 1); @@ -321,16 +321,19 @@ int rpaphp_add_slot(struct device_node *dn) /* register PCI devices */ name = (char *) &names[1]; type = (char *) &types[1]; - for (i = 0; i < indexes[0]; i++) { + for (i = 0; i < be32_to_cpu(indexes[0]); i++) { + int index; - slot = alloc_slot_struct(dn, indexes[i + 1], name, power_domains[i + 1]); + index = be32_to_cpu(indexes[i + 1]); + slot = alloc_slot_struct(dn, index, name, + be32_to_cpu(power_domains[i + 1])); if (!slot) return -ENOMEM; slot->type = simple_strtoul(type, NULL, 10); dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n", - indexes[i + 1], name, type); + index, name, type); retval = rpaphp_enable_slot(slot); if (!retval) diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c index 8d2ce22151e..d1332d2f873 100644 --- a/drivers/pci/hotplug/s390_pci_hpc.c +++ b/drivers/pci/hotplug/s390_pci_hpc.c @@ -15,7 +15,6 @@ #include <linux/slab.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> -#include <linux/init.h> #include <asm/pci_debug.h> #include <asm/sclp.h> diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index 58499277903..6efc2ec5e4d 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -282,8 +282,8 @@ static int board_added(struct slot *p_slot) return WRONG_BUS_FREQUENCY; } - bsp = ctrl->pci_dev->bus->cur_bus_speed; - msp = ctrl->pci_dev->bus->max_bus_speed; + bsp = ctrl->pci_dev->subordinate->cur_bus_speed; + msp = ctrl->pci_dev->subordinate->max_bus_speed; /* Check if there are other slots or devices on the same bus */ if (!list_empty(&ctrl->pci_dev->subordinate->devices)) diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c index 2bf69fe1926..9202d133485 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c @@ -34,7 +34,7 @@ #include "../pci.h" #include "shpchp.h" -int __ref shpchp_configure_device(struct slot *p_slot) +int shpchp_configure_device(struct slot *p_slot) { struct pci_dev *dev; struct controller *ctrl = p_slot->ctrl; @@ -64,8 +64,7 @@ int __ref shpchp_configure_device(struct slot *p_slot) list_for_each_entry(dev, &parent->devices, bus_list) { if (PCI_SLOT(dev->devfn) != p_slot->device) continue; - if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) || - (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) + if (pci_is_bridge(dev)) pci_hp_add_bridge(dev); } diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index de7a74782f9..cb6f24740ee 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -106,7 +106,7 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) pci_device_add(virtfn, virtfn->bus); mutex_unlock(&iov->dev->sriov->lock); - rc = pci_bus_add_device(virtfn); + pci_bus_add_device(virtfn); sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 955ab7990c5..27a7e67ddfe 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -10,7 +10,6 @@ #include <linux/mm.h> #include <linux/irq.h> #include <linux/interrupt.h> -#include <linux/init.h> #include <linux/export.h> #include <linux/ioport.h> #include <linux/pci.h> @@ -544,22 +543,18 @@ static int populate_msi_sysfs(struct pci_dev *pdev) if (!msi_attrs) return -ENOMEM; list_for_each_entry(entry, &pdev->msi_list, list) { - char *name = kmalloc(20, GFP_KERNEL); - if (!name) - goto error_attrs; - msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); - if (!msi_dev_attr) 
{ - kfree(name); + if (!msi_dev_attr) goto error_attrs; - } + msi_attrs[count] = &msi_dev_attr->attr; - sprintf(name, "%d", entry->irq); sysfs_attr_init(&msi_dev_attr->attr); - msi_dev_attr->attr.name = name; + msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d", + entry->irq); + if (!msi_dev_attr->attr.name) + goto error_attrs; msi_dev_attr->attr.mode = S_IRUGO; msi_dev_attr->show = msi_mode_show; - msi_attrs[count] = &msi_dev_attr->attr; ++count; } @@ -883,50 +878,6 @@ int pci_msi_vec_count(struct pci_dev *dev) } EXPORT_SYMBOL(pci_msi_vec_count); -/** - * pci_enable_msi_block - configure device's MSI capability structure - * @dev: device to configure - * @nvec: number of interrupts to configure - * - * Allocate IRQs for a device with the MSI capability. - * This function returns a negative errno if an error occurs. If it - * is unable to allocate the number of interrupts requested, it returns - * the number of interrupts it might be able to allocate. If it successfully - * allocates at least the number of interrupts requested, it returns 0 and - * updates the @dev's irq member to the lowest new interrupt number; the - * other interrupt numbers allocated to this device are consecutive. - */ -int pci_enable_msi_block(struct pci_dev *dev, int nvec) -{ - int status, maxvec; - - if (dev->current_state != PCI_D0) - return -EINVAL; - - maxvec = pci_msi_vec_count(dev); - if (maxvec < 0) - return maxvec; - if (nvec > maxvec) - return maxvec; - - status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI); - if (status) - return status; - - WARN_ON(!!dev->msi_enabled); - - /* Check whether driver already requested MSI-X irqs */ - if (dev->msix_enabled) { - dev_info(&dev->dev, "can't enable MSI " - "(MSI-X already enabled)\n"); - return -EINVAL; - } - - status = msi_capability_init(dev, nvec); - return status; -} -EXPORT_SYMBOL(pci_enable_msi_block); - void pci_msi_shutdown(struct pci_dev *dev) { struct msi_desc *desc; @@ -1132,14 +1083,45 @@ void pci_msi_init_pci_dev(struct pci_dev *dev) **/ int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec) { - int nvec = maxvec; + int nvec; int rc; + if (dev->current_state != PCI_D0) + return -EINVAL; + + WARN_ON(!!dev->msi_enabled); + + /* Check whether driver already requested MSI-X irqs */ + if (dev->msix_enabled) { + dev_info(&dev->dev, + "can't enable MSI (MSI-X already enabled)\n"); + return -EINVAL; + } + if (maxvec < minvec) return -ERANGE; + nvec = pci_msi_vec_count(dev); + if (nvec < 0) + return nvec; + else if (nvec < minvec) + return -EINVAL; + else if (nvec > maxvec) + nvec = maxvec; + + do { + rc = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI); + if (rc < 0) { + return rc; + } else if (rc > 0) { + if (rc < minvec) + return -ENOSPC; + nvec = rc; + } + } while (rc); + do { - rc = pci_enable_msi_block(dev, nvec); + rc = msi_capability_init(dev, nvec); if (rc < 0) { return rc; } else if (rc > 0) { diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index f49abef8848..ca4927ba843 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -309,13 +309,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev) bool check_children; u64 addr; - /* - * pci_is_bridge() is not suitable here, because pci_dev->subordinate - * is set only after acpi_pci_find_device() has been called for the - * given device. 
- */ - check_children = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE - || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; + check_children = pci_is_bridge(pci_dev); /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); return acpi_find_child_device(ACPI_COMPANION(dev->parent), addr, diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index d911e0c1f35..837d71f5390 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -107,7 +107,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) subdevice=PCI_ANY_ID, class=0, class_mask=0; unsigned long driver_data=0; int fields=0; - int retval; + int retval = 0; fields = sscanf(buf, "%x %x %x %x %x %x %lx", &vendor, &device, &subvendor, &subdevice, @@ -115,6 +115,26 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) if (fields < 2) return -EINVAL; + if (fields != 7) { + struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); + if (!pdev) + return -ENOMEM; + + pdev->vendor = vendor; + pdev->device = device; + pdev->subsystem_vendor = subvendor; + pdev->subsystem_device = subdevice; + pdev->class = class; + + if (pci_match_id(pdrv->id_table, pdev)) + retval = -EEXIST; + + kfree(pdev); + + if (retval) + return retval; + } + /* Only accept driver_data values that match an existing id_table entry */ if (ids) { @@ -216,6 +236,13 @@ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, return NULL; } +static const struct pci_device_id pci_device_id_any = { + .vendor = PCI_ANY_ID, + .device = PCI_ANY_ID, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, +}; + /** * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure * @drv: the PCI driver to match against @@ -229,18 +256,30 @@ static const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev) { struct pci_dynid *dynid; + const struct pci_device_id *found_id = NULL; + + /* When driver_override is set, only bind to the matching driver */ + if (dev->driver_override && strcmp(dev->driver_override, drv->name)) + return NULL; /* Look at the dynamic ids first, before the static ones */ spin_lock(&drv->dynids.lock); list_for_each_entry(dynid, &drv->dynids.list, node) { if (pci_match_one_device(&dynid->id, dev)) { - spin_unlock(&drv->dynids.lock); - return &dynid->id; + found_id = &dynid->id; + break; } } spin_unlock(&drv->dynids.lock); - return pci_match_id(drv->id_table, dev); + if (!found_id) + found_id = pci_match_id(drv->id_table, dev); + + /* driver_override will always match, send a dummy id */ + if (!found_id && dev->driver_override) + found_id = &pci_device_id_any; + + return found_id; } struct drv_dev_and_id { @@ -580,14 +619,14 @@ static void pci_pm_default_resume(struct pci_dev *pci_dev) { pci_fixup_device(pci_fixup_resume, pci_dev); - if (!pci_is_bridge(pci_dev)) + if (!pci_has_subordinate(pci_dev)) pci_enable_wake(pci_dev, PCI_D0, false); } static void pci_pm_default_suspend(struct pci_dev *pci_dev) { /* Disable non-bridge devices without PM support */ - if (!pci_is_bridge(pci_dev)) + if (!pci_has_subordinate(pci_dev)) pci_disable_enabled_device(pci_dev); } @@ -717,7 +756,7 @@ static int pci_pm_suspend_noirq(struct device *dev) if (!pci_dev->state_saved) { pci_save_state(pci_dev); - if (!pci_is_bridge(pci_dev)) + if (!pci_has_subordinate(pci_dev)) pci_prepare_to_sleep(pci_dev); } @@ -971,7 +1010,7 @@ static int pci_pm_poweroff_noirq(struct device *dev) return error; } - 
if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) + if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev)) pci_prepare_to_sleep(pci_dev); /* @@ -1325,8 +1364,6 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) return -ENODEV; pdev = to_pci_dev(dev); - if (!pdev) - return -ENODEV; if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class)) return -ENOMEM; @@ -1347,6 +1384,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), (u8)(pdev->class))) return -ENOMEM; + return 0; } diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 4e0acefb756..84c350994b0 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/vgaarb.h> #include <linux/pm_runtime.h> +#include <linux/of.h> #include "pci.h" static int sysfs_initialized; /* = 0 */ @@ -416,6 +417,20 @@ static ssize_t d3cold_allowed_show(struct device *dev, static DEVICE_ATTR_RW(d3cold_allowed); #endif +#ifdef CONFIG_OF +static ssize_t devspec_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct device_node *np = pci_device_to_OF_node(pdev); + + if (np == NULL || np->full_name == NULL) + return 0; + return sprintf(buf, "%s", np->full_name); +} +static DEVICE_ATTR_RO(devspec); +#endif + #ifdef CONFIG_PCI_IOV static ssize_t sriov_totalvfs_show(struct device *dev, struct device_attribute *attr, @@ -499,6 +514,45 @@ static struct device_attribute sriov_numvfs_attr = sriov_numvfs_show, sriov_numvfs_store); #endif /* CONFIG_PCI_IOV */ +static ssize_t driver_override_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = to_pci_dev(dev); + char *driver_override, *old = pdev->driver_override, *cp; + + if (count > PATH_MAX) + return -EINVAL; + + driver_override = kstrndup(buf, count, GFP_KERNEL); + if (!driver_override) + return -ENOMEM; + + cp = strchr(driver_override, '\n'); + if (cp) + *cp = '\0'; + + if (strlen(driver_override)) { + pdev->driver_override = driver_override; + } else { + kfree(driver_override); + pdev->driver_override = NULL; + } + + kfree(old); + + return count; +} + +static ssize_t driver_override_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return sprintf(buf, "%s\n", pdev->driver_override); +} +static DEVICE_ATTR_RW(driver_override); + static struct attribute *pci_dev_attrs[] = { &dev_attr_resource.attr, &dev_attr_vendor.attr, @@ -521,6 +575,10 @@ static struct attribute *pci_dev_attrs[] = { #if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) &dev_attr_d3cold_allowed.attr, #endif +#ifdef CONFIG_OF + &dev_attr_devspec.attr, +#endif + &dev_attr_driver_override.attr, NULL, }; @@ -1255,11 +1313,6 @@ static struct bin_attribute pcie_config_attr = { .write = pci_write_config, }; -int __weak pcibios_add_platform_entries(struct pci_dev *dev) -{ - return 0; -} - static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1375,11 +1428,6 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) pdev->rom_attr = attr; } - /* add platform-specific attributes */ - retval = pcibios_add_platform_entries(pdev); - if (retval) - goto err_rom_file; - /* add sysfs entries for various capabilities */ retval = pci_create_capabilities_sysfs(pdev); if (retval) diff --git a/drivers/pci/pci.c 
b/drivers/pci/pci.c index 7325d43bf03..7ae7aa0166b 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1468,6 +1468,17 @@ void __weak pcibios_release_device(struct pci_dev *dev) {} */ void __weak pcibios_disable_device (struct pci_dev *dev) {} +/** + * pcibios_penalize_isa_irq - penalize an ISA IRQ + * @irq: ISA IRQ to penalize + * @active: IRQ active or not + * + * Permits the platform to provide architecture-specific functionality when + * penalizing ISA IRQs. This is the default implementation. Architecture + * implementations can override this. + */ +void __weak pcibios_penalize_isa_irq(int irq, int active) {} + static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; @@ -3067,7 +3078,8 @@ int pci_wait_for_pending_transaction(struct pci_dev *dev) if (!pci_is_pcie(dev)) return 1; - return pci_wait_for_pending(dev, PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); + return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_TRPND); } EXPORT_SYMBOL(pci_wait_for_pending_transaction); @@ -3109,7 +3121,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe) return 0; /* Wait for Transaction Pending bit clean */ - if (pci_wait_for_pending(dev, PCI_AF_STATUS, PCI_AF_STATUS_TP)) + if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) goto clear; dev_err(&dev->dev, "transaction is not cleared; " @@ -3305,8 +3317,27 @@ static void pci_dev_unlock(struct pci_dev *dev) pci_cfg_access_unlock(dev); } +/** + * pci_reset_notify - notify device driver of reset + * @dev: device to be notified of reset + * @prepare: 'true' if device is about to be reset; 'false' if reset attempt + * completed + * + * Must be called prior to device access being disabled and after device + * access is restored. + */ +static void pci_reset_notify(struct pci_dev *dev, bool prepare) +{ + const struct pci_error_handlers *err_handler = + dev->driver ? dev->driver->err_handler : NULL; + if (err_handler && err_handler->reset_notify) + err_handler->reset_notify(dev, prepare); +} + static void pci_dev_save_and_disable(struct pci_dev *dev) { + pci_reset_notify(dev, true); + /* * Wake-up device prior to save. 
PM registers default to D0 after * reset and a simple register restore doesn't reliably return @@ -3328,6 +3359,7 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) static void pci_dev_restore(struct pci_dev *dev) { pci_restore_state(dev); + pci_reset_notify(dev, false); } static int pci_dev_reset(struct pci_dev *dev, int probe) @@ -3344,6 +3376,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) return rc; } + /** * __pci_reset_function - reset a PCI device function * @dev: PCI device to reset @@ -4125,7 +4158,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode, u16 cmd; int rc; - WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); + WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); /* ARCH specific VGA enables */ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6bd082299e3..0601890db22 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -77,7 +77,7 @@ static inline void pci_wakeup_event(struct pci_dev *dev) pm_wakeup_event(&dev->dev, 100); } -static inline bool pci_is_bridge(struct pci_dev *pci_dev) +static inline bool pci_has_subordinate(struct pci_dev *pci_dev) { return !!(pci_dev->subordinate); } @@ -201,11 +201,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int reg); int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type); void pci_configure_ari(struct pci_dev *dev); -void __ref __pci_bus_size_bridges(struct pci_bus *bus, +void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head); -void __ref __pci_bus_assign_resources(const struct pci_bus *bus, - struct list_head *realloc_head, - struct list_head *fail_head); +void __pci_bus_assign_resources(const struct pci_bus *bus, + struct list_head *realloc_head, + struct list_head *fail_head); /** * pci_ari_enabled - query ARI forwarding status diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 986f8eadfd3..2f0ce668a77 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -99,7 +99,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) for (i = 0; i < nr_entries; i++) msix_entries[i].entry = i; - status = pci_enable_msix(dev, msix_entries, nr_entries); + status = pci_enable_msix_exact(dev, msix_entries, nr_entries); if (status) goto Exit; @@ -171,7 +171,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) pci_disable_msix(dev); /* Now allocate the MSI-X vectors for real */ - status = pci_enable_msix(dev, msix_entries, nvec); + status = pci_enable_msix_exact(dev, msix_entries, nvec); if (status) goto Exit; } @@ -379,10 +379,13 @@ int pcie_port_device_register(struct pci_dev *dev) /* * Initialize service irqs. Don't use service devices that * require interrupts if there is no way to generate them. + * However, some drivers may have a polling mode (e.g. pciehp_poll_mode) + * that can be used in the absence of irqs. Allow them to determine + * if that is to be used. 
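+ *
+ * Editorial illustration, not part of the original patch: on a port
+ * advertising hotplug and AER whose irq setup fails, the fallback
+ * below keeps PCIE_PORT_SERVICE_HP (pciehp can run in polling mode)
+ * while AER, which has no polling fallback, is still dropped from
+ * the capabilities mask.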
*/ status = init_service_irqs(dev, irqs, capabilities); if (status) { - capabilities &= PCIE_PORT_SERVICE_VC; + capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP; if (!capabilities) goto error_disable; } diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index ef09f5f2fe6..2bbf5221afb 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -171,9 +171,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int pos) { u32 l, sz, mask; + u64 l64, sz64, mask64; u16 orig_cmd; struct pci_bus_region region, inverted_region; - bool bar_too_big = false, bar_disabled = false; + bool bar_too_big = false, bar_too_high = false, bar_invalid = false; mask = type ? PCI_ROM_ADDRESS_MASK : ~0; @@ -226,9 +227,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, } if (res->flags & IORESOURCE_MEM_64) { - u64 l64 = l; - u64 sz64 = sz; - u64 mask64 = mask | (u64)~0 << 32; + l64 = l; + sz64 = sz; + mask64 = mask | (u64)~0 << 32; pci_read_config_dword(dev, pos + 4, &l); pci_write_config_dword(dev, pos + 4, ~0); @@ -243,19 +244,22 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, if (!sz64) goto fail; - if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { + if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) && + sz64 > 0x100000000ULL) { + res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; + res->start = 0; + res->end = 0; bar_too_big = true; - goto fail; + goto out; } - if ((sizeof(resource_size_t) < 8) && l) { - /* Address above 32-bit boundary; disable the BAR */ - pci_write_config_dword(dev, pos, 0); - pci_write_config_dword(dev, pos + 4, 0); + if ((sizeof(dma_addr_t) < 8) && l) { + /* Above 32-bit boundary; try to reallocate */ res->flags |= IORESOURCE_UNSET; - region.start = 0; - region.end = sz64; - bar_disabled = true; + res->start = 0; + res->end = sz64; + bar_too_high = true; + goto out; } else { region.start = l64; region.end = l64 + sz64; @@ -285,11 +289,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, * be claimed by the device. */ if (inverted_region.start != region.start) { - dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n", - pos, &region.start); res->flags |= IORESOURCE_UNSET; - res->end -= res->start; res->start = 0; + res->end = region.end - region.start; + bar_invalid = true; } goto out; @@ -303,8 +306,15 @@ out: pci_write_config_word(dev, PCI_COMMAND, orig_cmd); if (bar_too_big) - dev_err(&dev->dev, "reg 0x%x: can't handle 64-bit BAR\n", pos); - if (res->flags && !bar_disabled) + dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n", + pos, (unsigned long long) sz64); + if (bar_too_high) + dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n", + pos, (unsigned long long) l64); + if (bar_invalid) + dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n", + pos, (unsigned long long) region.start); + if (res->flags) dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res); return (res->flags & IORESOURCE_MEM_64) ?
1 : 0; @@ -465,7 +475,7 @@ void pci_read_bridge_bases(struct pci_bus *child) if (dev->transparent) { pci_bus_for_each_resource(child->parent, res, i) { - if (res) { + if (res && res->flags) { pci_bus_add_resource(child, res, PCI_SUBTRACTIVE_DECODE); dev_printk(KERN_DEBUG, &dev->dev, @@ -719,7 +729,7 @@ add_dev: return child; } -struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr) +struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr) { struct pci_bus *child; @@ -984,6 +994,43 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev) /** + * pci_ext_cfg_is_aliased - is ext config space just an alias of std config? + * @dev: PCI device + * + * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that + * when forwarding a type1 configuration request the bridge must check that + * the extended register address field is zero. The bridge is not permitted + * to forward the transaction and must handle it as an Unsupported Request. + * Some bridges do not follow this rule and simply drop the extended register + * bits, resulting in the standard config space being aliased, every 256 + * bytes across the entire configuration space. Test for this condition by + * comparing the first dword of each potential alias to the vendor/device ID. + * Known offenders: + * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03) + * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40) + */ +static bool pci_ext_cfg_is_aliased(struct pci_dev *dev) +{ +#ifdef CONFIG_PCI_QUIRKS + int pos; + u32 header, tmp; + + pci_read_config_dword(dev, PCI_VENDOR_ID, &header); + + for (pos = PCI_CFG_SPACE_SIZE; + pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) { + if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL + || header != tmp) + return false; + } + + return true; +#else + return false; +#endif +} + +/** * pci_cfg_space_size - get the configuration space size of the PCI device. * @dev: PCI device * @@ -1001,7 +1048,7 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev) if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) goto fail; - if (status == 0xffffffff) + if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev)) goto fail; return PCI_CFG_SPACE_EXP_SIZE; @@ -1215,6 +1262,7 @@ static void pci_release_dev(struct device *dev) pci_release_of_node(pci_dev); pcibios_release_device(pci_dev); pci_bus_put(pci_dev->bus); + kfree(pci_dev->driver_override); kfree(pci_dev); } @@ -1369,7 +1417,7 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) WARN_ON(ret < 0); } -struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn) +struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; @@ -1617,7 +1665,7 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) */ void pcie_bus_configure_settings(struct pci_bus *bus) { - u8 smpss; + u8 smpss = 0; if (!bus->self) return; @@ -1670,8 +1718,7 @@ unsigned int pci_scan_child_bus(struct pci_bus *bus) for (pass=0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) { - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) max = pci_scan_bridge(bus, dev, max, pass); } @@ -1958,7 +2005,7 @@ EXPORT_SYMBOL(pci_scan_bus); * * Returns the max number of subordinate bus discovered.
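 *
 * Editor's note on pci_ext_cfg_is_aliased() above, not part of this
 * patch: on an aliasing bridge the reads at 0x100, 0x200, ... 0xf00
 * all return the dword at offset 0 (the vendor/device ID), so all
 * fifteen comparisons match and config space is clipped to 256
 * bytes; a single mismatching dword proves the extended space is
 * real and the full 4096 bytes are kept.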
*/ -unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge) +unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge) { unsigned int max; struct pci_bus *bus = bridge->subordinate; @@ -1981,7 +2028,7 @@ unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge) * * Returns the max number of subordinate bus discovered. */ -unsigned int __ref pci_rescan_bus(struct pci_bus *bus) +unsigned int pci_rescan_bus(struct pci_bus *bus) { unsigned int max; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index e7292065a1b..92e68c7747f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2954,6 +2954,7 @@ static void disable_igfx_irq(struct pci_dev *dev) } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); /* * PCI devices which are on Intel chips can skip the 10ms delay @@ -2991,6 +2992,14 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ quirk_broken_intx_masking); +/* + * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) + * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC + * + * RTL8110SC - Fails under PCI device assignment using DisINTx masking. + */ +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169, + quirk_broken_intx_masking); static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) @@ -3453,6 +3462,8 @@ static const u16 pci_quirk_intel_pch_acs_ids[] = { /* Wildcat PCH */ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97, 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, + /* Patsburg (X79) PCH */ + 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, }; static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 4a1b972efe7..8e495bda678 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -7,7 +7,6 @@ * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com> */ -#include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 138bdd6393b..fd9b545c3cf 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -713,12 +713,11 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) bus resource of a given type. Note: we intentionally skip the bus resources which have already been assigned (that is, have non-NULL parent resource). 
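
   [Aside, not part of the patch: several hunks in this section replace
   open-coded header-type tests with the pci_is_bridge() helper that this
   series introduces; its effect is (sketch):

	static inline bool pci_is_bridge(struct pci_dev *dev)
	{
		return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		       dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
	}
   ]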
*/ -static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type) +static struct resource *find_free_bus_resource(struct pci_bus *bus, + unsigned long type_mask, unsigned long type) { int i; struct resource *r; - unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | - IORESOURCE_PREFETCH; pci_bus_for_each_resource(bus, r, i) { if (r == &ioport_resource || r == &iomem_resource) @@ -815,7 +814,8 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, resource_size_t add_size, struct list_head *realloc_head) { struct pci_dev *dev; - struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO); + struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO, + IORESOURCE_IO); resource_size_t size = 0, size0 = 0, size1 = 0; resource_size_t children_add_size = 0; resource_size_t min_align, align; @@ -907,36 +907,40 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns, * @bus: the bus * @mask: mask the resource flag, then compare it with type * @type: the type of free resource from bridge + * @type2: second match type + * @type3: third match type * @min_size: the minimum memory window that must be allocated * @add_size: additional optional memory window * @realloc_head: track the additional memory window on this list * * Calculate the size of the bus and minimal alignment which * guarantees that all child resources fit in this size. + * + * Returns -ENOSPC if there's no available bus resource of the desired type. + * Otherwise, sets the bus resource start/end to indicate the required + * size, adds things to realloc_head (if supplied), and returns 0. */ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, - unsigned long type, resource_size_t min_size, - resource_size_t add_size, - struct list_head *realloc_head) + unsigned long type, unsigned long type2, + unsigned long type3, + resource_size_t min_size, resource_size_t add_size, + struct list_head *realloc_head) { struct pci_dev *dev; resource_size_t min_align, align, size, size0, size1; - resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */ + resource_size_t aligns[14]; /* Alignments from 1MB to 8GB */ int order, max_order; - struct resource *b_res = find_free_bus_resource(bus, type); - unsigned int mem64_mask = 0; + struct resource *b_res = find_free_bus_resource(bus, + mask | IORESOURCE_PREFETCH, type); resource_size_t children_add_size = 0; if (!b_res) - return 0; + return -ENOSPC; memset(aligns, 0, sizeof(aligns)); max_order = 0; size = 0; - mem64_mask = b_res->flags & IORESOURCE_MEM_64; - b_res->flags &= ~IORESOURCE_MEM_64; - list_for_each_entry(dev, &bus->devices, bus_list) { int i; @@ -944,7 +948,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, struct resource *r = &dev->resource[i]; resource_size_t r_size; - if (r->parent || (r->flags & mask) != type) + if (r->parent || ((r->flags & mask) != type && + (r->flags & mask) != type2 && + (r->flags & mask) != type3)) continue; r_size = resource_size(r); #ifdef CONFIG_PCI_IOV @@ -957,10 +963,17 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, continue; } #endif - /* For bridges size != alignment */ + /* + * aligns[0] is for 1MB (since bridge memory + * windows are always at least 1MB aligned), so + * keep "order" from being negative for smaller + * resources. 
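+	 *
+	 * Worked example (editorial, not part of the patch), with
+	 * __ffs() returning the index of the lowest set bit:
+	 *
+	 *	align = 1MB  (1 << 20):    __ffs = 20 -> order 0
+	 *	align = 64KB (1 << 16):    __ffs = 16 -> order -4, clamped to 0
+	 *	align = 8GB  (1ULL << 33): __ffs = 33 -> order 13, the last
+	 *	                           aligns[] bucket (ARRAY_SIZE == 14)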
+ */ align = pci_resource_alignment(dev, r); order = __ffs(align) - 20; - if (order > 11) { + if (order < 0) + order = 0; + if (order >= ARRAY_SIZE(aligns)) { dev_warn(&dev->dev, "disabling BAR %d: %pR " "(bad alignment %#llx)\n", i, r, (unsigned long long) align); @@ -968,15 +981,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, continue; } size += r_size; - if (order < 0) - order = 0; /* Exclude ranges with size > align from calculation of the alignment. */ if (r_size == align) aligns[order] += align; if (order > max_order) max_order = order; - mem64_mask &= r->flags & IORESOURCE_MEM_64; if (realloc_head) children_add_size += get_res_add_size(realloc_head, r); @@ -997,18 +1007,18 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, "%pR to %pR (unused)\n", b_res, &bus->busn_res); b_res->flags = 0; - return 1; + return 0; } b_res->start = min_align; b_res->end = size0 + min_align - 1; - b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask; + b_res->flags |= IORESOURCE_STARTALIGN; if (size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window " "%pR to %pR add_size %llx\n", b_res, &bus->busn_res, (unsigned long long)size1-size0); } - return 1; + return 0; } unsigned long pci_cardbus_resource_alignment(struct resource *res) @@ -1113,12 +1123,13 @@ handle_done: ; } -void __ref __pci_bus_size_bridges(struct pci_bus *bus, - struct list_head *realloc_head) +void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *dev; - unsigned long mask, prefmask; + unsigned long mask, prefmask, type2 = 0, type3 = 0; resource_size_t additional_mem_size = 0, additional_io_size = 0; + struct resource *b_res; + int ret; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_bus *b = dev->subordinate; @@ -1152,41 +1163,93 @@ void __ref __pci_bus_size_bridges(struct pci_bus *bus, additional_io_size = pci_hotplug_io_size; additional_mem_size = pci_hotplug_mem_size; } - /* - * Follow thru - */ + /* Fall through */ default: pbus_size_io(bus, realloc_head ? 0 : additional_io_size, additional_io_size, realloc_head); - /* If the bridge supports prefetchable range, size it - separately. If it doesn't, or its prefetchable window - has already been allocated by arch code, try - non-prefetchable range for both types of PCI memory - resources. */ + + /* + * If there's a 64-bit prefetchable MMIO window, compute + * the size required to put all 64-bit prefetchable + * resources in it. + */ + b_res = &bus->self->resource[PCI_BRIDGE_RESOURCES]; mask = IORESOURCE_MEM; prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; - if (pbus_size_mem(bus, prefmask, prefmask, + if (b_res[2].flags & IORESOURCE_MEM_64) { + prefmask |= IORESOURCE_MEM_64; + ret = pbus_size_mem(bus, prefmask, prefmask, + prefmask, prefmask, realloc_head ? 0 : additional_mem_size, - additional_mem_size, realloc_head)) - mask = prefmask; /* Success, size non-prefetch only. */ - else - additional_mem_size += additional_mem_size; - pbus_size_mem(bus, mask, IORESOURCE_MEM, + additional_mem_size, realloc_head); + + /* + * If successful, all non-prefetchable resources + * and any 32-bit prefetchable resources will go in + * the non-prefetchable window. 
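+	 *
+	 * Sketch (editorial, not part of the patch) of the filter the
+	 * call above sets up: with mask == type == type2 == type3 ==
+	 * prefmask, a child resource counts toward the 64-bit window
+	 * only when
+	 *
+	 *	(r->flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH |
+	 *		     IORESOURCE_MEM_64)) ==
+	 *	    (IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64)
+	 *
+	 * i.e. it must be memory, prefetchable and 64-bit all at once.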
+ */ + if (ret == 0) { + mask = prefmask; + type2 = prefmask & ~IORESOURCE_MEM_64; + type3 = prefmask & ~IORESOURCE_PREFETCH; + } + } + + /* + * If there is no 64-bit prefetchable window, compute the + * size required to put all prefetchable resources in the + * 32-bit prefetchable window (if there is one). + */ + if (!type2) { + prefmask &= ~IORESOURCE_MEM_64; + ret = pbus_size_mem(bus, prefmask, prefmask, + prefmask, prefmask, + realloc_head ? 0 : additional_mem_size, + additional_mem_size, realloc_head); + + /* + * If successful, only non-prefetchable resources + * will go in the non-prefetchable window. + */ + if (ret == 0) + mask = prefmask; + else + additional_mem_size += additional_mem_size; + + type2 = type3 = IORESOURCE_MEM; + } + + /* + * Compute the size required to put everything else in the + * non-prefetchable window. This includes: + * + * - all non-prefetchable resources + * - 32-bit prefetchable resources if there's a 64-bit + * prefetchable window or no prefetchable window at all + * - 64-bit prefetchable resources if there's no + * prefetchable window at all + * + * Note that the strategy in __pci_assign_resource() must + * match that used here. Specifically, we cannot put a + * 32-bit prefetchable resource in a 64-bit prefetchable + * window. + */ + pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3, realloc_head ? 0 : additional_mem_size, additional_mem_size, realloc_head); break; } } -void __ref pci_bus_size_bridges(struct pci_bus *bus) +void pci_bus_size_bridges(struct pci_bus *bus) { __pci_bus_size_bridges(bus, NULL); } EXPORT_SYMBOL(pci_bus_size_bridges); -void __ref __pci_bus_assign_resources(const struct pci_bus *bus, - struct list_head *realloc_head, - struct list_head *fail_head) +void __pci_bus_assign_resources(const struct pci_bus *bus, + struct list_head *realloc_head, + struct list_head *fail_head) { struct pci_bus *b; struct pci_dev *dev; @@ -1218,15 +1281,15 @@ void __ref __pci_bus_assign_resources(const struct pci_bus *bus, } } -void __ref pci_bus_assign_resources(const struct pci_bus *bus) +void pci_bus_assign_resources(const struct pci_bus *bus) { __pci_bus_assign_resources(bus, NULL, NULL); } EXPORT_SYMBOL(pci_bus_assign_resources); -static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge, - struct list_head *add_head, - struct list_head *fail_head) +static void __pci_bridge_assign_resources(const struct pci_dev *bridge, + struct list_head *add_head, + struct list_head *fail_head) { struct pci_bus *b; @@ -1257,42 +1320,66 @@ static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge, static void pci_bridge_release_resources(struct pci_bus *bus, unsigned long type) { - int idx; - bool changed = false; - struct pci_dev *dev; + struct pci_dev *dev = bus->self; struct resource *r; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | - IORESOURCE_PREFETCH; + IORESOURCE_PREFETCH | IORESOURCE_MEM_64; + unsigned old_flags = 0; + struct resource *b_res; + int idx = 1; - dev = bus->self; - for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END; - idx++) { - r = &dev->resource[idx]; - if ((r->flags & type_mask) != type) - continue; - if (!r->parent) - continue; - /* - * if there are children under that, we should release them - * all - */ - release_child_resources(r); - if (!release_resource(r)) { - dev_printk(KERN_DEBUG, &dev->dev, - "resource %d %pR released\n", idx, r); - /* keep the old size */ - r->end = resource_size(r) - 1; - r->start = 0; - r->flags = 0; - changed = true; - } - } + b_res = 
&dev->resource[PCI_BRIDGE_RESOURCES]; + + /* + * 1. If the I/O port assignment fails, release the bridge's + * I/O port window. + * 2. If the non-prefetchable MMIO assignment fails, release + * the bridge's non-prefetchable MMIO window. + * 3. If a 64-bit prefetchable MMIO assignment fails and the + * bridge's prefetchable window is 64-bit, release the + * bridge's prefetchable MMIO window. + * 4. If a prefetchable MMIO assignment fails and the bridge's + * prefetchable window is 32-bit, release the bridge's + * prefetchable MMIO window. + * 5. If a prefetchable MMIO assignment fails and the bridge has + * no prefetchable window assigned, release the bridge's + * non-prefetchable MMIO window. + */ + if (type & IORESOURCE_IO) + idx = 0; + else if (!(type & IORESOURCE_PREFETCH)) + idx = 1; + else if ((type & IORESOURCE_MEM_64) && + (b_res[2].flags & IORESOURCE_MEM_64)) + idx = 2; + else if (!(b_res[2].flags & IORESOURCE_MEM_64) && + (b_res[2].flags & IORESOURCE_PREFETCH)) + idx = 2; + else + idx = 1; + + r = &b_res[idx]; + + if (!r->parent) + return; + + /* + * if there are children under it, we should release them + * all + */ + release_child_resources(r); + if (!release_resource(r)) { + type = old_flags = r->flags & type_mask; + dev_printk(KERN_DEBUG, &dev->dev, "resource %d %pR released\n", + PCI_BRIDGE_RESOURCES + idx, r); + /* keep the old size */ + r->end = resource_size(r) - 1; + r->start = 0; + r->flags = 0; - if (changed) { /* avoid touching the one without PREF */ if (type & IORESOURCE_PREFETCH) type = IORESOURCE_PREFETCH; __pci_setup_bridge(bus, type); + /* for the next child res under the same bridge */ + r->flags = old_flags; } } @@ -1304,9 +1391,9 @@ enum release_type { * try to release PCI bridge resources that are from leaf bridges, * so we can allocate a big new one later */ -static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus, - unsigned long type, - enum release_type rel_type) +static void pci_bus_release_bridge_resources(struct pci_bus *bus, - unsigned long type, - enum release_type rel_type) +static void pci_bus_release_bridge_resources(struct pci_bus *bus, + unsigned long type, + enum release_type rel_type) { struct pci_dev *dev; bool is_leaf_bridge = true; @@ -1471,7 +1558,7 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) LIST_HEAD(fail_head); struct pci_dev_resource *fail_res; unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM | - IORESOURCE_PREFETCH; + IORESOURCE_PREFETCH | IORESOURCE_MEM_64; int pci_try_num = 1; enum enable_type enable_local; @@ -1629,9 +1716,7 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus) down_read(&pci_bus_sem); list_for_each_entry(dev, &bus->devices, bus_list) - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) - if (dev->subordinate) + if (pci_is_bridge(dev) && pci_has_subordinate(dev)) __pci_bus_size_bridges(dev->subordinate, &add_list); up_read(&pci_bus_sem); diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c index 9bd6864ec5d..dbc4ffcf42d 100644 --- a/drivers/pci/setup-irq.c +++ b/drivers/pci/setup-irq.c @@ -10,7 +10,6 @@ */ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/errno.h> diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 7eed671d558..33f9e32d94d 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -16,7 +16,6 @@ * Resource sorting */ -#include <linux/init.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> @@ -209,21 +208,42 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; - /* First, try exact prefetching match.. */ + /* + * First, try exact prefetching match. 
Even if a 64-bit + * prefetchable bridge window is below 4GB, we can't put a 32-bit + * prefetchable resource in it because pbus_size_mem() assumes a + * 64-bit window will contain no 32-bit resources. If we assign + * things differently than they were sized, not everything will fit. + */ ret = pci_bus_alloc_resource(bus, res, size, align, min, - IORESOURCE_PREFETCH, + IORESOURCE_PREFETCH | IORESOURCE_MEM_64, pcibios_align_resource, dev); + if (ret == 0) + return 0; - if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) { - /* - * That failed. - * - * But a prefetching area can handle a non-prefetching - * window (it will just not perform as well). - */ - ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, + /* + * If the prefetchable window is only 32 bits wide, we can put + * 64-bit prefetchable resources in it. + */ + if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) == + (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) { + ret = pci_bus_alloc_resource(bus, res, size, align, min, + IORESOURCE_PREFETCH, pcibios_align_resource, dev); + if (ret == 0) + return 0; } + + /* + * If we didn't find a better match, we can put any memory resource + * in a non-prefetchable window. If this resource is 32 bits and + * non-prefetchable, the first call already tried the only possibility + * so we don't need to try again. + */ + if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) + ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, + pcibios_align_resource, dev); + return ret; } diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index 8bde61952d2..4fe4cc4ae19 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c @@ -78,8 +78,7 @@ int __ref cb_alloc(struct pcmcia_socket *s) max = bus->busn_res.start; for (pass = 0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) max = pci_scan_bridge(bus, dev, max, pass); /* diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c index 92ed4b2e3c0..c862f9c0e9c 100644 --- a/drivers/pinctrl/pinctrl-as3722.c +++ b/drivers/pinctrl/pinctrl-as3722.c @@ -64,7 +64,6 @@ struct as3722_pin_function { }; struct as3722_gpio_pin_control { - bool enable_gpio_invert; unsigned mode_prop; int io_function; }; @@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev, return mode; } - if (as_pci->gpio_control[offset].enable_gpio_invert) - mode |= AS3722_GPIO_INV; - - return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode); + return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset), + AS3722_GPIO_MODE_MASK, mode); } static const struct pinmux_ops as3722_pinmux_ops = { @@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset, { struct as3722_pctrl_info *as_pci = to_as_pci(chip); struct as3722 *as3722 = as_pci->as3722; - int en_invert = as_pci->gpio_control[offset].enable_gpio_invert; + int en_invert; u32 val; int ret; + ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val); + if (ret < 0) { + dev_err(as_pci->dev, + "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret); + return; + } + en_invert = !!(val & AS3722_GPIO_INV); + if (value) val = (en_invert) ? 
0 : AS3722_GPIOn_SIGNAL(offset); else diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index 81075f2a1d3..2960557bfed 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = { static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, unsigned pin_pos) { + struct pcs_soc_data *pcs_soc = &pcs->socdata; struct pinctrl_pin_desc *pin; struct pcs_name *pn; int i; @@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, return -ENOMEM; } + if (pcs_soc->irq_enable_mask) { + unsigned val; + + val = pcs->read(pcs->base + offset); + if (val & pcs_soc->irq_enable_mask) { + dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n", + (unsigned long)pcs->res->start + offset, val); + val &= ~pcs_soc->irq_enable_mask; + pcs->write(val, pcs->base + offset); + } + } + pin = &pcs->pins.pa[i]; pn = &pcs->names[i]; sprintf(pn->name, "%lx.%d", diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c index c5e0f6973a3..26ca6855f47 100644 --- a/drivers/pinctrl/pinctrl-tb10x.c +++ b/drivers/pinctrl/pinctrl-tb10x.c @@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl, */ for (i = 0; i < state->pinfuncgrpcnt; i++) { const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i]; - unsigned int port = pfg->port; unsigned int mode = pfg->mode; - int j; + int j, port = pfg->port; /* * Skip pin groups which are always mapped and don't need diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c index 48093719167..f5cd3f96180 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c @@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { FN_MSIOF0_SCK_B, 0, /* IP5_23_21 [3] */ FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4, - FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, - FN_IERX_C, 0, + FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C, /* IP5_20_18 [3] */ FN_WE0_N, FN_IECLK, FN_CAN_CLK, FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0, diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 5186d70c49d..7868bf3a0f9 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { /* SEL_SCIF3 [2] */ FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3, /* SEL_IEB [2] */ - FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, + FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0, /* SEL_MMC [1] */ FN_SEL_MMC_0, FN_SEL_MMC_1, /* SEL_SCIF5 [1] */ diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c index 9802b67040c..2c61281bebd 100644 --- a/drivers/pinctrl/vt8500/pinctrl-wmt.c +++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c @@ -523,17 +523,6 @@ static int wmt_gpio_get_direction(struct gpio_chip *chip, unsigned offset) return GPIOF_DIR_IN; } -static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -{ - return pinctrl_gpio_direction_input(chip->base + offset); -} - -static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset, - int value) -{ - return pinctrl_gpio_direction_output(chip->base + offset); -} - static int wmt_gpio_get_value(struct gpio_chip *chip, unsigned offset) { struct wmt_pinctrl_data *data = dev_get_drvdata(chip->dev); @@ -568,6 +557,18 @@ static void wmt_gpio_set_value(struct gpio_chip 
*chip, unsigned offset, wmt_clearbits(data, reg_data_out, BIT(bit)); } +static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset) +{ + return pinctrl_gpio_direction_input(chip->base + offset); +} + +static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset, + int value) +{ + wmt_gpio_set_value(chip, offset, value); + return pinctrl_gpio_direction_output(chip->base + offset); +} + static struct gpio_chip wmt_gpio_chip = { .label = "gpio-wmt", .owner = THIS_MODULE, diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index c5e082fb82f..91ef69a5226 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -642,8 +642,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus) dev = pci_scan_single_device(bus, 0); if (dev) { pci_bus_assign_resources(bus); - if (pci_bus_add_device(dev)) - pr_err("Unable to hotplug wifi\n"); + pci_bus_add_device(dev); } } else { dev = pci_get_slot(bus, 0); diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 399e8c56219..9b0c57cd1d4 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -633,8 +633,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) dev = pci_scan_single_device(bus, 0); if (dev) { pci_bus_assign_resources(bus); - if (pci_bus_add_device(dev)) - pr_err("Unable to hotplug wifi\n"); + pci_bus_add_device(dev); } } else { dev = pci_get_slot(bus, 0); diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 9f611cbbc29..c31aa07b3ba 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; - struct acpi_buffer buffer; - int ret; + int ret = 0; pnp_dbg(&dev->dev, "set resources\n"); @@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev) if (WARN_ON_ONCE(acpi_dev != dev->data)) dev->data = acpi_dev; - ret = pnpacpi_build_resource_template(dev, &buffer); - if (ret) - return ret; - ret = pnpacpi_encode_resources(dev, &buffer); - if (ret) { + if (acpi_has_method(handle, METHOD_NAME__SRS)) { + struct acpi_buffer buffer; + + ret = pnpacpi_build_resource_template(dev, &buffer); + if (ret) + return ret; + + ret = pnpacpi_encode_resources(dev, &buffer); + if (!ret) { + acpi_status status; + + status = acpi_set_current_resources(handle, &buffer); + if (ACPI_FAILURE(status)) + ret = -EIO; + } kfree(buffer.pointer); - return ret; } - if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer))) - ret = -EINVAL; - else if (acpi_bus_power_manageable(handle)) + if (!ret && acpi_bus_power_manageable(handle)) ret = acpi_bus_set_power(handle, ACPI_STATE_D0); - kfree(buffer.pointer); + return ret; } @@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) { struct acpi_device *acpi_dev; acpi_handle handle; - int ret; + acpi_status status; dev_dbg(&dev->dev, "disable resources\n"); @@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev) } /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ - ret = 0; if (acpi_bus_power_manageable(handle)) acpi_bus_set_power(handle, ACPI_STATE_D3_COLD); - /* continue even if acpi_bus_set_power() fails */ - if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL))) - ret = -ENODEV; - return ret; + + /* continue even if acpi_bus_set_power() fails */ + status = acpi_evaluate_object(handle, "_DIS", NULL, NULL); + if 
(ACPI_FAILURE(status) && status != AE_NOT_FOUND) + return -ENODEV; + + return 0; } #ifdef CONFIG_ACPI_SLEEP diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c index deb7f4bcdb7..438d4c72c7b 100644 --- a/drivers/pnp/pnpbios/bioscalls.c +++ b/drivers/pnp/pnpbios/bioscalls.c @@ -37,7 +37,7 @@ __visible struct { * kernel begins at offset 3GB... */ -asmlinkage void pnp_bios_callfunc(void); +asmlinkage __visible void pnp_bios_callfunc(void); __asm__(".text \n" __ALIGN_STR "\n" diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index 3736bc408ad..ebf0d6710b5 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -335,7 +335,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev) } #endif -#ifdef CONFIG_X86 +#ifdef CONFIG_PCI /* Device IDs of parts that have 32KB MCH space */ static const unsigned int mch_quirk_devices[] = { 0x0154, /* Ivy Bridge */ @@ -440,7 +440,7 @@ static struct pnp_fixup pnp_fixups[] = { #ifdef CONFIG_AMD_NB {"PNP0c01", quirk_amd_mmconfig_area}, #endif -#ifdef CONFIG_X86 +#ifdef CONFIG_PCI {"PNP0c02", quirk_intel_mch}, #endif {""} diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index fa0e4e057b9..bdcf5173e37 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -12,6 +12,14 @@ config POWER_RESET_AS3722 help This driver supports turning off the board via an ams AS3722 power-off. +config POWER_RESET_AXXIA + bool "LSI Axxia reset driver" + depends on POWER_RESET && ARCH_AXXIA + help + This driver supports restart for Axxia SoCs. + + Say Y if you have an Axxia family SoC. + config POWER_RESET_GPIO bool "GPIO power-off driver" depends on OF_GPIO && POWER_RESET @@ -43,6 +51,13 @@ config POWER_RESET_RESTART Instead they restart, and u-boot holds the SoC until the user presses a key. u-boot then boots into Linux. +config POWER_RESET_SUN6I + bool "Allwinner A31 SoC reset driver" + depends on ARCH_SUNXI + depends on POWER_RESET + help + Reboot support for the Allwinner A31 SoCs. + config POWER_RESET_VEXPRESS bool "ARM Versatile Express power-off and reset driver" depends on ARM || ARM64 @@ -57,3 +72,11 @@ config POWER_RESET_XGENE depends on POWER_RESET help Reboot support for the APM SoC X-Gene Eval boards. + +config POWER_RESET_KEYSTONE + bool "Keystone reset driver" + depends on ARCH_KEYSTONE + select MFD_SYSCON + help + Reboot support for the Keystone SoCs. 
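
[Aside, not part of the patch: the Axxia, sun6i and Keystone drivers
added below share one shape, a platform driver whose probe resolves the
SoC's reset registers and installs a handler in the ARM-global
arm_pm_restart hook. Minimal sketch, names illustrative:

	static void soc_restart(enum reboot_mode mode, const char *cmd)
	{
		/* poke the SoC-specific reset register here */
	}

	static int soc_reset_probe(struct platform_device *pdev)
	{
		arm_pm_restart = soc_restart;	/* called at reboot time */
		return 0;
	}
]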
+ diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index a5b4a77d1a4..dde2e8bbac5 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -1,7 +1,10 @@ obj-$(CONFIG_POWER_RESET_AS3722) += as3722-poweroff.o +obj-$(CONFIG_POWER_RESET_AXXIA) += axxia-reset.o obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o +obj-$(CONFIG_POWER_RESET_SUN6I) += sun6i-reboot.o obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o +obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o diff --git a/drivers/power/reset/axxia-reset.c b/drivers/power/reset/axxia-reset.c new file mode 100644 index 00000000000..3b1f8d60178 --- /dev/null +++ b/drivers/power/reset/axxia-reset.c @@ -0,0 +1,88 @@ +/* + * Reset driver for Axxia devices + * + * Copyright (C) 2014 LSI + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/init.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> +#include <linux/regmap.h> + +#include <asm/system_misc.h> + + +#define SC_CRIT_WRITE_KEY 0x1000 +#define SC_LATCH_ON_RESET 0x1004 +#define SC_RESET_CONTROL 0x1008 +#define RSTCTL_RST_ZERO (1<<3) +#define RSTCTL_RST_FAB (1<<2) +#define RSTCTL_RST_CHIP (1<<1) +#define RSTCTL_RST_SYS (1<<0) +#define SC_EFUSE_INT_STATUS 0x180c +#define EFUSE_READ_DONE (1<<31) + +static struct regmap *syscon; + +static void do_axxia_restart(enum reboot_mode reboot_mode, const char *cmd) +{ + /* Access Key (0xab) */ + regmap_write(syscon, SC_CRIT_WRITE_KEY, 0xab); + /* Select internal boot from 0xffff0000 */ + regmap_write(syscon, SC_LATCH_ON_RESET, 0x00000040); + /* Assert ResetReadDone (to avoid hanging in boot ROM) */ + regmap_write(syscon, SC_EFUSE_INT_STATUS, EFUSE_READ_DONE); + /* Assert chip reset */ + regmap_update_bits(syscon, SC_RESET_CONTROL, + RSTCTL_RST_CHIP, RSTCTL_RST_CHIP); +} + +static int axxia_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon"); + if (IS_ERR(syscon)) { + pr_err("%s: syscon lookup failed\n", dev->of_node->name); + return PTR_ERR(syscon); + } + + arm_pm_restart = do_axxia_restart; + + return 0; +} + +static const struct of_device_id of_axxia_reset_match[] = { + { .compatible = "lsi,axm55xx-reset", }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_axxia_reset_match); + +static struct platform_driver axxia_reset_driver = { + .probe = axxia_reset_probe, + .driver = { + .name = "axxia-reset", + .of_match_table = of_match_ptr(of_axxia_reset_match), + }, +}; + +static int __init axxia_reset_init(void) +{ + return platform_driver_register(&axxia_reset_driver); +} +device_initcall(axxia_reset_init); diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c new file mode 100644 index 
00000000000..408a18fd91c --- /dev/null +++ b/drivers/power/reset/keystone-reset.c @@ -0,0 +1,166 @@ +/* + * TI keystone reboot driver + * + * Copyright (C) 2014 Texas Instruments Incorporated. http://www.ti.com/ + * + * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/reboot.h> +#include <linux/regmap.h> +#include <asm/system_misc.h> +#include <linux/mfd/syscon.h> +#include <linux/of_platform.h> + +#define RSTYPE_RG 0x0 +#define RSCTRL_RG 0x4 +#define RSCFG_RG 0x8 +#define RSISO_RG 0xc + +#define RSCTRL_KEY_MASK 0x0000ffff +#define RSCTRL_RESET_MASK BIT(16) +#define RSCTRL_KEY 0x5a69 + +#define RSMUX_OMODE_MASK 0xe +#define RSMUX_OMODE_RESET_ON 0xa +#define RSMUX_OMODE_RESET_OFF 0x0 +#define RSMUX_LOCK_MASK 0x1 +#define RSMUX_LOCK_SET 0x1 + +#define RSCFG_RSTYPE_SOFT 0x300f +#define RSCFG_RSTYPE_HARD 0x0 + +#define WDT_MUX_NUMBER 0x4 + +static int rspll_offset; +static struct regmap *pllctrl_regs; + +/** + * rsctrl_enable_rspll_write - enable access to RSCTRL, RSCFG + * To be able to access the RSCTRL and RSCFG registers, + * we have to write a key first + */ +static inline int rsctrl_enable_rspll_write(void) +{ + return regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG, + RSCTRL_KEY_MASK, RSCTRL_KEY); +} + +static void rsctrl_restart(enum reboot_mode mode, const char *cmd) +{ + /* enable write access to RSTCTRL */ + rsctrl_enable_rspll_write(); + + /* reset the SoC */ + regmap_update_bits(pllctrl_regs, rspll_offset + RSCTRL_RG, + RSCTRL_RESET_MASK, 0); +} + +static struct of_device_id rsctrl_of_match[] = { + {.compatible = "ti,keystone-reset", }, + {}, +}; + +static int rsctrl_probe(struct platform_device *pdev) +{ + int i; + int ret; + u32 val; + unsigned int rg; + u32 rsmux_offset; + struct regmap *devctrl_regs; + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + + if (!np) + return -ENODEV; + + /* get regmaps */ + pllctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pll"); + if (IS_ERR(pllctrl_regs)) + return PTR_ERR(pllctrl_regs); + + devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-dev"); + if (IS_ERR(devctrl_regs)) + return PTR_ERR(devctrl_regs); + + ret = of_property_read_u32_index(np, "ti,syscon-pll", 1, &rspll_offset); + if (ret) { + dev_err(dev, "couldn't read the reset pll offset!\n"); + return -EINVAL; + } + + ret = of_property_read_u32_index(np, "ti,syscon-dev", 1, &rsmux_offset); + if (ret) { + dev_err(dev, "couldn't read the rsmux offset!\n"); + return -EINVAL; + } + + /* set soft/hard reset */ + val = of_property_read_bool(np, "ti,soft-reset"); + val = val ? 
RSCFG_RSTYPE_SOFT : RSCFG_RSTYPE_HARD; + + ret = rsctrl_enable_rspll_write(); + if (ret) + return ret; + + ret = regmap_write(pllctrl_regs, rspll_offset + RSCFG_RG, val); + if (ret) + return ret; + + arm_pm_restart = rsctrl_restart; + + /* disable a reset isolation for all module clocks */ + ret = regmap_write(pllctrl_regs, rspll_offset + RSISO_RG, 0); + if (ret) + return ret; + + /* enable a reset for watchdogs from wdt-list */ + for (i = 0; i < WDT_MUX_NUMBER; i++) { + ret = of_property_read_u32_index(np, "ti,wdt-list", i, &val); + if (ret == -EOVERFLOW && !i) { + dev_err(dev, "ti,wdt-list property has to contain at " + "least one entry\n"); + return -EINVAL; + } else if (ret) { + break; + } + + if (val >= WDT_MUX_NUMBER) { + dev_err(dev, "ti,wdt-list property can contain " + "only numbers < 4\n"); + return -EINVAL; + } + + rg = rsmux_offset + val * 4; + + ret = regmap_update_bits(devctrl_regs, rg, RSMUX_OMODE_MASK, + RSMUX_OMODE_RESET_ON | + RSMUX_LOCK_SET); + if (ret) + return ret; + } + + return 0; +} + +static struct platform_driver rsctrl_driver = { + .probe = rsctrl_probe, + .driver = { + .owner = THIS_MODULE, + .name = KBUILD_MODNAME, + .of_match_table = rsctrl_of_match, + }, +}; +module_platform_driver(rsctrl_driver); + +MODULE_AUTHOR("Ivan Khoronzhuk <ivan.khoronzhuk@ti.com>"); +MODULE_DESCRIPTION("Texas Instruments keystone reset driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" KBUILD_MODNAME); diff --git a/drivers/power/reset/sun6i-reboot.c b/drivers/power/reset/sun6i-reboot.c new file mode 100644 index 00000000000..af2cd7ff2fe --- /dev/null +++ b/drivers/power/reset/sun6i-reboot.c @@ -0,0 +1,85 @@ +/* + * Allwinner A31 SoCs reset code + * + * Copyright (C) 2012-2014 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/reboot.h> + +#include <asm/system_misc.h> + +#define SUN6I_WATCHDOG1_IRQ_REG 0x00 +#define SUN6I_WATCHDOG1_CTRL_REG 0x10 +#define SUN6I_WATCHDOG1_CTRL_RESTART BIT(0) +#define SUN6I_WATCHDOG1_CONFIG_REG 0x14 +#define SUN6I_WATCHDOG1_CONFIG_RESTART BIT(0) +#define SUN6I_WATCHDOG1_CONFIG_IRQ BIT(1) +#define SUN6I_WATCHDOG1_MODE_REG 0x18 +#define SUN6I_WATCHDOG1_MODE_ENABLE BIT(0) + +static void __iomem *wdt_base; + +static void sun6i_wdt_restart(enum reboot_mode mode, const char *cmd) +{ + if (!wdt_base) + return; + + /* Disable interrupts */ + writel(0, wdt_base + SUN6I_WATCHDOG1_IRQ_REG); + + /* We want to disable the IRQ and just reset the whole system */ + writel(SUN6I_WATCHDOG1_CONFIG_RESTART, + wdt_base + SUN6I_WATCHDOG1_CONFIG_REG); + + /* Enable timer. The default and lowest interval value is 0.5s */ + writel(SUN6I_WATCHDOG1_MODE_ENABLE, + wdt_base + SUN6I_WATCHDOG1_MODE_REG); + + /* Restart the watchdog. 
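+	 *
+	 * Aside, not part of the patch: the whole sequence amounts to
+	 * "arm the shortest watchdog timeout, then spin until it fires".
+	 * In pseudo-C (helper names hypothetical):
+	 *
+	 *	wdt_irq_disable();		// IRQ_REG = 0
+	 *	wdt_select_system_reset();	// CONFIG_REG = RESTART
+	 *	wdt_enable_min_timeout();	// MODE_REG = ENABLE, 0.5s
+	 *	wdt_kick();			// CTRL_REG = RESTART (below)
+	 *	for (;;) mdelay(5);		// wait for the reset to bite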
*/ + writel(SUN6I_WATCHDOG1_CTRL_RESTART, + wdt_base + SUN6I_WATCHDOG1_CTRL_REG); + + while (1) { + mdelay(5); + writel(SUN6I_WATCHDOG1_MODE_ENABLE, + wdt_base + SUN6I_WATCHDOG1_MODE_REG); + } +} + +static int sun6i_reboot_probe(struct platform_device *pdev) +{ + wdt_base = of_iomap(pdev->dev.of_node, 0); + if (!wdt_base) { + WARN(1, "failed to map watchdog base address"); + return -ENODEV; + } + + arm_pm_restart = sun6i_wdt_restart; + + return 0; +} + +static struct of_device_id sun6i_reboot_of_match[] = { + { .compatible = "allwinner,sun6i-a31-wdt" }, + {} +}; + +static struct platform_driver sun6i_reboot_driver = { + .probe = sun6i_reboot_probe, + .driver = { + .name = "sun6i-reboot", + .of_match_table = sun6i_reboot_of_match, + }, +}; +module_platform_driver(sun6i_reboot_driver); diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c index b95cf71ed69..4dc102e2b23 100644 --- a/drivers/power/reset/vexpress-poweroff.c +++ b/drivers/power/reset/vexpress-poweroff.c @@ -23,10 +23,10 @@ static void vexpress_reset_do(struct device *dev, const char *what) { int err = -ENOENT; - struct vexpress_config_func *func = dev_get_drvdata(dev); + struct regmap *reg = dev_get_drvdata(dev); - if (func) { - err = vexpress_config_write(func, 0, 0); + if (reg) { + err = regmap_write(reg, 0, 0); if (!err) mdelay(1000); } @@ -91,17 +91,17 @@ static int vexpress_reset_probe(struct platform_device *pdev) enum vexpress_reset_func func; const struct of_device_id *match = of_match_device(vexpress_reset_of_match, &pdev->dev); - struct vexpress_config_func *config_func; + struct regmap *regmap; if (match) func = (enum vexpress_reset_func)match->data; else func = pdev->id_entry->driver_data; - config_func = vexpress_config_func_get_by_dev(&pdev->dev); - if (!config_func) - return -EINVAL; - dev_set_drvdata(&pdev->dev, config_func); + regmap = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + dev_set_drvdata(&pdev->dev, regmap); switch (func) { case FUNC_SHUTDOWN: diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig index 6963bdf5417..6aea373547f 100644 --- a/drivers/ptp/Kconfig +++ b/drivers/ptp/Kconfig @@ -6,6 +6,7 @@ menu "PTP clock support" config PTP_1588_CLOCK tristate "PTP clock support" + depends on NET select PPS select NET_PTP_CLASSIFY help @@ -74,7 +75,7 @@ config DP83640_PHY config PTP_1588_CLOCK_PCH tristate "Intel PCH EG20T as PTP clock" depends on X86 || COMPILE_TEST - depends on HAS_IOMEM + depends on HAS_IOMEM && NET select PTP_1588_CLOCK help This driver adds support for using the PCH EG20T as a PTP diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c index f3ae28a7e66..2863428813e 100644 --- a/drivers/regulator/vexpress.c +++ b/drivers/regulator/vexpress.c @@ -26,14 +26,14 @@ struct vexpress_regulator { struct regulator_desc desc; struct regulator_dev *regdev; - struct vexpress_config_func *func; + struct regmap *regmap; }; static int vexpress_regulator_get_voltage(struct regulator_dev *regdev) { struct vexpress_regulator *reg = rdev_get_drvdata(regdev); u32 uV; - int err = vexpress_config_read(reg->func, 0, &uV); + int err = regmap_read(reg->regmap, 0, &uV); return err ? 
err : uV; } @@ -43,7 +43,7 @@ static int vexpress_regulator_set_voltage(struct regulator_dev *regdev, { struct vexpress_regulator *reg = rdev_get_drvdata(regdev); - return vexpress_config_write(reg->func, 0, min_uV); + return regmap_write(reg->regmap, 0, min_uV); } static struct regulator_ops vexpress_regulator_ops_ro = { @@ -57,22 +57,17 @@ static struct regulator_ops vexpress_regulator_ops = { static int vexpress_regulator_probe(struct platform_device *pdev) { - int err; struct vexpress_regulator *reg; struct regulator_init_data *init_data; struct regulator_config config = { }; reg = devm_kzalloc(&pdev->dev, sizeof(*reg), GFP_KERNEL); - if (!reg) { - err = -ENOMEM; - goto error_kzalloc; - } + if (!reg) + return -ENOMEM; - reg->func = vexpress_config_func_get_by_dev(&pdev->dev); - if (!reg->func) { - err = -ENXIO; - goto error_get_func; - } + reg->regmap = devm_regmap_init_vexpress_config(&pdev->dev); + if (IS_ERR(reg->regmap)) + return PTR_ERR(reg->regmap); reg->desc.name = dev_name(&pdev->dev); reg->desc.type = REGULATOR_VOLTAGE; @@ -80,10 +75,8 @@ static int vexpress_regulator_probe(struct platform_device *pdev) reg->desc.continuous_voltage_range = true; init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node); - if (!init_data) { - err = -EINVAL; - goto error_get_regulator_init_data; - } + if (!init_data) + return -EINVAL; init_data->constraints.apply_uV = 0; if (init_data->constraints.min_uV && init_data->constraints.max_uV) @@ -97,30 +90,12 @@ static int vexpress_regulator_probe(struct platform_device *pdev) config.of_node = pdev->dev.of_node; reg->regdev = devm_regulator_register(&pdev->dev, ®->desc, &config); - if (IS_ERR(reg->regdev)) { - err = PTR_ERR(reg->regdev); - goto error_regulator_register; - } + if (IS_ERR(reg->regdev)) + return PTR_ERR(reg->regdev); platform_set_drvdata(pdev, reg); return 0; - -error_regulator_register: -error_get_regulator_init_data: - vexpress_config_func_put(reg->func); -error_get_func: -error_kzalloc: - return err; -} - -static int vexpress_regulator_remove(struct platform_device *pdev) -{ - struct vexpress_regulator *reg = platform_get_drvdata(pdev); - - vexpress_config_func_put(reg->func); - - return 0; } static struct of_device_id vexpress_regulator_of_match[] = { @@ -130,7 +105,6 @@ static struct of_device_id vexpress_regulator_of_match[] = { static struct platform_driver vexpress_regulator_driver = { .probe = vexpress_regulator_probe, - .remove = vexpress_regulator_remove, .driver = { .name = DRVNAME, .owner = THIS_MODULE, diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 4f60caf750c..60fed3d7820 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_RESET_CONTROLLER) += core.o +obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o obj-$(CONFIG_ARCH_STI) += sti/ diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c new file mode 100644 index 00000000000..79c32ca84ef --- /dev/null +++ b/drivers/reset/reset-socfpga.c @@ -0,0 +1,146 @@ +/* + * Copyright 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de> + * + * based on + * Allwinner SoCs Reset Controller driver + * + * Copyright 2013 Maxime Ripard + * + * Maxime Ripard <maxime.ripard@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
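+ *
+ * [Aside, not part of the patch: the probe below and the sunxi
+ * conversion later in this section lean on managed resources;
+ * devm_ioremap_resource() both requests and maps the region and is
+ * undone automatically on unbind, e.g.:
+ *
+ *	base = devm_ioremap_resource(&pdev->dev, res);
+ *	if (IS_ERR(base))
+ *		return PTR_ERR(base);	// no explicit cleanup path needed
+ *
+ * which is why sunxi_reset_remove() can drop its iounmap()/kfree().]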
+ */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +#define NR_BANKS 4 +#define OFFSET_MODRST 0x10 + +struct socfpga_reset_data { + spinlock_t lock; + void __iomem *membase; + struct reset_controller_dev rcdev; +}; + +static int socfpga_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct socfpga_reset_data *data = container_of(rcdev, + struct socfpga_reset_data, + rcdev); + int bank = id / BITS_PER_LONG; + int offset = id % BITS_PER_LONG; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&data->lock, flags); + + reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS)); + writel(reg | BIT(offset), data->membase + OFFSET_MODRST + + (bank * NR_BANKS)); + spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static int socfpga_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct socfpga_reset_data *data = container_of(rcdev, + struct socfpga_reset_data, + rcdev); + + int bank = id / BITS_PER_LONG; + int offset = id % BITS_PER_LONG; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&data->lock, flags); + + reg = readl(data->membase + OFFSET_MODRST + (bank * NR_BANKS)); + writel(reg & ~BIT(offset), data->membase + OFFSET_MODRST + + (bank * NR_BANKS)); + + spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static struct reset_control_ops socfpga_reset_ops = { + .assert = socfpga_reset_assert, + .deassert = socfpga_reset_deassert, +}; + +static int socfpga_reset_probe(struct platform_device *pdev) +{ + struct socfpga_reset_data *data; + struct resource *res; + + /* + * The binding was mainlined without the required property. + * Do not continue when we encounter an old DT. 
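+	 *
+	 * Aside, not part of the patch: in the assert/deassert callbacks
+	 * above, a flat reset id maps onto a register and a bit as
+	 *
+	 *	bank   = id / BITS_PER_LONG;
+	 *	offset = id % BITS_PER_LONG;
+	 *	reg    = OFFSET_MODRST + bank * NR_BANKS;
+	 *
+	 * where NR_BANKS (4) happens to double as the 4-byte register
+	 * stride.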
+ */ + if (!of_find_property(pdev->dev.of_node, "#reset-cells", NULL)) { + dev_err(&pdev->dev, "%s missing #reset-cells property\n", + pdev->dev.of_node->full_name); + return -EINVAL; + } + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->membase)) + return PTR_ERR(data->membase); + + spin_lock_init(&data->lock); + + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG; + data->rcdev.ops = &socfpga_reset_ops; + data->rcdev.of_node = pdev->dev.of_node; + reset_controller_register(&data->rcdev); + + return 0; +} + +static int socfpga_reset_remove(struct platform_device *pdev) +{ + struct socfpga_reset_data *data = platform_get_drvdata(pdev); + + reset_controller_unregister(&data->rcdev); + + return 0; +} + +static const struct of_device_id socfpga_reset_dt_ids[] = { + { .compatible = "altr,rst-mgr", }, + { /* sentinel */ }, +}; + +static struct platform_driver socfpga_reset_driver = { + .probe = socfpga_reset_probe, + .remove = socfpga_reset_remove, + .driver = { + .name = "socfpga-reset", + .owner = THIS_MODULE, + .of_match_table = socfpga_reset_dt_ids, + }, +}; +module_platform_driver(socfpga_reset_driver); + +MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>"); +MODULE_DESCRIPTION("Socfpga Reset Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index 695bd3496eb..a94e7a7820b 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c @@ -145,7 +145,24 @@ MODULE_DEVICE_TABLE(of, sunxi_reset_dt_ids); static int sunxi_reset_probe(struct platform_device *pdev) { - return sunxi_reset_init(pdev->dev.of_node); + struct sunxi_reset_data *data; + struct resource *res; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->membase)) + return PTR_ERR(data->membase); + + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = resource_size(res) * 32; + data->rcdev.ops = &sunxi_reset_ops; + data->rcdev.of_node = pdev->dev.of_node; + + return reset_controller_register(&data->rcdev); } static int sunxi_reset_remove(struct platform_device *pdev) @@ -153,8 +170,6 @@ static int sunxi_reset_remove(struct platform_device *pdev) struct sunxi_reset_data *data = platform_get_drvdata(pdev); reset_controller_unregister(&data->rcdev); - iounmap(data->membase); - kfree(data); return 0; } diff --git a/drivers/reset/sti/reset-stih415.c b/drivers/reset/sti/reset-stih415.c index e6f6c41abe1..c93fd260447 100644 --- a/drivers/reset/sti/reset-stih415.c +++ b/drivers/reset/sti/reset-stih415.c @@ -73,6 +73,7 @@ static const struct syscfg_reset_channel_data stih415_softresets[] = { [STIH415_USB0_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 9), [STIH415_USB1_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 10), [STIH415_USB2_SOFTRESET] = STIH415_SRST_REAR(SYSCFG_376, 11), + [STIH415_KEYSCAN_SOFTRESET] = STIH415_SRST_LPM(LPM_SYSCFG_1, 8), }; static struct syscfg_reset_controller_data stih415_powerdown_controller = { diff --git a/drivers/reset/sti/reset-stih416.c b/drivers/reset/sti/reset-stih416.c index fe3bf02bdc8..5fc987076a9 100644 --- a/drivers/reset/sti/reset-stih416.c +++ b/drivers/reset/sti/reset-stih416.c @@ -104,6 +104,7 @@ static const struct 
syscfg_reset_channel_data stih416_softresets[] = { [STIH416_COMPO_A_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 4), [STIH416_VP8_DEC_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 10), [STIH416_VTG_MAIN_SOFTRESET] = STIH416_SRST_CPU(SYSCFG_7564, 16), + [STIH416_KEYSCAN_SOFTRESET] = STIH416_SRST_LPM(LPM_SYSCFG_1, 8), }; static struct syscfg_reset_controller_data stih416_powerdown_controller = { diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c index bd628a6f981..e5f13c4310f 100644 --- a/drivers/rtc/rtc-hym8563.c +++ b/drivers/rtc/rtc-hym8563.c @@ -569,6 +569,9 @@ static int hym8563_probe(struct i2c_client *client, if (IS_ERR(hym8563->rtc)) return PTR_ERR(hym8563->rtc); + /* the hym8563 alarm only supports a minute accuracy */ + hym8563->rtc->uie_unsupported = 1; + #ifdef CONFIG_COMMON_CLK hym8563_clkout_register_clk(hym8563); #endif diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 5c8f8226c84..4cdb64be061 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -206,7 +206,7 @@ static int pcf8523_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_hour = bcd2bin(regs[2] & 0x3f); tm->tm_mday = bcd2bin(regs[3] & 0x3f); tm->tm_wday = regs[4] & 0x7; - tm->tm_mon = bcd2bin(regs[5] & 0x1f); + tm->tm_mon = bcd2bin(regs[5] & 0x1f) - 1; tm->tm_year = bcd2bin(regs[6]) + 100; return rtc_valid_tm(tm); @@ -229,7 +229,7 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm) regs[3] = bin2bcd(tm->tm_hour); regs[4] = bin2bcd(tm->tm_mday); regs[5] = tm->tm_wday; - regs[6] = bin2bcd(tm->tm_mon); + regs[6] = bin2bcd(tm->tm_mon + 1); regs[7] = bin2bcd(tm->tm_year - 100); msg.addr = client->addr; diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 9f0ea6cb692..e3bf885f4a6 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c @@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) { - do { + static int ntsm_unsupported; + + while (true) { memset(sei, 0, sizeof(*sei)); sei->request.length = 0x0010; sei->request.code = 0x000e; - sei->ntsm = ntsm; + if (!ntsm_unsupported) + sei->ntsm = ntsm; if (chsc(sei)) break; if (sei->response.code != 0x0001) { - CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n", - sei->response.code); + CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n", + sei->response.code, sei->ntsm); + + if (sei->response.code == 3 && sei->ntsm) { + /* Fallback for old firmware. 
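+			 *
+			 * Aside, not part of the patch: the shape here is
+			 * "probe once, latch, retry". Generic sketch, names
+			 * hypothetical:
+			 *
+			 *	static int feature_unsupported;
+			 *
+			 *	rc = issue(feature_unsupported ? PLAIN : FULL);
+			 *	if (rc == RC_REJECTED && !feature_unsupported) {
+			 *		feature_unsupported = 1; // old firmware
+			 *		goto retry;
+			 *	}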
*/ + ntsm_unsupported = 1; + continue; + } break; } @@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm) CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); break; } - } while (sei->u.nt0_area.flags & 0x80); + + if (!(sei->u.nt0_area.flags & 0x80)) + break; + } } /* diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index 4ccb5d86938..a40ee1e3748 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -207,7 +207,7 @@ static void jsfd_do_request(struct request_queue *q) goto end; } - jsfd_read(req->buffer, jdp->dbase + offset, len); + jsfd_read(bio_data(req->bio), jdp->dbase + offset, len); err = 0; end: if (!__blk_end_request_cur(req, err)) diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 296c936cc03..a8d721ff19e 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -639,7 +639,7 @@ static int __init atari_scsi_detect(struct scsi_host_template *host) "double buffer\n"); return 0; } - atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer); + atari_dma_phys_buffer = atari_stram_to_phys(atari_dma_buffer); atari_dma_orig_addr = 0; } #endif diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 7f0af4fcc00..6fd7d40b2c4 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state) mpt2sas_base_free_resources(ioc); pci_save_state(pdev); - pci_disable_device(pdev); pci_set_power_state(pdev, device_state); return 0; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9db097a28a7..a0c95cac91f 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -140,7 +140,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) cmd->result = 0; spin_lock_irqsave(q->queue_lock, flags); blk_requeue_request(q, cmd->request); - kblockd_schedule_work(q, &device->requeue_work); + kblockd_schedule_work(&device->requeue_work); spin_unlock_irqrestore(q->queue_lock, flags); } @@ -1019,8 +1019,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb, return BLKPREP_DEFER; } - req->buffer = NULL; - /* * Next, walk the list, and fill in the addresses and sizes of * each segment. 
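
[Aside, not part of the patch: several hunks in this section (jsflash,
scsi_lib, sd) drop uses of struct request's ->buffer field; a driver
that needs the kernel mapping of the request data now takes it from the
first bio instead, as the jsflash hunk above does:

	void *buf = bio_data(rq->bio);	/* first segment's kernel address */
]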
@@ -1158,7 +1156,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req) BUG_ON(blk_rq_bytes(req)); memset(&cmd->sdb, 0, sizeof(cmd->sdb)); - req->buffer = NULL; } cmd->cmd_len = req->cmd_len; diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index fe30ea94ffe..109802f776e 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c @@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) goto next_msg; } - if (!capable(CAP_SYS_ADMIN)) { + if (!netlink_capable(skb, CAP_SYS_ADMIN)) { err = -EPERM; goto next_msg; } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 1b681427dde..c341f855fad 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy) list_del(&rphy->list); mutex_unlock(&sas_host->lock); - sas_bsg_remove(shost, rphy); - transport_destroy_device(dev); put_device(dev); @@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy) } sas_rphy_unlink(rphy); + sas_bsg_remove(NULL, rphy); transport_remove_device(dev); device_del(dev); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index efcbcd18286..96af195224f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -737,16 +737,14 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) goto out; } + rq->completion_data = page; blk_add_request_payload(rq, page, len); ret = scsi_setup_blk_pc_cmnd(sdp, rq); - rq->buffer = page_address(page); rq->__data_len = nr_bytes; out: - if (ret != BLKPREP_OK) { + if (ret != BLKPREP_OK) __free_page(page); - rq->buffer = NULL; - } return ret; } @@ -842,10 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq) { struct scsi_cmnd *SCpnt = rq->special; - if (rq->cmd_flags & REQ_DISCARD) { - free_page((unsigned long)rq->buffer); - rq->buffer = NULL; - } + if (rq->cmd_flags & REQ_DISCARD) + __free_page(rq->completion_data); + if (SCpnt->cmnd != rq->cmd) { mempool_free(SCpnt->cmnd, sd_cdb_pool); SCpnt->cmnd = NULL; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 16bfd50cd3f..db3b494e592 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) vscsi->affinity_hint_set = true; } else { - for (i = 0; i < vscsi->num_queues; i++) + for (i = 0; i < vscsi->num_queues; i++) { + if (!vscsi->req_vqs[i].vq) + continue; + virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); + } vscsi->affinity_hint_set = false; } diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index fc67f564f02..788ed9b59b4 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile @@ -1,10 +1,12 @@ # # Makefile for the SuperH specific drivers. 
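#
# [Aside, not part of the patch: 'obj-$(CONFIG_FOO) += dir/' expands to
#  'obj-y += dir/' when CONFIG_FOO=y and to an ignored 'obj- += dir/'
#  when it is unset, which is how the rewritten rules below build each
#  subdirectory only for the platforms that need it, e.g.:
#
#	obj-$(CONFIG_MAPLE) += maple/		# built only if CONFIG_MAPLE=y
# ]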
# -obj-y := intc/ +obj-$(CONFIG_SUPERH) += intc/ +obj-$(CONFIG_ARCH_SHMOBILE_LEGACY) += intc/ +ifneq ($(CONFIG_COMMON_CLK),y) +obj-$(CONFIG_HAVE_CLK) += clk/ +endif +obj-$(CONFIG_MAPLE) += maple/ +obj-$(CONFIG_SUPERHYWAY) += superhyway/ -obj-$(CONFIG_HAVE_CLK) += clk/ -obj-$(CONFIG_MAPLE) += maple/ -obj-$(CONFIG_SUPERHYWAY) += superhyway/ - -obj-y += pm_runtime.o +obj-y += pm_runtime.o diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index 8afa5a4589f..10c65eb51f8 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -50,8 +50,25 @@ static struct pm_clk_notifier_block platform_bus_notifier = { .con_ids = { NULL, }, }; +static bool default_pm_on; + static int __init sh_pm_runtime_init(void) { + if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) { + if (!of_machine_is_compatible("renesas,emev2") && + !of_machine_is_compatible("renesas,r7s72100") && + !of_machine_is_compatible("renesas,r8a73a4") && + !of_machine_is_compatible("renesas,r8a7740") && + !of_machine_is_compatible("renesas,r8a7778") && + !of_machine_is_compatible("renesas,r8a7779") && + !of_machine_is_compatible("renesas,r8a7790") && + !of_machine_is_compatible("renesas,r8a7791") && + !of_machine_is_compatible("renesas,sh7372") && + !of_machine_is_compatible("renesas,sh73a0")) + return 0; + } + + default_pm_on = true; pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } @@ -59,7 +76,8 @@ core_initcall(sh_pm_runtime_init); static int __init sh_pm_runtime_late_init(void) { - pm_genpd_poweroff_unused(); + if (default_pm_on) + pm_genpd_poweroff_unused(); return 0; } late_initcall(sh_pm_runtime_late_init); diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig new file mode 100644 index 00000000000..c8543855aa8 --- /dev/null +++ b/drivers/soc/Kconfig @@ -0,0 +1,5 @@ +menu "SOC (System On Chip) specific Drivers" + +source "drivers/soc/qcom/Kconfig" + +endmenu diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile new file mode 100644 index 00000000000..0f7c44793b2 --- /dev/null +++ b/drivers/soc/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Linux Kernel SOC specific device drivers. +# + +obj-$(CONFIG_ARCH_QCOM) += qcom/ diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig new file mode 100644 index 00000000000..7bd2c94f54a --- /dev/null +++ b/drivers/soc/qcom/Kconfig @@ -0,0 +1,11 @@ +# +# QCOM Soc drivers +# +config QCOM_GSBI + tristate "QCOM General Serial Bus Interface" + depends on ARCH_QCOM + help + Say y here to enable GSBI support. The GSBI provides control + functions for connecting the underlying serial UART, SPI, and I2C + devices to the output pins. + diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile new file mode 100644 index 00000000000..438901257ac --- /dev/null +++ b/drivers/soc/qcom/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c new file mode 100644 index 00000000000..447458e696a --- /dev/null +++ b/drivers/soc/qcom/qcom_gsbi.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> + +#define GSBI_CTRL_REG 0x0000 +#define GSBI_PROTOCOL_SHIFT 4 + +static int gsbi_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct resource *res; + void __iomem *base; + struct clk *hclk; + u32 mode, crci = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + if (of_property_read_u32(node, "qcom,mode", &mode)) { + dev_err(&pdev->dev, "missing mode configuration\n"); + return -EINVAL; + } + + /* not required, so default to 0 if not present */ + of_property_read_u32(node, "qcom,crci", &crci); + + dev_info(&pdev->dev, "GSBI port protocol: %d crci: %d\n", mode, crci); + + hclk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(hclk)) + return PTR_ERR(hclk); + + clk_prepare_enable(hclk); + + writel_relaxed((mode << GSBI_PROTOCOL_SHIFT) | crci, + base + GSBI_CTRL_REG); + + /* make sure the gsbi control write is not reordered */ + wmb(); + + clk_disable_unprepare(hclk); + + return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); +} + +static const struct of_device_id gsbi_dt_match[] = { + { .compatible = "qcom,gsbi-v1.0.0", }, + { }, +}; + +MODULE_DEVICE_TABLE(of, gsbi_dt_match); + +static struct platform_driver gsbi_driver = { + .driver = { + .name = "gsbi", + .owner = THIS_MODULE, + .of_match_table = gsbi_dt_match, + }, + .probe = gsbi_probe, +}; + +module_platform_driver(gsbi_driver); + +MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>"); +MODULE_DESCRIPTION("QCOM GSBI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index 713af4806f2..f6759dc0153 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data, struct sg_table *sgt; void *buf, *pbuf; - /* - * Some DMA controllers have problems transferring buffers that are - * not multiple of 4 bytes. So we truncate the transfer so that it - * is suitable for such controllers, and handle the trailing bytes - * manually after the DMA completes. - * - * REVISIT: It would be better if this information could be - * retrieved directly from the DMA device in a similar way than - * ->copy_align etc. is done. 
- */ - len = ALIGN(drv_data->len, 4); - if (dir == DMA_TO_DEVICE) { dmadev = drv_data->tx_chan->device->dev; sgt = &drv_data->tx_sgt; @@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data, if (!error) { pxa2xx_spi_unmap_dma_buffers(drv_data); - /* Handle the last bytes of unaligned transfer */ drv_data->tx += drv_data->tx_map_len; - drv_data->write(drv_data); - drv_data->rx += drv_data->rx_map_len; - drv_data->read(drv_data); msg->actual_length += drv_data->len; msg->state = pxa2xx_spi_next_transfer(drv_data); diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index b032e8885e2..78c66e3c53e 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev) int ret; ret = pm_runtime_get_sync(&pdev->dev); - if (ret) + if (ret < 0) return ret; ret = spi_qup_set_state(controller, QUP_STATE_RESET); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 4eb9bf02996..939edf47323 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable) spi->master->set_cs(spi, !enable); } +#ifdef CONFIG_HAS_DMA static int spi_map_buf(struct spi_master *master, struct device *dev, struct sg_table *sgt, void *buf, size_t len, enum dma_data_direction dir) @@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev, } } -static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +static int __spi_map_msg(struct spi_master *master, struct spi_message *msg) { struct device *tx_dev, *rx_dev; struct spi_transfer *xfer; - void *tmp; - unsigned int max_tx, max_rx; int ret; - if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { - max_tx = 0; - max_rx = 0; - - list_for_each_entry(xfer, &msg->transfers, transfer_list) { - if ((master->flags & SPI_MASTER_MUST_TX) && - !xfer->tx_buf) - max_tx = max(xfer->len, max_tx); - if ((master->flags & SPI_MASTER_MUST_RX) && - !xfer->rx_buf) - max_rx = max(xfer->len, max_rx); - } - - if (max_tx) { - tmp = krealloc(master->dummy_tx, max_tx, - GFP_KERNEL | GFP_DMA); - if (!tmp) - return -ENOMEM; - master->dummy_tx = tmp; - memset(tmp, 0, max_tx); - } - - if (max_rx) { - tmp = krealloc(master->dummy_rx, max_rx, - GFP_KERNEL | GFP_DMA); - if (!tmp) - return -ENOMEM; - master->dummy_rx = tmp; - } - - if (max_tx || max_rx) { - list_for_each_entry(xfer, &msg->transfers, - transfer_list) { - if (!xfer->tx_buf) - xfer->tx_buf = master->dummy_tx; - if (!xfer->rx_buf) - xfer->rx_buf = master->dummy_rx; - } - } - } - if (!master->can_dma) return 0; @@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) return 0; } +#else /* !CONFIG_HAS_DMA */ +static inline int __spi_map_msg(struct spi_master *master, + struct spi_message *msg) +{ + return 0; +} + +static inline int spi_unmap_msg(struct spi_master *master, + struct spi_message *msg) +{ + return 0; +} +#endif /* !CONFIG_HAS_DMA */ + +static int spi_map_msg(struct spi_master *master, struct spi_message *msg) +{ + struct spi_transfer *xfer; + void *tmp; + unsigned int max_tx, max_rx; + + if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) { + max_tx = 0; + max_rx = 0; + + list_for_each_entry(xfer, &msg->transfers, transfer_list) { + if ((master->flags & SPI_MASTER_MUST_TX) && + !xfer->tx_buf) + max_tx = max(xfer->len, max_tx); + if ((master->flags & SPI_MASTER_MUST_RX) && + !xfer->rx_buf) + max_rx = max(xfer->len, max_rx); + } + + if (max_tx) { 
+ tmp = krealloc(master->dummy_tx, max_tx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_tx = tmp; + memset(tmp, 0, max_tx); + } + + if (max_rx) { + tmp = krealloc(master->dummy_rx, max_rx, + GFP_KERNEL | GFP_DMA); + if (!tmp) + return -ENOMEM; + master->dummy_rx = tmp; + } + + if (max_tx || max_rx) { + list_for_each_entry(xfer, &msg->transfers, + transfer_list) { + if (!xfer->tx_buf) + xfer->tx_buf = master->dummy_tx; + if (!xfer->rx_buf) + xfer->rx_buf = master->dummy_rx; + } + } + } + + return __spi_map_msg(master, msg); +} /* * spi_transfer_one_message - Default implementation of transfer_one_message() @@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master) { int ret; - master->queued = true; master->transfer = spi_queued_transfer; if (!master->transfer_one_message) master->transfer_one_message = spi_transfer_one_message; @@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master) dev_err(&master->dev, "problem initializing queue\n"); goto err_init_queue; } + master->queued = true; ret = spi_start_queue(master); if (ret) { dev_err(&master->dev, "problem starting queue\n"); @@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master) return 0; err_start_queue: -err_init_queue: spi_destroy_queue(master); +err_init_queue: return ret; } @@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master); */ int spi_setup(struct spi_device *spi) { - unsigned bad_bits; + unsigned bad_bits, ugly_bits; int status = 0; /* check mode to prevent that DUAL and QUAD set at the same time @@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi) * that aren't supported with their current master */ bad_bits = spi->mode & ~spi->master->mode_bits; + ugly_bits = bad_bits & + (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD); + if (ugly_bits) { + dev_warn(&spi->dev, + "setup: ignoring unsupported mode bits %x\n", + ugly_bits); + spi->mode &= ~ugly_bits; + bad_bits &= ~ugly_bits; + } if (bad_bits) { dev_err(&spi->dev, "setup: unsupported mode bits %x\n", bad_bits); diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c index e2b48204515..017d2f8379b 100644 --- a/drivers/staging/iio/resolver/ad2s1200.c +++ b/drivers/staging/iio/resolver/ad2s1200.c @@ -107,7 +107,7 @@ static int ad2s1200_probe(struct spi_device *spi) int pn, ret = 0; unsigned short *pins = spi->dev.platform_data; - for (pn = 0; pn < AD2S1200_PN; pn++) + for (pn = 0; pn < AD2S1200_PN; pn++) { ret = devm_gpio_request_one(&spi->dev, pins[pn], GPIOF_DIR_OUT, DRV_NAME); if (ret) { @@ -115,6 +115,7 @@ static int ad2s1200_probe(struct spi_device *spi) pins[pn]); return ret; } + } indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); if (!indio_dev) return -ENOMEM; diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c index 4144a75e5f7..c270c9ae6d2 100644 --- a/drivers/staging/imx-drm/imx-drm-core.c +++ b/drivers/staging/imx-drm/imx-drm-core.c @@ -517,7 +517,7 @@ int imx_drm_encoder_get_mux_id(struct device_node *node, of_node_put(port); if (port == imx_crtc->port) { ret = of_graph_parse_endpoint(ep, &endpoint); - return ret ? ret : endpoint.id; + return ret ? 
ret : endpoint.port; } } while (ep); @@ -675,6 +675,11 @@ static int imx_drm_platform_probe(struct platform_device *pdev) if (!remote || !of_device_is_available(remote)) { of_node_put(remote); continue; + } else if (!of_device_is_available(remote->parent)) { + dev_warn(&pdev->dev, "parent device of %s is not available\n", + remote->full_name); + of_node_put(remote); + continue; } ret = imx_drm_add_component(&pdev->dev, remote); diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c index 575533f4fd6..a23f4f77314 100644 --- a/drivers/staging/imx-drm/imx-tve.c +++ b/drivers/staging/imx-drm/imx-tve.c @@ -582,7 +582,7 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) tve->dev = dev; spin_lock_init(&tve->lock); - ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0); + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); if (ddc_node) { tve->ddc = of_find_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c index 8c101cbbee9..acc8184c46c 100644 --- a/drivers/staging/media/davinci_vpfe/vpfe_video.c +++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c @@ -1247,9 +1247,18 @@ static int vpfe_stop_streaming(struct vb2_queue *vq) struct vpfe_fh *fh = vb2_get_drv_priv(vq); struct vpfe_video_device *video = fh->video; - if (!vb2_is_streaming(vq)) - return 0; /* release all active buffers */ + if (video->cur_frm == video->next_frm) { + vb2_buffer_done(&video->cur_frm->vb, VB2_BUF_STATE_ERROR); + } else { + if (video->cur_frm != NULL) + vb2_buffer_done(&video->cur_frm->vb, + VB2_BUF_STATE_ERROR); + if (video->next_frm != NULL) + vb2_buffer_done(&video->next_frm->vb, + VB2_BUF_STATE_ERROR); + } + while (!list_empty(&video->dma_queue)) { video->next_frm = list_entry(video->dma_queue.next, struct vpfe_cap_buffer, list); diff --git a/drivers/staging/media/sn9c102/sn9c102_devtable.h b/drivers/staging/media/sn9c102/sn9c102_devtable.h index b3d2cc72965..4ba56925849 100644 --- a/drivers/staging/media/sn9c102/sn9c102_devtable.h +++ b/drivers/staging/media/sn9c102/sn9c102_devtable.h @@ -48,10 +48,8 @@ static const struct usb_device_id sn9c102_id_table[] = { { SN9C102_USB_DEVICE(0x0c45, 0x600d, BRIDGE_SN9C102), }, /* { SN9C102_USB_DEVICE(0x0c45, 0x6011, BRIDGE_SN9C102), }, OV6650 */ { SN9C102_USB_DEVICE(0x0c45, 0x6019, BRIDGE_SN9C102), }, -#endif { SN9C102_USB_DEVICE(0x0c45, 0x6024, BRIDGE_SN9C102), }, { SN9C102_USB_DEVICE(0x0c45, 0x6025, BRIDGE_SN9C102), }, -#if !defined CONFIG_USB_GSPCA_SONIXB && !defined CONFIG_USB_GSPCA_SONIXB_MODULE { SN9C102_USB_DEVICE(0x0c45, 0x6028, BRIDGE_SN9C102), }, { SN9C102_USB_DEVICE(0x0c45, 0x6029, BRIDGE_SN9C102), }, { SN9C102_USB_DEVICE(0x0c45, 0x602a, BRIDGE_SN9C102), }, diff --git a/drivers/staging/rtl8723au/os_dep/os_intfs.c b/drivers/staging/rtl8723au/os_dep/os_intfs.c index 57eca7a4567..4fe751f7c2b 100644 --- a/drivers/staging/rtl8723au/os_dep/os_intfs.c +++ b/drivers/staging/rtl8723au/os_dep/os_intfs.c @@ -953,8 +953,6 @@ static int netdev_close(struct net_device *pnetdev) #endif /* CONFIG_8723AU_P2P */ rtw_scan_abort23a(padapter); - /* set this at the end */ - padapter->rtw_wdev->iftype = NL80211_IFTYPE_MONITOR; RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-871x_drv - drv_close\n")); DBG_8723A("-871x_drv - drv_close, bup =%d\n", padapter->bup); diff --git a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c index c49160e477d..07e542e5d15 100644 --- 
a/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c +++ b/drivers/staging/rtl8723au/os_dep/usb_ops_linux.c @@ -26,7 +26,7 @@ unsigned int ffaddr2pipehdl23a(struct dvobj_priv *pdvobj, u32 addr) if (addr == RECV_BULK_IN_ADDR) { pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]); } else if (addr == RECV_INT_IN_ADDR) { - pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]); + pipe = usb_rcvintpipe(pusbd, pdvobj->RtInPipe[1]); } else if (addr < HW_QUEUE_ENTRY) { ep_num = pdvobj->Queue2Pipe[addr]; pipe = usb_sndbulkpipe(pusbd, ep_num); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 78cab13bbb1..46588c85d39 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1593,7 +1593,9 @@ int iscsit_process_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, * Initiator is expecting a NopIN ping reply.. */ if (hdr->itt != RESERVED_ITT) { - BUG_ON(!cmd); + if (!cmd) + return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR, + (unsigned char *)hdr); spin_lock_bh(&conn->cmd_lock); list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 6960f22909a..302eb3b7871 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -775,6 +775,7 @@ struct iscsi_np { int np_ip_proto; int np_sock_type; enum np_thread_state_table np_thread_state; + bool enabled; enum iscsi_timer_flags_table np_login_timer_flags; u32 np_exports; enum np_flags_table np_flags; diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 8739b98f6f9..ca31fa1b8a4 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -436,7 +436,7 @@ static int iscsi_login_zero_tsih_s2( } off = mrdsl % PAGE_SIZE; if (!off) - return 0; + goto check_prot; if (mrdsl < PAGE_SIZE) mrdsl = PAGE_SIZE; @@ -452,6 +452,31 @@ static int iscsi_login_zero_tsih_s2( ISCSI_LOGIN_STATUS_NO_RESOURCES); return -1; } + /* + * ISER currently requires that ImmediateData + Unsolicited + * Data be disabled when protection / signature MRs are enabled. 
+ */ +check_prot: + if (sess->se_sess->sup_prot_ops & + (TARGET_PROT_DOUT_STRIP | TARGET_PROT_DOUT_PASS | + TARGET_PROT_DOUT_INSERT)) { + + sprintf(buf, "ImmediateData=No"); + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + + sprintf(buf, "InitialR2T=Yes"); + if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) { + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + return -1; + } + pr_debug("Forcing ImmediateData=No + InitialR2T=Yes for" + " T10-PI enabled ISER session\n"); + } } return 0; @@ -984,6 +1009,7 @@ int iscsi_target_setup_login_socket( } np->np_transport = t; + np->enabled = true; return 0; } diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index eb96b20dc09..ca1811858af 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -184,6 +184,7 @@ static void iscsit_clear_tpg_np_login_thread( return; } + tpg_np->tpg_np->enabled = false; iscsit_reset_np_thread(tpg_np->tpg_np, tpg_np, tpg, shutdown); } diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 65001e13367..26416c15d65 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -798,10 +798,10 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag) pr_err("emulate_write_cache not supported for pSCSI\n"); return -EINVAL; } - if (dev->transport->get_write_cache) { - pr_warn("emulate_write_cache cannot be changed when underlying" - " HW reports WriteCacheEnabled, ignoring request\n"); - return 0; + if (flag && + dev->transport->get_write_cache) { + pr_err("emulate_write_cache not supported for this device\n"); + return -EINVAL; } dev->dev_attrib.emulate_write_cache = flag; @@ -936,6 +936,10 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag) return 0; } if (!dev->transport->init_prot || !dev->transport->free_prot) { + /* 0 is only allowed value for non-supporting backends */ + if (flag == 0) + return 0; + pr_err("DIF protection not supported by backend: %s\n", dev->transport->name); return -ENOSYS; diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index d4b98690a73..789aa9eb0a1 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1113,6 +1113,7 @@ void transport_init_se_cmd( init_completion(&cmd->cmd_wait_comp); init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); + kref_init(&cmd->cmd_kref); cmd->transport_state = CMD_T_DEV_ACTIVE; cmd->se_tfo = tfo; @@ -2357,7 +2358,6 @@ int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd, unsigned long flags; int ret = 0; - kref_init(&se_cmd->cmd_kref); /* * Add a second kref if the fabric caller is expecting to handle * fabric acknowledgement that requires two target_put_sess_cmd() diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 01cf37f212c..f5fd515b2be 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -90,18 +90,18 @@ static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; struct fc_lport *lport; - struct se_session *se_sess; + struct ft_sess *sess; if (!cmd) return; - se_sess = cmd->sess->se_sess; + sess = cmd->sess; fp = cmd->req_frame; lport = fr_dev(fp); if (fr_seq(fp)) lport->tt.seq_release(fr_seq(fp)); 
fc_frame_free(fp); - percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); - ft_sess_put(cmd->sess); /* undo get from lookup at recv */ + percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); + ft_sess_put(sess); /* undo get from lookup at recv */ } void ft_release_cmd(struct se_cmd *se_cmd) diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c index 94f9e3a3841..0ff7fda0742 100644 --- a/drivers/tty/hvc/hvc_console.c +++ b/drivers/tty/hvc/hvc_console.c @@ -190,7 +190,7 @@ static struct tty_driver *hvc_console_device(struct console *c, int *index) return hvc_driver; } -static int __init hvc_console_setup(struct console *co, char *options) +static int hvc_console_setup(struct console *co, char *options) { if (co->index < 0 || co->index >= MAX_NR_HVC_CONSOLES) return -ENODEV; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 41fe8a047d3..fe9d129c873 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2353,8 +2353,12 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, if (tty->ops->flush_chars) tty->ops->flush_chars(tty); } else { + struct n_tty_data *ldata = tty->disc_data; + while (nr > 0) { + mutex_lock(&ldata->output_lock); c = tty->ops->write(tty, b, nr); + mutex_unlock(&ldata->output_lock); if (c < 0) { retval = c; goto break_out; diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index 0e1bf885843..2d4bd3929e5 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -555,7 +555,7 @@ static void serial8250_set_sleep(struct uart_8250_port *p, int sleep) */ if ((p->port.type == PORT_XR17V35X) || (p->port.type == PORT_XR17D15X)) { - serial_out(p, UART_EXAR_SLEEP, 0xff); + serial_out(p, UART_EXAR_SLEEP, sleep ? 
0xff : 0); return; } diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 053b98eb46c..778e376f197 100644 --- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -52,7 +52,6 @@ struct msm_port { struct clk *clk; struct clk *pclk; unsigned int imr; - void __iomem *gsbi_base; int is_uartdm; unsigned int old_snap_state; }; @@ -599,9 +598,7 @@ static const char *msm_type(struct uart_port *port) static void msm_release_port(struct uart_port *port) { struct platform_device *pdev = to_platform_device(port->dev); - struct msm_port *msm_port = UART_TO_MSM(port); struct resource *uart_resource; - struct resource *gsbi_resource; resource_size_t size; uart_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -612,28 +609,12 @@ static void msm_release_port(struct uart_port *port) release_mem_region(port->mapbase, size); iounmap(port->membase); port->membase = NULL; - - if (msm_port->gsbi_base) { - writel_relaxed(GSBI_PROTOCOL_IDLE, - msm_port->gsbi_base + GSBI_CONTROL); - - gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (unlikely(!gsbi_resource)) - return; - - size = resource_size(gsbi_resource); - release_mem_region(gsbi_resource->start, size); - iounmap(msm_port->gsbi_base); - msm_port->gsbi_base = NULL; - } } static int msm_request_port(struct uart_port *port) { - struct msm_port *msm_port = UART_TO_MSM(port); struct platform_device *pdev = to_platform_device(port->dev); struct resource *uart_resource; - struct resource *gsbi_resource; resource_size_t size; int ret; @@ -652,30 +633,8 @@ static int msm_request_port(struct uart_port *port) goto fail_release_port; } - gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); - /* Is this a GSBI-based port? */ - if (gsbi_resource) { - size = resource_size(gsbi_resource); - - if (!request_mem_region(gsbi_resource->start, size, - "msm_serial")) { - ret = -EBUSY; - goto fail_release_port_membase; - } - - msm_port->gsbi_base = ioremap(gsbi_resource->start, size); - if (!msm_port->gsbi_base) { - ret = -EBUSY; - goto fail_release_gsbi; - } - } - return 0; -fail_release_gsbi: - release_mem_region(gsbi_resource->start, size); -fail_release_port_membase: - iounmap(port->membase); fail_release_port: release_mem_region(port->mapbase, size); return ret; @@ -683,7 +642,6 @@ fail_release_port: static void msm_config_port(struct uart_port *port, int flags) { - struct msm_port *msm_port = UART_TO_MSM(port); int ret; if (flags & UART_CONFIG_TYPE) { port->type = PORT_MSM; @@ -691,9 +649,6 @@ static void msm_config_port(struct uart_port *port, int flags) if (ret) return; } - if (msm_port->gsbi_base) - writel_relaxed(GSBI_PROTOCOL_UART, - msm_port->gsbi_base + GSBI_CONTROL); } static int msm_verify_port(struct uart_port *port, struct serial_struct *ser) @@ -1110,6 +1065,7 @@ static struct of_device_id msm_match_table[] = { static struct platform_driver msm_platform_driver = { .remove = msm_serial_remove, + .probe = msm_serial_probe, .driver = { .name = "msm_serial", .owner = THIS_MODULE, @@ -1125,7 +1081,7 @@ static int __init msm_serial_init(void) if (unlikely(ret)) return ret; - ret = platform_driver_probe(&msm_platform_driver, msm_serial_probe); + ret = platform_driver_register(&msm_platform_driver); if (unlikely(ret)) uart_unregister_driver(&msm_uart_driver); diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h index 1e9b68b6f9e..d98d45efdf8 100644 --- a/drivers/tty/serial/msm_serial.h +++ b/drivers/tty/serial/msm_serial.h @@ -109,11 +109,6 @@ #define 
UART_ISR 0x0014 #define UART_ISR_TX_READY (1 << 7) -#define GSBI_CONTROL 0x0 -#define GSBI_PROTOCOL_CODE 0x30 -#define GSBI_PROTOCOL_UART 0x40 -#define GSBI_PROTOCOL_IDLE 0x0 - #define UARTDM_RXFS 0x50 #define UARTDM_RXFS_BUF_SHIFT 0x7 #define UARTDM_RXFS_BUF_MASK 0x7 diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c index f1d30f6945a..cf78d1985cd 100644 --- a/drivers/tty/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -255,16 +255,15 @@ static int __tty_buffer_request_room(struct tty_port *port, size_t size, if (change || left < size) { /* This is the slow path - looking for new buffers to use */ if ((n = tty_buffer_alloc(port, size)) != NULL) { - unsigned long iflags; - n->flags = flags; buf->tail = n; - - spin_lock_irqsave(&buf->flush_lock, iflags); b->commit = b->used; + /* paired w/ barrier in flush_to_ldisc(); ensures the + * latest commit value can be read before the head is + * advanced to the next buffer + */ + smp_wmb(); b->next = n; - spin_unlock_irqrestore(&buf->flush_lock, iflags); - } else if (change) size = 0; else @@ -448,27 +447,28 @@ static void flush_to_ldisc(struct work_struct *work) mutex_lock(&buf->lock); while (1) { - unsigned long flags; struct tty_buffer *head = buf->head; + struct tty_buffer *next; int count; /* Ldisc or user is trying to gain exclusive access */ if (atomic_read(&buf->priority)) break; - spin_lock_irqsave(&buf->flush_lock, flags); + next = head->next; + /* paired w/ barrier in __tty_buffer_request_room(); + * ensures commit value read is not stale if the head + * is advancing to the next buffer + */ + smp_rmb(); count = head->commit - head->read; if (!count) { - if (head->next == NULL) { - spin_unlock_irqrestore(&buf->flush_lock, flags); + if (next == NULL) break; - } - buf->head = head->next; - spin_unlock_irqrestore(&buf->flush_lock, flags); + buf->head = next; tty_buffer_free(port, head); continue; } - spin_unlock_irqrestore(&buf->flush_lock, flags); count = receive_buf(tty, head, count); if (!count) @@ -523,7 +523,6 @@ void tty_buffer_init(struct tty_port *port) struct tty_bufhead *buf = &port->buf; mutex_init(&buf->lock); - spin_lock_init(&buf->flush_lock); tty_buffer_reset(&buf->sentinel, 0); buf->head = &buf->sentinel; buf->tail = &buf->sentinel; diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index f605ad8c190..cfd18bcca72 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c @@ -1709,16 +1709,6 @@ static int at91udc_probe(struct platform_device *pdev) return -ENODEV; } - if (pdev->num_resources != 2) { - DBG("invalid num_resources\n"); - return -ENODEV; - } - if ((pdev->resource[0].flags != IORESOURCE_MEM) - || (pdev->resource[1].flags != IORESOURCE_IRQ)) { - DBG("invalid resource type\n"); - return -ENODEV; - } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 6f2c8d3899d..cf2734b532a 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -248,7 +248,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd, break; } - if (pdata->have_sysif_regs && pdata->controller_ver && + if (pdata->have_sysif_regs && + pdata->controller_ver > FSL_USB_VER_1_6 && (phy_mode == FSL_USB2_PHY_ULPI)) { /* check PHY_CLK_VALID to get phy clk valid */ if (!(spin_event_timeout(in_be32(non_ehci + FSL_SOC_USB_CTRL) & diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index c81c8721cc5..cd871b89501 100644 --- a/drivers/usb/host/ohci-hub.c +++ 
b/drivers/usb/host/ohci-hub.c @@ -90,6 +90,24 @@ __acquires(ohci->lock) dl_done_list (ohci); finish_unlinks (ohci, ohci_frame_no(ohci)); + /* + * Some controllers don't handle "global" suspend properly if + * there are unsuspended ports. For these controllers, put all + * the enabled ports into suspend before suspending the root hub. + */ + if (ohci->flags & OHCI_QUIRK_GLOBAL_SUSPEND) { + __hc32 __iomem *portstat = ohci->regs->roothub.portstatus; + int i; + unsigned temp; + + for (i = 0; i < ohci->num_ports; (++i, ++portstat)) { + temp = ohci_readl(ohci, portstat); + if ((temp & (RH_PS_PES | RH_PS_PSS)) == + RH_PS_PES) + ohci_writel(ohci, RH_PS_PSS, portstat); + } + } + /* maybe resume can wake root hub */ if (ohci_to_hcd(ohci)->self.root_hub->do_remote_wakeup || autostop) { ohci->hc_control |= OHCI_CTRL_RWE; diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c index 90879e9ccbe..bb150967572 100644 --- a/drivers/usb/host/ohci-pci.c +++ b/drivers/usb/host/ohci-pci.c @@ -160,6 +160,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd) ohci_dbg(ohci, "enabled AMD prefetch quirk\n"); } + ohci->flags |= OHCI_QUIRK_GLOBAL_SUSPEND; return 0; } diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h index 9250cada13f..4550ce05af7 100644 --- a/drivers/usb/host/ohci.h +++ b/drivers/usb/host/ohci.h @@ -405,6 +405,8 @@ struct ohci_hcd { #define OHCI_QUIRK_HUB_POWER 0x100 /* distrust firmware power/oc setup */ #define OHCI_QUIRK_AMD_PLL 0x200 /* AMD PLL quirk*/ #define OHCI_QUIRK_AMD_PREFETCH 0x400 /* pre-fetch for ISO transfer */ +#define OHCI_QUIRK_GLOBAL_SUSPEND 0x800 /* must suspend ports */ + // there are also chip quirks/bugs in init logic struct work_struct nec_work; /* Worker for NEC quirk */ diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c index c47e5a6edde..d03fadd2629 100644 --- a/drivers/usb/phy/phy-fsm-usb.c +++ b/drivers/usb/phy/phy-fsm-usb.c @@ -303,17 +303,18 @@ int otg_statemachine(struct otg_fsm *fsm) otg_set_state(fsm, OTG_STATE_A_WAIT_VRISE); break; case OTG_STATE_A_WAIT_VRISE: - if (fsm->id || fsm->a_bus_drop || fsm->a_vbus_vld || - fsm->a_wait_vrise_tmout) { + if (fsm->a_vbus_vld) otg_set_state(fsm, OTG_STATE_A_WAIT_BCON); - } + else if (fsm->id || fsm->a_bus_drop || + fsm->a_wait_vrise_tmout) + otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); break; case OTG_STATE_A_WAIT_BCON: if (!fsm->a_vbus_vld) otg_set_state(fsm, OTG_STATE_A_VBUS_ERR); else if (fsm->b_conn) otg_set_state(fsm, OTG_STATE_A_HOST); - else if (fsm->id | fsm->a_bus_drop | fsm->a_wait_bcon_tmout) + else if (fsm->id || fsm->a_bus_drop || fsm->a_wait_bcon_tmout) otg_set_state(fsm, OTG_STATE_A_WAIT_VFALL); break; case OTG_STATE_A_HOST: diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c index 6e146d723b3..69e49be8866 100644 --- a/drivers/usb/phy/phy-isp1301-omap.c +++ b/drivers/usb/phy/phy-isp1301-omap.c @@ -1295,7 +1295,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host) return isp1301_otg_enable(isp); return 0; -#elif !defined(CONFIG_USB_GADGET_OMAP) +#elif !IS_ENABLED(CONFIG_USB_OMAP) // FIXME update its refcount otg->host = host; diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 7ed681a714a..6c0a542e8ec 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -151,6 +151,21 @@ static const struct usb_device_id id_table[] = { {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, 
/* Netgear AirCard 340U NMEA */ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 0)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 2)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a3, 3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 0)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 2)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a4, 3)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 0)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 2)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a8, 3)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card Modem */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 0)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Device Management */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 2)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card NMEA */ + {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a9, 3)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card Modem */ { } /* Terminating entry */ }; diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c index 4ef2a80728f..008d805c3d2 100644 --- a/drivers/usb/storage/shuttle_usbat.c +++ b/drivers/usb/storage/shuttle_usbat.c @@ -1851,7 +1851,7 @@ static int usbat_probe(struct usb_interface *intf, us->transport_name = "Shuttle USBAT"; us->transport = usbat_flash_transport; us->transport_reset = usb_stor_CB_reset; - us->max_lun = 1; + us->max_lun = 0; result = usb_stor_probe2(us); return result; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index f4a82291894..174a447868c 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -234,6 +234,20 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64 ), +/* Reported by Daniele Forsi <dforsi@gmail.com> */ +UNUSUAL_DEV( 0x0421, 0x04b9, 0x0350, 0x0350, + "Nokia", + "5300", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 ), + +/* Patch submitted by Victor A. 
Santos <victoraur.santos@gmail.com> */ +UNUSUAL_DEV( 0x0421, 0x05af, 0x0742, 0x0742, + "Nokia", + "305", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64), + /* Patch submitted by Mikhail Zolotaryov <lebon@lebon.org.ua> */ UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110, "Nokia", diff --git a/drivers/video/fbdev/atafb.c b/drivers/video/fbdev/atafb.c index e21d1f58554..4953b657635 100644 --- a/drivers/video/fbdev/atafb.c +++ b/drivers/video/fbdev/atafb.c @@ -191,7 +191,7 @@ static struct fb_info fb_info = { }; static void *screen_base; /* base address of screen */ -static void *real_screen_base; /* (only for Overscan) */ +static unsigned long phys_screen_base; /* (only for Overscan) */ static int screen_len; @@ -213,7 +213,8 @@ static unsigned int external_yres; */ static unsigned int external_depth; static int external_pmode; -static void *external_addr; +static void *external_screen_base; +static unsigned long external_addr; static unsigned long external_len; static unsigned long external_vgaiobase; static unsigned int external_bitspercol = 6; @@ -592,7 +593,7 @@ static int tt_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) int mode; strcpy(fix->id, "Atari Builtin"); - fix->smem_start = (unsigned long)real_screen_base; + fix->smem_start = phys_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; @@ -790,7 +791,7 @@ static void tt_get_par(struct atafb_par *par) addr = ((shifter.bas_hi & 0xff) << 16) | ((shifter.bas_md & 0xff) << 8) | ((shifter.bas_lo & 0xff)); - par->screen_base = phys_to_virt(addr); + par->screen_base = atari_stram_to_virt(addr); } static void tt_set_par(struct atafb_par *par) @@ -888,7 +889,7 @@ static int falcon_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { strcpy(fix->id, "Atari Builtin"); - fix->smem_start = (unsigned long)real_screen_base; + fix->smem_start = phys_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; @@ -1584,7 +1585,7 @@ static void falcon_get_par(struct atafb_par *par) addr = (shifter.bas_hi & 0xff) << 16 | (shifter.bas_md & 0xff) << 8 | (shifter.bas_lo & 0xff); - par->screen_base = phys_to_virt(addr); + par->screen_base = atari_stram_to_virt(addr); /* derived parameters */ hw->ste_mode = (hw->f_shift & 0x510) == 0 && hw->st_shift == 0x100; @@ -1814,7 +1815,7 @@ static int stste_encode_fix(struct fb_fix_screeninfo *fix, int mode; strcpy(fix->id, "Atari Builtin"); - fix->smem_start = (unsigned long)real_screen_base; + fix->smem_start = phys_screen_base; fix->smem_len = screen_len; fix->type = FB_TYPE_INTERLEAVED_PLANES; fix->type_aux = 2; @@ -1980,7 +1981,7 @@ static void stste_get_par(struct atafb_par *par) ((shifter.bas_md & 0xff) << 8); if (ATARIHW_PRESENT(EXTD_SHIFTER)) addr |= (shifter.bas_lo & 0xff); - par->screen_base = phys_to_virt(addr); + par->screen_base = atari_stram_to_virt(addr); } static void stste_set_par(struct atafb_par *par) @@ -2039,7 +2040,7 @@ static int stste_detect(void) static void stste_set_screen_base(void *s_base) { unsigned long addr; - addr = virt_to_phys(s_base); + addr = atari_stram_to_phys(s_base); /* Setup Screen Memory */ shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16); shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8); @@ -2113,7 +2114,7 @@ static void st_ovsc_switch(void) static int ext_encode_fix(struct fb_fix_screeninfo *fix, struct atafb_par *par) { strcpy(fix->id, "Unknown Extern"); - fix->smem_start = (unsigned long)external_addr; + fix->smem_start = 
external_addr; fix->smem_len = PAGE_ALIGN(external_len); if (external_depth == 1) { fix->type = FB_TYPE_PACKED_PIXELS; @@ -2213,7 +2214,7 @@ static int ext_encode_var(struct fb_var_screeninfo *var, struct atafb_par *par) static void ext_get_par(struct atafb_par *par) { - par->screen_base = external_addr; + par->screen_base = external_screen_base; } static void ext_set_par(struct atafb_par *par) @@ -2286,7 +2287,7 @@ static void set_screen_base(void *s_base) { unsigned long addr; - addr = virt_to_phys(s_base); + addr = atari_stram_to_phys(s_base); /* Setup Screen Memory */ shifter.bas_hi = (unsigned char)((addr & 0xff0000) >> 16); shifter.bas_md = (unsigned char)((addr & 0x00ff00) >> 8); @@ -2433,7 +2434,9 @@ static void atafb_set_disp(struct fb_info *info) atafb_get_var(&info->var, info); atafb_get_fix(&info->fix, info); - info->screen_base = (void *)info->fix.smem_start; + /* Note: smem_start derives from phys_screen_base, not screen_base! */ + info->screen_base = (external_addr ? external_screen_base : + atari_stram_to_virt(info->fix.smem_start)); } static int atafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, @@ -2904,7 +2907,7 @@ static void __init atafb_setup_ext(char *spec) external_yres = yres; external_depth = depth; external_pmode = planes; - external_addr = (void *)addr; + external_addr = addr; external_len = len; if (external_card_type == IS_MV300) { @@ -3166,30 +3169,30 @@ int __init atafb_init(void) memset(screen_base, 0, mem_req); pad = -(unsigned long)screen_base & (PAGE_SIZE - 1); screen_base += pad; - real_screen_base = screen_base + ovsc_offset; + phys_screen_base = atari_stram_to_phys(screen_base + ovsc_offset); screen_len = (mem_req - pad - ovsc_offset) & PAGE_MASK; st_ovsc_switch(); if (CPU_IS_040_OR_060) { /* On a '040+, the cache mode of video RAM must be set to * write-through also for internal video hardware! */ - cache_push(virt_to_phys(screen_base), screen_len); + cache_push(atari_stram_to_phys(screen_base), screen_len); kernel_set_cachemode(screen_base, screen_len, IOMAP_WRITETHROUGH); } - printk("atafb: screen_base %p real_screen_base %p screen_len %d\n", - screen_base, real_screen_base, screen_len); + printk("atafb: screen_base %p phys_screen_base %lx screen_len %d\n", + screen_base, phys_screen_base, screen_len); #ifdef ATAFB_EXT } else { /* Map the video memory (physical address given) to somewhere * in the kernel address space. 
*/ - external_addr = ioremap_writethrough((unsigned long)external_addr, + external_screen_base = ioremap_writethrough(external_addr, external_len); if (external_vgaiobase) external_vgaiobase = (unsigned long)ioremap(external_vgaiobase, 0x10000); - screen_base = - real_screen_base = external_addr; + screen_base = external_screen_base; + phys_screen_base = external_addr; screen_len = external_len & PAGE_MASK; memset (screen_base, 0, external_len); } @@ -3235,8 +3238,8 @@ int __init atafb_init(void) if (register_framebuffer(&fb_info) < 0) { #ifdef ATAFB_EXT if (external_addr) { - iounmap(external_addr); - external_addr = NULL; + iounmap(external_screen_base); + external_addr = 0; } if (external_vgaiobase) { iounmap((void*)external_vgaiobase); diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 96109a9972b..84b4bfb8434 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -66,7 +66,22 @@ static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue); static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; static unsigned event_array_pages __read_mostly; +/* + * sync_set_bit() and friends must be unsigned long aligned on non-x86 + * platforms. + */ +#if !defined(CONFIG_X86) && BITS_PER_LONG > 32 + +#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL) +#define EVTCHN_FIFO_BIT(b, w) \ + (((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b) + +#else + #define BM(w) ((unsigned long *)(w)) +#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b + +#endif static inline event_word_t *event_word_from_port(unsigned port) { @@ -161,33 +176,38 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu) static void evtchn_fifo_clear_pending(unsigned port) { event_word_t *word = event_word_from_port(port); - sync_clear_bit(EVTCHN_FIFO_PENDING, BM(word)); + sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } static void evtchn_fifo_set_pending(unsigned port) { event_word_t *word = event_word_from_port(port); - sync_set_bit(EVTCHN_FIFO_PENDING, BM(word)); + sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } static bool evtchn_fifo_is_pending(unsigned port) { event_word_t *word = event_word_from_port(port); - return sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)); + return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word)); } static bool evtchn_fifo_test_and_set_mask(unsigned port) { event_word_t *word = event_word_from_port(port); - return sync_test_and_set_bit(EVTCHN_FIFO_MASKED, BM(word)); + return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); } static void evtchn_fifo_mask(unsigned port) { event_word_t *word = event_word_from_port(port); - sync_set_bit(EVTCHN_FIFO_MASKED, BM(word)); + sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); } +static bool evtchn_fifo_is_masked(unsigned port) +{ + event_word_t *word = event_word_from_port(port); + return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word)); +} /* * Clear MASKED, spinning if BUSY is set. 
*/ @@ -211,7 +231,7 @@ static void evtchn_fifo_unmask(unsigned port) BUG_ON(!irqs_disabled()); clear_masked(word); - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word))) { + if (evtchn_fifo_is_pending(port)) { struct evtchn_unmask unmask = { .port = port }; (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask); } @@ -243,7 +263,7 @@ static void handle_irq_for_port(unsigned port) static void consume_one_event(unsigned cpu, struct evtchn_fifo_control_block *control_block, - unsigned priority, uint32_t *ready) + unsigned priority, unsigned long *ready) { struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); uint32_t head; @@ -273,10 +293,9 @@ static void consume_one_event(unsigned cpu, * copy of the ready word. */ if (head == 0) - clear_bit(priority, BM(ready)); + clear_bit(priority, ready); - if (sync_test_bit(EVTCHN_FIFO_PENDING, BM(word)) - && !sync_test_bit(EVTCHN_FIFO_MASKED, BM(word))) + if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) handle_irq_for_port(port); q->head[priority] = head; @@ -285,7 +304,7 @@ static void consume_one_event(unsigned cpu, static void evtchn_fifo_handle_events(unsigned cpu) { struct evtchn_fifo_control_block *control_block; - uint32_t ready; + unsigned long ready; unsigned q; control_block = per_cpu(cpu_control_block, cpu); diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 32f9236c959..c3667b202f2 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -41,9 +41,6 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; struct suspend_info { int cancelled; - unsigned long arg; /* extra hypercall argument */ - void (*pre)(void); - void (*post)(int cancelled); }; static RAW_NOTIFIER_HEAD(xen_resume_notifier); @@ -61,26 +58,6 @@ void xen_resume_notifier_unregister(struct notifier_block *nb) EXPORT_SYMBOL_GPL(xen_resume_notifier_unregister); #ifdef CONFIG_HIBERNATE_CALLBACKS -static void xen_hvm_post_suspend(int cancelled) -{ - xen_arch_hvm_post_suspend(cancelled); - gnttab_resume(); -} - -static void xen_pre_suspend(void) -{ - xen_mm_pin_all(); - gnttab_suspend(); - xen_arch_pre_suspend(); -} - -static void xen_post_suspend(int cancelled) -{ - xen_arch_post_suspend(cancelled); - gnttab_resume(); - xen_mm_unpin_all(); -} - static int xen_suspend(void *data) { struct suspend_info *si = data; @@ -94,18 +71,20 @@ static int xen_suspend(void *data) return err; } - if (si->pre) - si->pre(); + gnttab_suspend(); + xen_arch_pre_suspend(); /* * This hypercall returns 1 if suspend was cancelled * or the domain was merely checkpointed, and 0 if it * is resuming in a new domain. */ - si->cancelled = HYPERVISOR_suspend(si->arg); + si->cancelled = HYPERVISOR_suspend(xen_pv_domain() + ? 
virt_to_mfn(xen_start_info) + : 0); - if (si->post) - si->post(si->cancelled); + xen_arch_post_suspend(si->cancelled); + gnttab_resume(); if (!si->cancelled) { xen_irq_resume(); @@ -154,16 +133,6 @@ static void do_suspend(void) si.cancelled = 1; - if (xen_hvm_domain()) { - si.arg = 0UL; - si.pre = NULL; - si.post = &xen_hvm_post_suspend; - } else { - si.arg = virt_to_mfn(xen_start_info); - si.pre = &xen_pre_suspend; - si.post = &xen_post_suspend; - } - err = stop_machine(xen_suspend, &si, cpumask_of(0)); raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 82358d14ecf..59fc190f1e9 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -127,7 +127,7 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr) pr_debug(" C%d: %s %d uS\n", cx->type, cx->desc, (u32)cx->latency); } - } else if (ret != -EINVAL) + } else if ((ret != -EINVAL) && (ret != -ENOSYS)) /* EINVAL means the ACPI ID is incorrect - meaning the ACPI * table is referencing a non-existing CPU - which can happen * with broken ACPI tables. */ @@ -259,7 +259,7 @@ static int push_pxx_to_hypervisor(struct acpi_processor *_pr) (u32) perf->states[i].power, (u32) perf->states[i].transition_latency); } - } else if (ret != -EINVAL) + } else if ((ret != -EINVAL) && (ret != -ENOSYS)) /* EINVAL means the ACPI ID is incorrect - meaning the ACPI * table is referencing a non-existing CPU - which can happen * with broken ACPI tables. */ diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c index 62fcd485f0a..d57a173685f 100644 --- a/drivers/xen/xen-pciback/pci_stub.c +++ b/drivers/xen/xen-pciback/pci_stub.c @@ -242,6 +242,15 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev, return found_dev; } +/* + * Called when: + * - XenBus state has been reconfigure (pci unplug). See xen_pcibk_remove_device + * - XenBus state has been disconnected (guest shutdown). See xen_pcibk_xenbus_remove + * - 'echo BDF > unbind' on pciback module with no guest attached. See pcistub_remove + * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove + * + * As such we have to be careful. + */ void pcistub_put_pci_dev(struct pci_dev *dev) { struct pcistub_device *psdev, *found_psdev = NULL; @@ -272,16 +281,16 @@ void pcistub_put_pci_dev(struct pci_dev *dev) * and want to inhibit the user from fiddling with 'reset' */ pci_reset_function(dev); - pci_restore_state(psdev->dev); + pci_restore_state(dev); /* This disables the device. */ - xen_pcibk_reset_device(found_psdev->dev); + xen_pcibk_reset_device(dev); /* And cleanup up our emulated fields. */ - xen_pcibk_config_free_dyn_fields(found_psdev->dev); - xen_pcibk_config_reset_dev(found_psdev->dev); + xen_pcibk_config_reset_dev(dev); + xen_pcibk_config_free_dyn_fields(dev); - xen_unregister_device_domain_owner(found_psdev->dev); + xen_unregister_device_domain_owner(dev); spin_lock_irqsave(&found_psdev->lock, flags); found_psdev->pdev = NULL; @@ -493,6 +502,8 @@ static int pcistub_seize(struct pci_dev *dev) return err; } +/* Called when 'bind'. This means we must _NOT_ call pci_reset_function or + * other functions that take the sysfs lock. */ static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err = 0; @@ -520,6 +531,8 @@ out: return err; } +/* Called when 'unbind'. This means we must _NOT_ call pci_reset_function or + * other functions that take the sysfs lock. 
*/ static void pcistub_remove(struct pci_dev *dev) { struct pcistub_device *psdev, *found_psdev = NULL; @@ -551,6 +564,8 @@ static void pcistub_remove(struct pci_dev *dev) pr_warn("****** shutdown driver domain before binding device\n"); pr_warn("****** to other drivers or domains\n"); + /* N.B. This ends up calling pcistub_put_pci_dev which ends up + * doing the FLR. */ xen_pcibk_release_pci_dev(found_psdev->pdev, found_psdev->dev); } diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c index a9ed867afab..4a7e6e0a5f4 100644 --- a/drivers/xen/xen-pciback/xenbus.c +++ b/drivers/xen/xen-pciback/xenbus.c @@ -93,6 +93,8 @@ static void free_pdev(struct xen_pcibk_device *pdev) xen_pcibk_disconnect(pdev); + /* N.B. This calls pcistub_put_pci_dev which does the FLR on all + * of the PCIe devices. */ xen_pcibk_release_devices(pdev); dev_set_drvdata(&pdev->xdev->dev, NULL); @@ -286,6 +288,8 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev, dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id); xen_unregister_device_domain_owner(dev); + /* N.B. This ends up calling pcistub_put_pci_dev which ends up + * doing the FLR. */ xen_pcibk_release_pci_dev(pdev, dev); out:
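The BM() and EVTCHN_FIFO_BIT() macros added to events_fifo.c above exist because sync_set_bit() and friends require naturally aligned unsigned longs: on 64-bit non-x86 platforms each 32-bit event word is remapped to the 8-byte-aligned long containing it, and the bit number is raised by 32 when the word is the upper half of that long. Below is a minimal user-space model of that remapping, assuming a little-endian LP64 target; bm() and bit_in_long() are illustrative names, not kernel API.

    #include <stdint.h>
    #include <stdio.h>

    /* Map a 32-bit event word to the 8-byte-aligned unsigned long
     * containing it, as BM() does in events_fifo.c. */
    static unsigned long *bm(uint32_t *w)
    {
            return (unsigned long *)((uintptr_t)w & ~(uintptr_t)0x7);
    }

    /* Adjust a bit index the way EVTCHN_FIFO_BIT() does: a word in the
     * upper half of the long carries its bits at positions 32..63. */
    static unsigned bit_in_long(unsigned bit, uint32_t *w)
    {
            return ((uintptr_t)w & 0x4) ? bit + 32 : bit;
    }

    int main(void)
    {
            uint64_t backing = 0;
            uint32_t *words = (uint32_t *)&backing;
            unsigned pending = 0;   /* stands in for EVTCHN_FIFO_PENDING */

            /* "Set PENDING" on the second event word via the aligned long. */
            *bm(&words[1]) |= 1UL << bit_in_long(pending, &words[1]);

            printf("backing = %#llx\n", (unsigned long long)backing);
            return 0;
    }

On a little-endian LP64 machine this prints backing = 0x100000000: bit 0 of the second event word lands at bit 32 of the containing long, which is exactly the aligned access sync_set_bit() needs.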