Diffstat (limited to 'drivers')
199 files changed, 14701 insertions, 1323 deletions
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 3a73a93596e..85b32376dad 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -49,10 +49,6 @@ ACPI_MODULE_NAME("processor_perflib"); static DEFINE_MUTEX(performance_mutex); -/* Use cpufreq debug layer for _PPC changes. */ -#define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ - "cpufreq-core", msg) - /* * _PPC support is implemented as a CPUfreq policy notifier: * This means each time a CPUfreq driver registered also with @@ -145,7 +141,7 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) return -ENODEV; } - cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, + pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, (int)ppc, ppc ? "" : "not"); pr->performance_platform_limit = (int)ppc; diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index ff9d832a163..d38c40fe4dd 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); u32 tmp; - u8 status; - - status = readl(port_mmio + PORT_TFDATA) & 0xFF; - - /* - * At end of section 10.1 of AHCI spec (rev 1.3), it states - * Software shall not set PxCMD.ST to 1 until it is determined - * that a functoinal device is present on the port as determined by - * PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h - * - * Even though most AHCI host controllers work without this check, - * specific controller will fail under this condition - */ - if (status & (ATA_BUSY | ATA_DRQ)) - return; - else { - ahci_scr_read(&ap->link, SCR_STATUS, &tmp); - - if ((tmp & 0xf) != 0x3) - return; - } /* start DMA */ tmp = readl(port_mmio + PORT_CMD); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index f26f2fe3480..dad9fd660f3 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; - bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM; + bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM; unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM; unsigned int err_mask; int rc; diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index bdd2719f3f6..bc9e702186d 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent) } #ifdef CONFIG_SBUS +static const struct of_device_id fore200e_sba_match[]; static int __devinit fore200e_sba_probe(struct platform_device *op) { + const struct of_device_id *match; const struct fore200e_bus *bus; struct fore200e *fore200e; static int index = 0; int err; - if (!op->dev.of_match) + match = of_match_device(fore200e_sba_match, &op->dev); + if (!match) return -EINVAL; - bus = op->dev.of_match->data; + bus = match->data; fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL); if (!fore200e) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index e9e5238f310..d57e8d0fb82 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -168,11 +168,4 @@ config SYS_HYPERVISOR bool default n -config ARCH_NO_SYSDEV_OPS - bool - ---help--- - To be selected by architectures that don't use sysdev class or - sysdev driver power management (suspend/resume) and shutdown - 
operations. - endmenu diff --git a/drivers/base/base.h b/drivers/base/base.h index 19f49e41ce5..a34dca0ad04 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -111,8 +111,6 @@ static inline int driver_match_device(struct device_driver *drv, return drv->bus->match ? drv->bus->match(dev, drv) : 1; } -extern void sysdev_shutdown(void); - extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index da57ee9d63f..29917c7506c 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -316,8 +316,7 @@ static void __device_release_driver(struct device *dev) drv = dev->driver; if (drv) { - pm_runtime_get_noresume(dev); - pm_runtime_barrier(dev); + pm_runtime_get_sync(dev); driver_sysfs_remove(dev); @@ -326,6 +325,8 @@ static void __device_release_driver(struct device *dev) BUS_NOTIFY_UNBIND_DRIVER, dev); + pm_runtime_put_sync(dev); + if (dev->bus && dev->bus->remove) dev->bus->remove(dev); else if (drv->remove) @@ -338,7 +339,6 @@ static void __device_release_driver(struct device *dev) BUS_NOTIFY_UNBOUND_DRIVER, dev); - pm_runtime_put_sync(dev); } } diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 8c798ef7f13..bbb03e6f725 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -521,6 +521,11 @@ static int _request_firmware(const struct firmware **firmware_p, if (!firmware_p) return -EINVAL; + if (WARN_ON(usermodehelper_is_disabled())) { + dev_err(device, "firmware: %s will not be loaded\n", name); + return -EBUSY; + } + *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); if (!firmware) { dev_err(device, "%s: kmalloc(struct firmware) failed\n", diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9e0e4fc24c4..48425f18302 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -667,7 +667,7 @@ static int platform_legacy_resume(struct device *dev) return ret; } -static int platform_pm_prepare(struct device *dev) +int platform_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -678,7 +678,7 @@ static int platform_pm_prepare(struct device *dev) return ret; } -static void platform_pm_complete(struct device *dev) +void platform_pm_complete(struct device *dev) { struct device_driver *drv = dev->driver; @@ -686,16 +686,11 @@ static void platform_pm_complete(struct device *dev) drv->pm->complete(dev); } -#else /* !CONFIG_PM_SLEEP */ - -#define platform_pm_prepare NULL -#define platform_pm_complete NULL - -#endif /* !CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND -int __weak platform_pm_suspend(struct device *dev) +int platform_pm_suspend(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -713,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev) return ret; } -int __weak platform_pm_suspend_noirq(struct device *dev) +int platform_pm_suspend_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -729,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev) return ret; } -int __weak platform_pm_resume(struct device *dev) +int platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -747,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev) return ret; } -int __weak platform_pm_resume_noirq(struct device *dev) +int platform_pm_resume_noirq(struct device *dev) { struct device_driver *drv = dev->driver; 
int ret = 0; @@ -763,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev) return ret; } -#else /* !CONFIG_SUSPEND */ - -#define platform_pm_suspend NULL -#define platform_pm_resume NULL -#define platform_pm_suspend_noirq NULL -#define platform_pm_resume_noirq NULL - -#endif /* !CONFIG_SUSPEND */ +#endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS -static int platform_pm_freeze(struct device *dev) +int platform_pm_freeze(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -792,7 +780,7 @@ static int platform_pm_freeze(struct device *dev) return ret; } -static int platform_pm_freeze_noirq(struct device *dev) +int platform_pm_freeze_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -808,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev) return ret; } -static int platform_pm_thaw(struct device *dev) +int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -826,7 +814,7 @@ static int platform_pm_thaw(struct device *dev) return ret; } -static int platform_pm_thaw_noirq(struct device *dev) +int platform_pm_thaw_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -842,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev) return ret; } -static int platform_pm_poweroff(struct device *dev) +int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -860,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev) return ret; } -static int platform_pm_poweroff_noirq(struct device *dev) +int platform_pm_poweroff_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -876,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev) return ret; } -static int platform_pm_restore(struct device *dev) +int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -894,7 +882,7 @@ static int platform_pm_restore(struct device *dev) return ret; } -static int platform_pm_restore_noirq(struct device *dev) +int platform_pm_restore_noirq(struct device *dev) { struct device_driver *drv = dev->driver; int ret = 0; @@ -910,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev) return ret; } -#else /* !CONFIG_HIBERNATE_CALLBACKS */ - -#define platform_pm_freeze NULL -#define platform_pm_thaw NULL -#define platform_pm_poweroff NULL -#define platform_pm_restore NULL -#define platform_pm_freeze_noirq NULL -#define platform_pm_thaw_noirq NULL -#define platform_pm_poweroff_noirq NULL -#define platform_pm_restore_noirq NULL - -#endif /* !CONFIG_HIBERNATE_CALLBACKS */ - -#ifdef CONFIG_PM_RUNTIME - -int __weak platform_pm_runtime_suspend(struct device *dev) -{ - return pm_generic_runtime_suspend(dev); -}; - -int __weak platform_pm_runtime_resume(struct device *dev) -{ - return pm_generic_runtime_resume(dev); -}; - -int __weak platform_pm_runtime_idle(struct device *dev) -{ - return pm_generic_runtime_idle(dev); -}; - -#else /* !CONFIG_PM_RUNTIME */ - -#define platform_pm_runtime_suspend NULL -#define platform_pm_runtime_resume NULL -#define platform_pm_runtime_idle NULL - -#endif /* !CONFIG_PM_RUNTIME */ +#endif /* CONFIG_HIBERNATE_CALLBACKS */ static const struct dev_pm_ops platform_dev_pm_ops = { - .prepare = platform_pm_prepare, - .complete = platform_pm_complete, - .suspend = platform_pm_suspend, - .resume = platform_pm_resume, - .freeze = platform_pm_freeze, - .thaw = platform_pm_thaw, - 
.poweroff = platform_pm_poweroff, - .restore = platform_pm_restore, - .suspend_noirq = platform_pm_suspend_noirq, - .resume_noirq = platform_pm_resume_noirq, - .freeze_noirq = platform_pm_freeze_noirq, - .thaw_noirq = platform_pm_thaw_noirq, - .poweroff_noirq = platform_pm_poweroff_noirq, - .restore_noirq = platform_pm_restore_noirq, - .runtime_suspend = platform_pm_runtime_suspend, - .runtime_resume = platform_pm_runtime_resume, - .runtime_idle = platform_pm_runtime_idle, + .runtime_suspend = pm_generic_runtime_suspend, + .runtime_resume = pm_generic_runtime_resume, + .runtime_idle = pm_generic_runtime_idle, + USE_PLATFORM_PM_SLEEP_OPS }; struct bus_type platform_bus_type = { @@ -977,41 +916,6 @@ struct bus_type platform_bus_type = { }; EXPORT_SYMBOL_GPL(platform_bus_type); -/** - * platform_bus_get_pm_ops() - return pointer to busses dev_pm_ops - * - * This function can be used by platform code to get the current - * set of dev_pm_ops functions used by the platform_bus_type. - */ -const struct dev_pm_ops * __init platform_bus_get_pm_ops(void) -{ - return platform_bus_type.pm; -} - -/** - * platform_bus_set_pm_ops() - update dev_pm_ops for the platform_bus_type - * - * @pm: pointer to new dev_pm_ops struct to be used for platform_bus_type - * - * Platform code can override the dev_pm_ops methods of - * platform_bus_type by using this function. It is expected that - * platform code will first do a platform_bus_get_pm_ops(), then - * kmemdup it, then customize selected methods and pass a pointer to - * the new struct dev_pm_ops to this function. - * - * Since platform-specific code is customizing methods for *all* - * devices (not just platform-specific devices) it is expected that - * any custom overrides of these functions will keep existing behavior - * and simply extend it. For example, any customization of the - * runtime PM methods should continue to call the pm_generic_* - * functions as the default ones do in addition to the - * platform-specific behavior. - */ -void __init platform_bus_set_pm_ops(const struct dev_pm_ops *pm) -{ - platform_bus_type.pm = pm; -} - int __init platform_bus_init(void) { int error; diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 118c1b92a51..3647e114d0e 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -3,6 +3,6 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o +obj-$(CONFIG_HAVE_CLK) += clock_ops.o -ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG -ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG +ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c new file mode 100644 index 00000000000..c0dd09df7be --- /dev/null +++ b/drivers/base/power/clock_ops.c @@ -0,0 +1,431 @@ +/* + * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks + * + * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2. + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/err.h> + +#ifdef CONFIG_PM_RUNTIME + +struct pm_runtime_clk_data { + struct list_head clock_list; + struct mutex lock; +}; + +enum pce_status { + PCE_STATUS_NONE = 0, + PCE_STATUS_ACQUIRED, + PCE_STATUS_ENABLED, + PCE_STATUS_ERROR, +}; + +struct pm_clock_entry { + struct list_head node; + char *con_id; + struct clk *clk; + enum pce_status status; +}; + +static struct pm_runtime_clk_data *__to_prd(struct device *dev) +{ + return dev ? dev->power.subsys_data : NULL; +} + +/** + * pm_runtime_clk_add - Start using a device clock for runtime PM. + * @dev: Device whose clock is going to be used for runtime PM. + * @con_id: Connection ID of the clock. + * + * Add the clock represented by @con_id to the list of clocks used for + * the runtime PM of @dev. + */ +int pm_runtime_clk_add(struct device *dev, const char *con_id) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + if (!prd) + return -EINVAL; + + ce = kzalloc(sizeof(*ce), GFP_KERNEL); + if (!ce) { + dev_err(dev, "Not enough memory for clock entry.\n"); + return -ENOMEM; + } + + if (con_id) { + ce->con_id = kstrdup(con_id, GFP_KERNEL); + if (!ce->con_id) { + dev_err(dev, + "Not enough memory for clock connection ID.\n"); + kfree(ce); + return -ENOMEM; + } + } + + mutex_lock(&prd->lock); + list_add_tail(&ce->node, &prd->clock_list); + mutex_unlock(&prd->lock); + return 0; +} + +/** + * __pm_runtime_clk_remove - Destroy runtime PM clock entry. + * @ce: Runtime PM clock entry to destroy. + * + * This routine must be called under the mutex protecting the runtime PM list + * of clocks corresponding to the @ce's device. + */ +static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) +{ + if (!ce) + return; + + list_del(&ce->node); + + if (ce->status < PCE_STATUS_ERROR) { + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); + + if (ce->status >= PCE_STATUS_ACQUIRED) + clk_put(ce->clk); + } + + if (ce->con_id) + kfree(ce->con_id); + + kfree(ce); +} + +/** + * pm_runtime_clk_remove - Stop using a device clock for runtime PM. + * @dev: Device whose clock should not be used for runtime PM any more. + * @con_id: Connection ID of the clock. + * + * Remove the clock represented by @con_id from the list of clocks used for + * the runtime PM of @dev. + */ +void pm_runtime_clk_remove(struct device *dev, const char *con_id) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + if (!prd) + return; + + mutex_lock(&prd->lock); + + list_for_each_entry(ce, &prd->clock_list, node) { + if (!con_id && !ce->con_id) { + __pm_runtime_clk_remove(ce); + break; + } else if (!con_id || !ce->con_id) { + continue; + } else if (!strcmp(con_id, ce->con_id)) { + __pm_runtime_clk_remove(ce); + break; + } + } + + mutex_unlock(&prd->lock); +} + +/** + * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
+ * @dev: Device to initialize the list of runtime PM clocks for. + * + * Allocate a struct pm_runtime_clk_data object, initialize its lock member and + * make the @dev's power.subsys_data field point to it. + */ +int pm_runtime_clk_init(struct device *dev) +{ + struct pm_runtime_clk_data *prd; + + prd = kzalloc(sizeof(*prd), GFP_KERNEL); + if (!prd) { + dev_err(dev, "Not enough memory for runtime PM data.\n"); + return -ENOMEM; + } + + INIT_LIST_HEAD(&prd->clock_list); + mutex_init(&prd->lock); + dev->power.subsys_data = prd; + return 0; +} + +/** + * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. + * @dev: Device to destroy the list of runtime PM clocks for. + * + * Clear the @dev's power.subsys_data field, remove the list of clock entries + * from the struct pm_runtime_clk_data object pointed to by it before and free + * that object. + */ +void pm_runtime_clk_destroy(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce, *c; + + if (!prd) + return; + + dev->power.subsys_data = NULL; + + mutex_lock(&prd->lock); + + list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) + __pm_runtime_clk_remove(ce); + + mutex_unlock(&prd->lock); + + kfree(prd); +} + +/** + * pm_runtime_clk_acquire - Acquire a device clock. + * @dev: Device whose clock is to be acquired. + * @con_id: Connection ID of the clock. + */ +static void pm_runtime_clk_acquire(struct device *dev, + struct pm_clock_entry *ce) +{ + ce->clk = clk_get(dev, ce->con_id); + if (IS_ERR(ce->clk)) { + ce->status = PCE_STATUS_ERROR; + } else { + ce->status = PCE_STATUS_ACQUIRED; + dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); + } +} + +/** + * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. + * @dev: Device to disable the clocks for. + */ +int pm_runtime_clk_suspend(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + dev_dbg(dev, "%s()\n", __func__); + + if (!prd) + return 0; + + mutex_lock(&prd->lock); + + list_for_each_entry_reverse(ce, &prd->clock_list, node) { + if (ce->status == PCE_STATUS_NONE) + pm_runtime_clk_acquire(dev, ce); + + if (ce->status < PCE_STATUS_ERROR) { + clk_disable(ce->clk); + ce->status = PCE_STATUS_ACQUIRED; + } + } + + mutex_unlock(&prd->lock); + + return 0; +} + +/** + * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. + * @dev: Device to enable the clocks for. + */ +int pm_runtime_clk_resume(struct device *dev) +{ + struct pm_runtime_clk_data *prd = __to_prd(dev); + struct pm_clock_entry *ce; + + dev_dbg(dev, "%s()\n", __func__); + + if (!prd) + return 0; + + mutex_lock(&prd->lock); + + list_for_each_entry(ce, &prd->clock_list, node) { + if (ce->status == PCE_STATUS_NONE) + pm_runtime_clk_acquire(dev, ce); + + if (ce->status < PCE_STATUS_ERROR) { + clk_enable(ce->clk); + ce->status = PCE_STATUS_ENABLED; + } + } + + mutex_unlock(&prd->lock); + + return 0; +} + +/** + * pm_runtime_clk_notify - Notify routine for device addition and removal. + * @nb: Notifier block object this function is a member of. + * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data.
+ * Specifically, the pwr_domain member of that object is copied to the device's + * pwr_domain field and its con_ids member is used to populate the device's list + * of runtime PM clocks, depending on @action. + * + * If the device's pwr_domain field is already populated with a value different + * from the one stored in the struct pm_clk_notifier_block object, the function + * does nothing. + */ +static int pm_runtime_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char **con_id; + int error; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (dev->pwr_domain) + break; + + error = pm_runtime_clk_init(dev); + if (error) + break; + + dev->pwr_domain = clknb->pwr_domain; + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + pm_runtime_clk_add(dev, *con_id); + } else { + pm_runtime_clk_add(dev, NULL); + } + + break; + case BUS_NOTIFY_DEL_DEVICE: + if (dev->pwr_domain != clknb->pwr_domain) + break; + + dev->pwr_domain = NULL; + pm_runtime_clk_destroy(dev); + break; + } + + return 0; +} + +#else /* !CONFIG_PM_RUNTIME */ + +/** + * enable_clock - Enable a device clock. + * @dev: Device whose clock is to be enabled. + * @con_id: Connection ID of the clock. + */ +static void enable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_enable(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced on.\n"); + } +} + +/** + * disable_clock - Disable a device clock. + * @dev: Device whose clock is to be disabled. + * @con_id: Connection ID of the clock. + */ +static void disable_clock(struct device *dev, const char *con_id) +{ + struct clk *clk; + + clk = clk_get(dev, con_id); + if (!IS_ERR(clk)) { + clk_disable(clk); + clk_put(clk); + dev_info(dev, "Runtime PM disabled, clock forced off.\n"); + } +} + +/** + * pm_runtime_clk_notify - Notify routine for device addition and removal. + * @nb: Notifier block object this function is a member of. + * @action: Operation being carried out by the caller. + * @data: Device the routine is being run for. + * + * For this function to work, @nb must be a member of an object of type + * struct pm_clk_notifier_block containing all of the requisite data. + * Specifically, the con_ids member of that object is used to enable or disable + * the device's clocks, depending on @action. + */ +static int pm_runtime_clk_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct pm_clk_notifier_block *clknb; + struct device *dev = data; + char **con_id; + + dev_dbg(dev, "%s() %ld\n", __func__, action); + + clknb = container_of(nb, struct pm_clk_notifier_block, nb); + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + enable_clock(dev, *con_id); + } else { + enable_clock(dev, NULL); + } + break; + case BUS_NOTIFY_DEL_DEVICE: + if (clknb->con_ids[0]) { + for (con_id = clknb->con_ids; *con_id; con_id++) + disable_clock(dev, *con_id); + } else { + disable_clock(dev, NULL); + } + break; + } + + return 0; +} + +#endif /* !CONFIG_PM_RUNTIME */ + +/** + * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. + * @bus: Bus type to add the notifier to. + * @clknb: Notifier to be added to the given bus type.
+ * + * The nb member of @clknb is not expected to be initialized and its + * notifier_call member will be replaced with pm_runtime_clk_notify(). However, + * the remaining members of @clknb should be populated prior to calling this + * routine. + */ +void pm_runtime_clk_add_notifier(struct bus_type *bus, + struct pm_clk_notifier_block *clknb) +{ + if (!bus || !clknb) + return; + + clknb->nb.notifier_call = pm_runtime_clk_notify; + bus_register_notifier(bus, &clknb->nb); +} diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 42f97f92562..cb3bb368681 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -74,6 +74,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); #ifdef CONFIG_PM_SLEEP /** + * pm_generic_prepare - Generic routine preparing a device for power transition. + * @dev: Device to prepare. + * + * Prepare a device for a system-wide power transition. + */ +int pm_generic_prepare(struct device *dev) +{ + struct device_driver *drv = dev->driver; + int ret = 0; + + if (drv && drv->pm && drv->pm->prepare) + ret = drv->pm->prepare(dev); + + return ret; +} + +/** * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. * @dev: Device to handle. * @event: PM transition of the system under way. @@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev) return __pm_generic_resume(dev, PM_EVENT_RESTORE); } EXPORT_SYMBOL_GPL(pm_generic_restore); + +/** + * pm_generic_complete - Generic routine completing a device power transition. + * @dev: Device to handle. + * + * Complete a device power transition during a system-wide power transition. + */ +void pm_generic_complete(struct device *dev) +{ + struct device_driver *drv = dev->driver; + + if (drv && drv->pm && drv->pm->complete) + drv->pm->complete(dev); + + /* + * Let runtime PM try to suspend devices that haven't been in use before + * going into the system-wide sleep state we're resuming from.
+ */ + pm_runtime_idle(dev); +} #endif /* CONFIG_PM_SLEEP */ struct dev_pm_ops generic_subsys_pm_ops = { #ifdef CONFIG_PM_SLEEP + .prepare = pm_generic_prepare, .suspend = pm_generic_suspend, .resume = pm_generic_resume, .freeze = pm_generic_freeze, .thaw = pm_generic_thaw, .poweroff = pm_generic_poweroff, .restore = pm_generic_restore, + .complete = pm_generic_complete, #endif #ifdef CONFIG_PM_RUNTIME .runtime_suspend = pm_generic_runtime_suspend, diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index abe3ab709e8..aa632020774 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -426,10 +426,8 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) if (dev->pwr_domain) { pm_dev_dbg(dev, state, "EARLY power domain "); - pm_noirq_op(dev, &dev->pwr_domain->ops, state); - } - - if (dev->type && dev->type->pm) { + error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "EARLY type "); error = pm_noirq_op(dev, dev->type->pm, state); } else if (dev->class && dev->class->pm) { @@ -517,7 +515,8 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) if (dev->pwr_domain) { pm_dev_dbg(dev, state, "power domain "); - pm_op(dev, &dev->pwr_domain->ops, state); + error = pm_op(dev, &dev->pwr_domain->ops, state); + goto End; } if (dev->type && dev->type->pm) { @@ -580,11 +579,13 @@ static bool is_async(struct device *dev) * Execute the appropriate "resume" callback for all devices whose status * indicates that they are suspended. */ -static void dpm_resume(pm_message_t state) +void dpm_resume(pm_message_t state) { struct device *dev; ktime_t starttime = ktime_get(); + might_sleep(); + mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -629,12 +630,11 @@ static void device_complete(struct device *dev, pm_message_t state) { device_lock(dev); - if (dev->pwr_domain && dev->pwr_domain->ops.complete) { + if (dev->pwr_domain) { pm_dev_dbg(dev, state, "completing power domain "); - dev->pwr_domain->ops.complete(dev); - } - - if (dev->type && dev->type->pm) { + if (dev->pwr_domain->ops.complete) + dev->pwr_domain->ops.complete(dev); + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "completing type "); if (dev->type->pm->complete) dev->type->pm->complete(dev); @@ -658,10 +658,12 @@ static void device_complete(struct device *dev, pm_message_t state) * Execute the ->complete() callbacks for all devices whose PM status is not * DPM_ON (this allows new devices to be registered). 
*/ -static void dpm_complete(pm_message_t state) +void dpm_complete(pm_message_t state) { struct list_head list; + might_sleep(); + INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_prepared_list)) { @@ -690,7 +692,6 @@ static void dpm_complete(pm_message_t state) */ void dpm_resume_end(pm_message_t state) { - might_sleep(); dpm_resume(state); dpm_complete(state); } @@ -732,7 +733,12 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) { int error; - if (dev->type && dev->type->pm) { + if (dev->pwr_domain) { + pm_dev_dbg(dev, state, "LATE power domain "); + error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); + if (error) + return error; + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "LATE type "); error = pm_noirq_op(dev, dev->type->pm, state); if (error) @@ -749,11 +755,6 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) return error; } - if (dev->pwr_domain) { - pm_dev_dbg(dev, state, "LATE power domain "); - pm_noirq_op(dev, &dev->pwr_domain->ops, state); - } - return 0; } @@ -841,21 +842,27 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) goto End; } + if (dev->pwr_domain) { + pm_dev_dbg(dev, state, "power domain "); + error = pm_op(dev, &dev->pwr_domain->ops, state); + goto End; + } + if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "type "); error = pm_op(dev, dev->type->pm, state); - goto Domain; + goto End; } if (dev->class) { if (dev->class->pm) { pm_dev_dbg(dev, state, "class "); error = pm_op(dev, dev->class->pm, state); - goto Domain; + goto End; } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); error = legacy_suspend(dev, state, dev->class->suspend); - goto Domain; + goto End; } } @@ -869,12 +876,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } } - Domain: - if (!error && dev->pwr_domain) { - pm_dev_dbg(dev, state, "power domain "); - pm_op(dev, &dev->pwr_domain->ops, state); - } - End: device_unlock(dev); complete_all(&dev->power.completion); @@ -914,11 +915,13 @@ static int device_suspend(struct device *dev) * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. * @state: PM transition of the system being carried out. 
*/ -static int dpm_suspend(pm_message_t state) +int dpm_suspend(pm_message_t state) { ktime_t starttime = ktime_get(); int error = 0; + might_sleep(); + mutex_lock(&dpm_list_mtx); pm_transition = state; async_error = 0; @@ -965,7 +968,14 @@ static int device_prepare(struct device *dev, pm_message_t state) device_lock(dev); - if (dev->type && dev->type->pm) { + if (dev->pwr_domain) { + pm_dev_dbg(dev, state, "preparing power domain "); + if (dev->pwr_domain->ops.prepare) + error = dev->pwr_domain->ops.prepare(dev); + suspend_report_result(dev->pwr_domain->ops.prepare, error); + if (error) + goto End; + } else if (dev->type && dev->type->pm) { pm_dev_dbg(dev, state, "preparing type "); if (dev->type->pm->prepare) error = dev->type->pm->prepare(dev); @@ -984,13 +994,6 @@ static int device_prepare(struct device *dev, pm_message_t state) if (dev->bus->pm->prepare) error = dev->bus->pm->prepare(dev); suspend_report_result(dev->bus->pm->prepare, error); - if (error) - goto End; - } - - if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { - pm_dev_dbg(dev, state, "preparing power domain "); - dev->pwr_domain->ops.prepare(dev); } End: @@ -1005,10 +1008,12 @@ static int device_prepare(struct device *dev, pm_message_t state) * * Execute the ->prepare() callback(s) for all devices. */ -static int dpm_prepare(pm_message_t state) +int dpm_prepare(pm_message_t state) { int error = 0; + might_sleep(); + mutex_lock(&dpm_list_mtx); while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.next); @@ -1057,7 +1062,6 @@ int dpm_suspend_start(pm_message_t state) { int error; - might_sleep(); error = dpm_prepare(state); if (!error) error = dpm_suspend(state); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 3172c60d23a..0d4587b15c5 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -168,7 +168,6 @@ static int rpm_check_suspend_allowed(struct device *dev) static int rpm_idle(struct device *dev, int rpmflags) { int (*callback)(struct device *); - int (*domain_callback)(struct device *); int retval; retval = rpm_check_suspend_allowed(dev); @@ -214,7 +213,9 @@ static int rpm_idle(struct device *dev, int rpmflags) dev->power.idle_notification = true; - if (dev->type && dev->type->pm) + if (dev->pwr_domain) + callback = dev->pwr_domain->ops.runtime_idle; + else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_idle; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_idle; @@ -223,19 +224,10 @@ static int rpm_idle(struct device *dev, int rpmflags) else callback = NULL; - if (dev->pwr_domain) - domain_callback = dev->pwr_domain->ops.runtime_idle; - else - domain_callback = NULL; - - if (callback || domain_callback) { + if (callback) { spin_unlock_irq(&dev->power.lock); - if (domain_callback) - retval = domain_callback(dev); - - if (!retval && callback) - callback(dev); + callback(dev); spin_lock_irq(&dev->power.lock); } @@ -382,7 +374,9 @@ static int rpm_suspend(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_SUSPENDING); - if (dev->type && dev->type->pm) + if (dev->pwr_domain) + callback = dev->pwr_domain->ops.runtime_suspend; + else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_suspend; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_suspend; @@ -400,8 +394,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) else pm_runtime_cancel_pending(dev); } else { - if (dev->pwr_domain) - rpm_callback(dev->pwr_domain->ops.runtime_suspend, 
dev); no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -582,9 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags) __update_runtime_status(dev, RPM_RESUMING); if (dev->pwr_domain) - rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); - - if (dev->type && dev->type->pm) + callback = dev->pwr_domain->ops.runtime_resume; + else if (dev->type && dev->type->pm) callback = dev->type->pm->runtime_resume; else if (dev->class && dev->class->pm) callback = dev->class->pm->runtime_resume; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index fff49bee781..a9f5b897961 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -212,8 +212,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, autosuspend_delay_ms_store); -#endif +#endif /* CONFIG_PM_RUNTIME */ +#ifdef CONFIG_PM_SLEEP static ssize_t wake_show(struct device * dev, struct device_attribute *attr, char * buf) { @@ -248,7 +249,6 @@ wake_store(struct device * dev, struct device_attribute *attr, static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); -#ifdef CONFIG_PM_SLEEP static ssize_t wakeup_count_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index abbbd33e8d8..84f7c7d5a09 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -110,7 +110,6 @@ void wakeup_source_add(struct wakeup_source *ws) spin_lock_irq(&events_lock); list_add_rcu(&ws->entry, &wakeup_sources); spin_unlock_irq(&events_lock); - synchronize_rcu(); } EXPORT_SYMBOL_GPL(wakeup_source_add); diff --git a/drivers/base/sys.c b/drivers/base/sys.c index acde9b5ee13..9dff77bfe1e 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c @@ -328,203 +328,8 @@ void sysdev_unregister(struct sys_device *sysdev) kobject_put(&sysdev->kobj); } - -#ifndef CONFIG_ARCH_NO_SYSDEV_OPS -/** - * sysdev_shutdown - Shut down all system devices. - * - * Loop over each class of system devices, and the devices in each - * of those classes. For each device, we call the shutdown method for - * each driver registered for the device - the auxiliaries, - * and the class driver. - * - * Note: The list is iterated in reverse order, so that we shut down - * child devices before we shut down their parents. The list ordering - * is guaranteed by virtue of the fact that child devices are registered - * after their parents. 
- */ -void sysdev_shutdown(void) -{ - struct sysdev_class *cls; - - pr_debug("Shutting Down System Devices\n"); - - mutex_lock(&sysdev_drivers_lock); - list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { - struct sys_device *sysdev; - - pr_debug("Shutting down type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - struct sysdev_driver *drv; - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - /* Call auxiliary drivers first */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->shutdown) - drv->shutdown(sysdev); - } - - /* Now call the generic one */ - if (cls->shutdown) - cls->shutdown(sysdev); - } - } - mutex_unlock(&sysdev_drivers_lock); -} - -static void __sysdev_resume(struct sys_device *dev) -{ - struct sysdev_class *cls = dev->cls; - struct sysdev_driver *drv; - - /* First, call the class-specific one */ - if (cls->resume) - cls->resume(dev); - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", cls->resume); - - /* Call auxiliary drivers next. */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->resume) - drv->resume(dev); - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", drv->resume); - } -} - -/** - * sysdev_suspend - Suspend all system devices. - * @state: Power state to enter. - * - * We perform an almost identical operation as sysdev_shutdown() - * above, though calling ->suspend() instead. Interrupts are disabled - * when this called. Devices are responsible for both saving state and - * quiescing or powering down the device. - * - * This is only called by the device PM core, so we let them handle - * all synchronization. - */ -int sysdev_suspend(pm_message_t state) -{ - struct sysdev_class *cls; - struct sys_device *sysdev, *err_dev; - struct sysdev_driver *drv, *err_drv; - int ret; - - pr_debug("Checking wake-up interrupts\n"); - - /* Return error code if there are any wake-up interrupts pending */ - ret = check_wakeup_irqs(); - if (ret) - return ret; - - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled while suspending system devices\n"); - - pr_debug("Suspending System Devices\n"); - - list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { - pr_debug("Suspending type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - /* Call auxiliary drivers first */ - list_for_each_entry(drv, &cls->drivers, entry) { - if (drv->suspend) { - ret = drv->suspend(sysdev, state); - if (ret) - goto aux_driver; - } - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", - drv->suspend); - } - - /* Now call the generic one */ - if (cls->suspend) { - ret = cls->suspend(sysdev, state); - if (ret) - goto cls_driver; - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled after %pF\n", - cls->suspend); - } - } - } - return 0; - /* resume current sysdev */ -cls_driver: - drv = NULL; - printk(KERN_ERR "Class suspend failed for %s: %d\n", - kobject_name(&sysdev->kobj), ret); - -aux_driver: - if (drv) - printk(KERN_ERR "Class driver suspend failed for %s: %d\n", - kobject_name(&sysdev->kobj), ret); - list_for_each_entry(err_drv, &cls->drivers, entry) { - if (err_drv == drv) - break; - if (err_drv->resume) - err_drv->resume(sysdev); - } - - /* resume other sysdevs in current class */ - list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { - if (err_dev == sysdev) - break; - pr_debug(" %s\n", kobject_name(&err_dev->kobj)); - 
__sysdev_resume(err_dev); - } - - /* resume other classes */ - list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) { - list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&err_dev->kobj)); - __sysdev_resume(err_dev); - } - } - return ret; -} -EXPORT_SYMBOL_GPL(sysdev_suspend); - -/** - * sysdev_resume - Bring system devices back to life. - * - * Similar to sysdev_suspend(), but we iterate the list forwards - * to guarantee that parent devices are resumed before their children. - * - * Note: Interrupts are disabled when called. - */ -int sysdev_resume(void) -{ - struct sysdev_class *cls; - - WARN_ONCE(!irqs_disabled(), - "Interrupts enabled while resuming system devices\n"); - - pr_debug("Resuming System Devices\n"); - - list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { - struct sys_device *sysdev; - - pr_debug("Resuming type '%s':\n", - kobject_name(&cls->kset.kobj)); - - list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { - pr_debug(" %s\n", kobject_name(&sysdev->kobj)); - - __sysdev_resume(sysdev); - } - } - return 0; -} -EXPORT_SYMBOL_GPL(sysdev_resume); -#endif /* CONFIG_ARCH_NO_SYSDEV_OPS */ +EXPORT_SYMBOL_GPL(sysdev_register); +EXPORT_SYMBOL_GPL(sysdev_unregister); int __init system_bus_init(void) { @@ -534,9 +339,6 @@ int __init system_bus_init(void) return 0; } -EXPORT_SYMBOL_GPL(sysdev_register); -EXPORT_SYMBOL_GPL(sysdev_unregister); - #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) ssize_t sysdev_store_ulong(struct sys_device *sysdev, diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index 8066d086578..e086fbbbe85 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller) disk->major = MajorNumber; disk->first_minor = n << DAC960_MaxPartitionsBits; disk->fops = &DAC960_BlockDeviceOperations; - disk->events = DISK_EVENT_MEDIA_CHANGE; } /* Indicate the Block Device Registration completed successfully, diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 456c0cc90dc..8eba86bba59 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void) disk->major = FLOPPY_MAJOR; disk->first_minor = drive; disk->fops = &floppy_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disk->disk_name, "fd%d", drive); disk->private_data = &unit[drive]; set_capacity(disk, 880*2); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index c871eae1412..ede16c64ff0 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void) unit[i].disk->first_minor = i; sprintf(unit[i].disk->disk_name, "fd%d", i); unit[i].disk->fops = &floppy_fops; - unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE; unit[i].disk->private_data = &unit[i]; unit[i].disk->queue = blk_init_queue(do_fd_request, &ataflop_lock); diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 301d7a9a41a..db8f88586c8 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4205,7 +4205,6 @@ static int __init floppy_init(void) disks[dr]->major = FLOPPY_MAJOR; disks[dr]->first_minor = TOMINOR(dr); disks[dr]->fops = &floppy_fops; - disks[dr]->events = DISK_EVENT_MEDIA_CHANGE; sprintf(disks[dr]->disk_name, "fd%d", dr); init_timer(&motor_off_timer[dr]); diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 2f2ccf68625..8690e31d993 100644 
--- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -320,7 +320,6 @@ static void pcd_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, cd->name); /* umm... */ disk->fops = &pcd_bdops; - disk->events = DISK_EVENT_MEDIA_CHANGE; } } diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 21dfdb77686..869e7676d46 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk) p->fops = &pd_fops; p->major = major; p->first_minor = (disk - pd) << PD_BITS; - p->events = DISK_EVENT_MEDIA_CHANGE; disk->gd = p; p->private_data = disk; p->queue = pd_queue; diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index 7adeb1edbf4..f21b520ef41 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -294,7 +294,6 @@ static void __init pf_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, pf->name); disk->fops = &pf_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; if (!(*drives[unit])[D_PRT]) pf_drive_count++; } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3e904717c1c..9712fad82bc 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -92,6 +92,8 @@ struct rbd_client { struct list_head node; }; +struct rbd_req_coll; + /* * a single io request */ @@ -100,6 +102,24 @@ struct rbd_request { struct bio *bio; /* cloned bio */ struct page **pages; /* list of used pages */ u64 len; + int coll_index; + struct rbd_req_coll *coll; +}; + +struct rbd_req_status { + int done; + int rc; + u64 bytes; +}; + +/* + * a collection of requests + */ +struct rbd_req_coll { + int total; + int num_done; + struct kref kref; + struct rbd_req_status status[0]; }; struct rbd_snap { @@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev) rbd_dev->client = NULL; } +/* + * Destroy requests collection + */ +static void rbd_coll_release(struct kref *kref) +{ + struct rbd_req_coll *coll = + container_of(kref, struct rbd_req_coll, kref); + + dout("rbd_coll_release %p\n", coll); + kfree(coll); +} /* * Create a new header structure, translate header format from the on-disk @@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header, return len; } +static int rbd_get_num_segments(struct rbd_image_header *header, + u64 ofs, u64 len) +{ + u64 start_seg = ofs >> header->obj_order; + u64 end_seg = (ofs + len - 1) >> header->obj_order; + return end_seg - start_seg + 1; +} + /* * bio helpers */ @@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops) kfree(ops); } +static void rbd_coll_end_req_index(struct request *rq, + struct rbd_req_coll *coll, + int index, + int ret, u64 len) +{ + struct request_queue *q; + int min, max, i; + + dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n", + coll, index, ret, len); + + if (!rq) + return; + + if (!coll) { + blk_end_request(rq, ret, len); + return; + } + + q = rq->q; + + spin_lock_irq(q->queue_lock); + coll->status[index].done = 1; + coll->status[index].rc = ret; + coll->status[index].bytes = len; + max = min = coll->num_done; + while (max < coll->total && coll->status[max].done) + max++; + + for (i = min; i<max; i++) { + __blk_end_request(rq, coll->status[i].rc, + coll->status[i].bytes); + coll->num_done++; + kref_put(&coll->kref, rbd_coll_release); + } + spin_unlock_irq(q->queue_lock); +} + +static void rbd_coll_end_req(struct rbd_request *req, + int ret, u64 len) +{ + rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len); +} + /* 
* Send ceph osd request */ @@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq, int flags, struct ceph_osd_req_op *ops, int num_reply, + struct rbd_req_coll *coll, + int coll_index, void (*rbd_cb)(struct ceph_osd_request *req, struct ceph_msg *msg), struct ceph_osd_request **linger_req, @@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq, struct ceph_osd_request_head *reqhead; struct rbd_image_header *header = &dev->header; - ret = -ENOMEM; req_data = kzalloc(sizeof(*req_data), GFP_NOIO); - if (!req_data) - goto done; + if (!req_data) { + if (coll) + rbd_coll_end_req_index(rq, coll, coll_index, + -ENOMEM, len); + return -ENOMEM; + } - dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs); + if (coll) { + req_data->coll = coll; + req_data->coll_index = coll_index; + } + + dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs); down_read(&header->snap_rwsem); @@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq, ret = ceph_osdc_wait_request(&dev->client->osdc, req); if (ver) *ver = le64_to_cpu(req->r_reassert_version.version); - dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version)); + dout("reassert_ver=%lld\n", + le64_to_cpu(req->r_reassert_version.version)); ceph_osdc_put_request(req); } return ret; @@ -837,10 +931,8 @@ done_err: bio_chain_put(req_data->bio); ceph_osdc_put_request(req); done_pages: + rbd_coll_end_req(req_data, ret, len); kfree(req_data); -done: - if (rq) - blk_end_request(rq, ret, len); return ret; } @@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg) bytes = req_data->len; } - blk_end_request(req_data->rq, rc, bytes); + rbd_coll_end_req(req_data, rc, bytes); if (req_data->bio) bio_chain_put(req_data->bio); @@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev, flags, ops, 2, + NULL, 0, NULL, linger_req, ver); if (ret < 0) @@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq, u64 snapid, int opcode, int flags, int num_reply, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { char *seg_name; u64 seg_ofs; @@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq, flags, ops, num_reply, + coll, coll_index, rbd_req_cb, 0, NULL); + + rbd_destroy_ops(ops); done: kfree(seg_name); return ret; @@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq, struct rbd_device *rbd_dev, struct ceph_snap_context *snapc, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, 2, - ofs, len, bio); + ofs, len, bio, coll, coll_index); } /* @@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq, struct rbd_device *rbd_dev, u64 snapid, u64 ofs, u64 len, - struct bio *bio) + struct bio *bio, + struct rbd_req_coll *coll, + int coll_index) { return rbd_do_op(rq, rbd_dev, NULL, (snapid ? 
snapid : CEPH_NOSNAP), CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, 2, - ofs, len, bio); + ofs, len, bio, coll, coll_index); } /* @@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, { struct ceph_osd_req_op *ops; struct page **pages = NULL; - int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); + int ret; + + ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0); if (ret < 0) return ret; @@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, CEPH_OSD_FLAG_READ, ops, 1, + NULL, 0, rbd_simple_req_cb, 0, NULL); rbd_destroy_ops(ops); @@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev, return ret; } +static struct rbd_req_coll *rbd_alloc_coll(int num_reqs) +{ + struct rbd_req_coll *coll = + kzalloc(sizeof(struct rbd_req_coll) + + sizeof(struct rbd_req_status) * num_reqs, + GFP_ATOMIC); + + if (!coll) + return NULL; + coll->total = num_reqs; + kref_init(&coll->kref); + return coll; +} + /* * block device queue callback */ @@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q) bool do_write; int size, op_size = 0; u64 ofs; + int num_segs, cur_seg = 0; + struct rbd_req_coll *coll; /* peek at request from block layer */ if (!rq) @@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q) do_write ? "write" : "read", size, blk_rq_pos(rq) * 512ULL); + num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); + coll = rbd_alloc_coll(num_segs); + if (!coll) { + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, -ENOMEM); + goto next; + } + do { /* a bio clone to be passed down to OSD req */ dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt); @@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q) rbd_dev->header.block_name, ofs, size, NULL, NULL); + kref_get(&coll->kref); bio = bio_chain_clone(&rq_bio, &next_bio, &bp, op_size, GFP_ATOMIC); if (!bio) { - spin_lock_irq(q->queue_lock); - __blk_end_request_all(rq, -ENOMEM); - goto next; + rbd_coll_end_req_index(rq, coll, cur_seg, + -ENOMEM, op_size); + goto next_seg; } + /* init OSD command: write or read */ if (do_write) rbd_req_write(rq, rbd_dev, rbd_dev->header.snapc, ofs, - op_size, bio); + op_size, bio, + coll, cur_seg); else rbd_req_read(rq, rbd_dev, cur_snap_id(rbd_dev), ofs, - op_size, bio); + op_size, bio, + coll, cur_seg); +next_seg: size -= op_size; ofs += op_size; + cur_seg++; rq_bio = next_bio; } while (size > 0); + kref_put(&coll->kref, rbd_coll_release); if (bp) bio_pair_release(bp); - spin_lock_irq(q->queue_lock); next: rq = blk_fetch_request(q); diff --git a/drivers/block/swim.c b/drivers/block/swim.c index 24a482f2fbd..fd5adcd5594 100644 --- a/drivers/block/swim.c +++ b/drivers/block/swim.c @@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd) swd->unit[drive].disk->first_minor = drive; sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive); swd->unit[drive].disk->fops = &floppy_fops; - swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE; swd->unit[drive].disk->private_data = &swd->unit[drive]; swd->unit[drive].disk->queue = swd->queue; set_capacity(swd->unit[drive].disk, 2880); diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c index 4c10f56facb..773bfa79277 100644 --- a/drivers/block/swim3.c +++ b/drivers/block/swim3.c @@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device disk->major = FLOPPY_MAJOR; disk->first_minor = i; disk->fops = &floppy_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = 
&floppy_states[i]; disk->queue = swim3_queue; disk->flags |= GENHD_FL_REMOVABLE; diff --git a/drivers/block/ub.c b/drivers/block/ub.c index 68b9430c7cf..0e376d46bdd 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c @@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) disk->major = UB_MAJOR; disk->first_minor = lun->id * UB_PARTS_PER_LUN; disk->fops = &ub_bd_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = lun; disk->driverfs_dev = &sc->intf->dev; diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 645ff765cd1..6c7fd7db6df 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace) ace->gd->major = ace_major; ace->gd->first_minor = ace->id * ACE_NUM_MINORS; ace->gd->fops = &ace_fops; - ace->gd->events = DISK_EVENT_MEDIA_CHANGE; ace->gd->queue = ace->queue; ace->gd->private_data = ace; snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 514dd8efaf7..75fb965b8f7 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "entering cdrom_open\n"); + /* open is event synchronization point, check events first */ + check_disk_change(bdev); + /* if this was a O_NONBLOCK open and we should honor the flags, * do a quick open without drive/disc integrity checks. */ cdi->use_count++; @@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n", cdi->name, cdi->use_count); - /* Do this on open. Don't wait for mount, because they might - not be mounting, but opening with O_NONBLOCK */ - check_disk_change(bdev); return 0; err_release: if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) { diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index b2b034fea34..3ceaf006e7f 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c @@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr) goto probe_fail_cdrom_register; } gd.disk->fops = &gdrom_bdops; - gd.disk->events = DISK_EVENT_MEDIA_CHANGE; /* latch on to the interrupt */ err = gdrom_set_interrupt_handlers(); if (err) diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 4e874c5fa60..e427fbe4599 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) gendisk->queue = q; gendisk->fops = &viocd_fops; gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; - gendisk->events = DISK_EVENT_MEDIA_CHANGE; set_capacity(gendisk, 0); gendisk->private_data = d; d->viocd_disk = gendisk; diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 43ac61978d8..ac6739e085e 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void) pr_info("%s", version); } +static const struct of_device_id n2rng_match[]; static int __devinit n2rng_probe(struct platform_device *op) { + const struct of_device_id *match; int victoria_falls; int err = -ENOMEM; struct n2rng *np; - if (!op->dev.of_match) + match = of_match_device(n2rng_match, &op->dev); + if (!match) return -EINVAL; - victoria_falls = (op->dev.of_match->data != NULL); + victoria_falls = (match->data != NULL); n2rng_driver_version(); np = 
kzalloc(sizeof(*np), GFP_KERNEL); diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index cc6c9b2546a..64c6b853061 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = { }; #endif /* CONFIG_PCI */ +static struct of_device_id ipmi_match[]; static int __devinit ipmi_probe(struct platform_device *dev) { #ifdef CONFIG_OF + const struct of_device_id *match; struct smi_info *info; struct resource resource; const __be32 *regsize, *regspacing, *regshift; @@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev) dev_info(&dev->dev, "probing via device tree\n"); - if (!dev->dev.of_match) + match = of_match_device(ipmi_match, &dev->dev); + if (!match) return -EINVAL; ret = of_address_to_resource(np, 0, &resource); @@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev) return -ENOMEM; } - info->si_type = (enum si_type) dev->dev.of_match->data; + info->si_type = (enum si_type) match->data; info->addr_source = SI_DEVICETREE; info->irq_setup = std_irq_setup; diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c index d6412c16385..39ccdeada79 100644 --- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c +++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c @@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev) } #ifdef CONFIG_OF -static int __devinit hwicap_of_probe(struct platform_device *op) +static int __devinit hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) { struct resource res; const unsigned int *id; const char *family; int rc; - const struct hwicap_driver_config *config = op->dev.of_match->data; const struct config_registers *regs; @@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op) regs); } #else -static inline int hwicap_of_probe(struct platform_device *op) +static inline int hwicap_of_probe(struct platform_device *op, + const struct hwicap_driver_config *config) { return -EINVAL; } #endif /* CONFIG_OF */ +static const struct of_device_id __devinitconst hwicap_of_match[]; static int __devinit hwicap_drv_probe(struct platform_device *pdev) { + const struct of_device_id *match; struct resource *res; const struct config_registers *regs; const char *family; - if (pdev->dev.of_match) - return hwicap_of_probe(pdev); + match = of_match_device(hwicap_of_match, &pdev->dev); + if (match) + return hwicap_of_probe(pdev, match->data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index ca8ee8093d6..9fb84853d8e 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -1,3 +1,5 @@ +menu "CPU Frequency scaling" + config CPU_FREQ bool "CPU Frequency scaling" help @@ -18,19 +20,6 @@ if CPU_FREQ config CPU_FREQ_TABLE tristate -config CPU_FREQ_DEBUG - bool "Enable CPUfreq debugging" - help - Say Y here to enable CPUfreq subsystem (including drivers) - debugging. You will need to activate it via the kernel - command line by passing - cpufreq.debug=<value> - - To get <value>, add - 1 to activate CPUfreq core debugging, - 2 to activate CPUfreq drivers debugging, and - 4 to activate CPUfreq governor debugging - config CPU_FREQ_STAT tristate "CPU frequency translation statistics" select CPU_FREQ_TABLE @@ -190,4 +179,10 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. 
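A note on the n2rng, ipmi and hwicap probe changes above: all three make the same conversion. Instead of dereferencing a match pointer cached in struct device (op->dev.of_match), the probe routine re-runs matching against the driver's own table via of_match_device(); the forward declaration each hunk adds lets the table itself stay next to the driver definition at the bottom of the file. A minimal sketch of the pattern, with hypothetical names (foo_match, foo_probe, foo_variant, "acme,foo") that are not part of this patch:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_match[];	/* table defined below */

static int foo_variant;		/* stand-in for real per-device driver data */

static int __devinit foo_probe(struct platform_device *op)
{
	const struct of_device_id *match;

	/* Re-match at probe time instead of reading op->dev.of_match. */
	match = of_match_device(foo_match, &op->dev);
	if (!match)
		return -EINVAL;

	/* Per-device data still travels in the matched table entry. */
	platform_set_drvdata(op, (void *)match->data);
	return 0;
}

static const struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo", .data = &foo_variant, },
	{ /* sentinel */ },
};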
-endif # CPU_FREQ +menu "x86 CPU frequency scaling drivers" +depends on X86 +source "drivers/cpufreq/Kconfig.x86" +endmenu + +endif +endmenu diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 new file mode 100644 index 00000000000..343f8476048 --- /dev/null +++ b/drivers/cpufreq/Kconfig.x86 @@ -0,0 +1,255 @@ +# +# x86 CPU Frequency scaling drivers +# + +config X86_PCC_CPUFREQ + tristate "Processor Clocking Control interface driver" + depends on ACPI && ACPI_PROCESSOR + help + This driver adds support for the PCC interface. + + For details, take a look at: + <file:Documentation/cpu-freq/pcc-cpufreq.txt>. + + To compile this driver as a module, choose M here: the + module will be called pcc-cpufreq. + + If in doubt, say N. + +config X86_ACPI_CPUFREQ + tristate "ACPI Processor P-States driver" + select CPU_FREQ_TABLE + depends on ACPI_PROCESSOR + help + This driver adds a CPUFreq driver which utilizes the ACPI + Processor Performance States. + This driver also supports Intel Enhanced Speedstep. + + To compile this driver as a module, choose M here: the + module will be called acpi-cpufreq. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config ELAN_CPUFREQ + tristate "AMD Elan SC400 and SC410" + select CPU_FREQ_TABLE + depends on X86_ELAN + ---help--- + This adds the CPUFreq driver for AMD Elan SC400 and SC410 + processors. + + You need to specify the processor maximum speed as a boot + parameter: elanfreq=maxspeed (in kHz) or as the module + parameter "max_freq". + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config SC520_CPUFREQ + tristate "AMD Elan SC520" + select CPU_FREQ_TABLE + depends on X86_ELAN + ---help--- + This adds the CPUFreq driver for the AMD Elan SC520 processor. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + + +config X86_POWERNOW_K6 + tristate "AMD Mobile K6-2/K6-3 PowerNow!" + select CPU_FREQ_TABLE + depends on X86_32 + help + This adds the CPUFreq driver for mobile AMD K6-2+ and mobile + AMD K6-3+ processors. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config X86_POWERNOW_K7 + tristate "AMD Mobile Athlon/Duron PowerNow!" + select CPU_FREQ_TABLE + depends on X86_32 + help + This adds the CPUFreq driver for mobile AMD K7 processors. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config X86_POWERNOW_K7_ACPI + bool + depends on X86_POWERNOW_K7 && ACPI_PROCESSOR + depends on !(X86_POWERNOW_K7 = y && ACPI_PROCESSOR = m) + depends on X86_32 + default y + +config X86_POWERNOW_K8 + tristate "AMD Opteron/Athlon64 PowerNow!" + select CPU_FREQ_TABLE + depends on ACPI && ACPI_PROCESSOR + help + This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors. + + To compile this driver as a module, choose M here: the + module will be called powernow-k8. + + For details, take a look at <file:Documentation/cpu-freq/>. + +config X86_GX_SUSPMOD + tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation" + depends on X86_32 && PCI + help + This adds the CPUFreq driver for NatSemi Geode processors which + support suspend modulation. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N.
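Almost every entry in this new Kconfig.x86 selects CPU_FREQ_TABLE because these drivers describe their operating points as a cpufreq_frequency_table and delegate policy clamping and state selection to the freq_table helpers (acpi-cpufreq, added further down, is a complete in-tree example). A minimal sketch of the pattern, with hypothetical names (foo_table, foo_verify, foo_target) and made-up frequencies:

#include <linux/cpufreq.h>
#include <linux/errno.h>

/* Hypothetical two-state table; a real driver fills this in from hardware. */
static struct cpufreq_frequency_table foo_table[] = {
	{ .index = 0, .frequency = 1000000 },	/* kHz */
	{ .index = 1, .frequency = 2000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int foo_verify(struct cpufreq_policy *policy)
{
	/* Clamp policy->min/max to frequencies the table can satisfy. */
	return cpufreq_frequency_table_verify(policy, foo_table);
}

static int foo_target(struct cpufreq_policy *policy,
		      unsigned int target_freq, unsigned int relation)
{
	unsigned int idx;

	/* Resolve target_freq/relation to a table index... */
	if (cpufreq_frequency_table_target(policy, foo_table, target_freq,
					   relation, &idx))
		return -EINVAL;

	/* ...then program the hardware for foo_table[idx] (omitted here). */
	return 0;
}

This is the same sequence acpi_cpufreq_target() below walks through before issuing its MSR or I/O port write.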
+ +config X86_SPEEDSTEP_CENTRINO + tristate "Intel Enhanced SpeedStep (deprecated)" + select CPU_FREQ_TABLE + select X86_SPEEDSTEP_CENTRINO_TABLE if X86_32 + depends on X86_32 || (X86_64 && ACPI_PROCESSOR) + help + This is deprecated and this functionality is now merged into + acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of + speedstep_centrino. + This adds the CPUFreq driver for Enhanced SpeedStep enabled + mobile CPUs. This means Intel Pentium M (Centrino) CPUs + or 64bit enabled Intel Xeons. + + To compile this driver as a module, choose M here: the + module will be called speedstep-centrino. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config X86_SPEEDSTEP_CENTRINO_TABLE + bool "Built-in tables for Banias CPUs" + depends on X86_32 && X86_SPEEDSTEP_CENTRINO + default y + help + Use built-in tables for Banias CPUs if ACPI encoding + is not available. + + If in doubt, say N. + +config X86_SPEEDSTEP_ICH + tristate "Intel Speedstep on ICH-M chipsets (ioport interface)" + select CPU_FREQ_TABLE + depends on X86_32 + help + This adds the CPUFreq driver for certain mobile Intel Pentium III + (Coppermine), all mobile Intel Pentium III-M (Tualatin) and all + mobile Intel Pentium 4 P4-M on systems which have an Intel ICH2, + ICH3 or ICH4 southbridge. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config X86_SPEEDSTEP_SMI + tristate "Intel SpeedStep on 440BX/ZX/MX chipsets (SMI interface)" + select CPU_FREQ_TABLE + depends on X86_32 && EXPERIMENTAL + help + This adds the CPUFreq driver for certain mobile Intel Pentium III + (Coppermine), all mobile Intel Pentium III-M (Tualatin) + on systems which have an Intel 440BX/ZX/MX southbridge. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config X86_P4_CLOCKMOD + tristate "Intel Pentium 4 clock modulation" + select CPU_FREQ_TABLE + help + This adds the CPUFreq driver for Intel Pentium 4 / XEON + processors. When enabled it will lower CPU temperature by skipping + clocks. + + This driver should be only used in exceptional + circumstances when very low power is needed because it causes severe + slowdowns and noticeable latencies. Normally Speedstep should be used + instead. + + To compile this driver as a module, choose M here: the + module will be called p4-clockmod. + + For details, take a look at <file:Documentation/cpu-freq/>. + + Unless you are absolutely sure say N. + +config X86_CPUFREQ_NFORCE2 + tristate "nVidia nForce2 FSB changing" + depends on X86_32 && EXPERIMENTAL + help + This adds the CPUFreq driver for FSB changing on nVidia nForce2 + platforms. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config X86_LONGRUN + tristate "Transmeta LongRun" + depends on X86_32 + help + This adds the CPUFreq driver for Transmeta Crusoe and Efficeon processors + which support LongRun. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. + +config X86_LONGHAUL + tristate "VIA Cyrix III Longhaul" + select CPU_FREQ_TABLE + depends on X86_32 && ACPI_PROCESSOR + help + This adds the CPUFreq driver for VIA Samuel/CyrixIII, + VIA Cyrix Samuel/C3, VIA Cyrix Ezra and VIA Cyrix Ezra-T + processors. + + For details, take a look at <file:Documentation/cpu-freq/>. + + If in doubt, say N. 
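The p4-clockmod entry above deserves a gloss: "skipping clocks" means programming the processor's clock-modulation MSR so the core runs at a fixed duty cycle, which lowers temperature at the cost of the severe slowdowns the help text warns about. A hedged sketch of the mechanism, not an excerpt from p4-clockmod; set_duty_cycle is a hypothetical name, and the bit layout assumed is the architectural IA32_CLOCK_MODULATION encoding (duty cycle in bits 3:1 in units of 12.5%, on-demand enable in bit 4):

#include <linux/errno.h>
#include <asm/msr.h>	/* rdmsr_on_cpu(), MSR_IA32_THERM_CONTROL (0x19a) */

/* state is 1..7 (12.5% steps); 0 disables modulation entirely. */
static int set_duty_cycle(unsigned int cpu, unsigned int state)
{
	u32 lo, hi;

	if (state > 7)
		return -EINVAL;

	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &lo, &hi);
	if (state == 0)
		lo &= ~(1 << 4);			/* stop skipping clocks */
	else
		lo = (lo & ~0xe) | (1 << 4) | (state << 1);
	return wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, lo, hi);
}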
+ +config X86_E_POWERSAVER + tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)" + select CPU_FREQ_TABLE + depends on X86_32 && EXPERIMENTAL + help + This adds the CPUFreq driver for VIA C7 processors. However, this driver + does not have any safeguards to prevent operating the CPU out of spec + and is thus considered dangerous. Please use the regular ACPI cpufreq + driver, enabled by CONFIG_X86_ACPI_CPUFREQ. + + If in doubt, say N. + +comment "shared options" + +config X86_SPEEDSTEP_LIB + tristate + default (X86_SPEEDSTEP_ICH || X86_SPEEDSTEP_SMI || X86_P4_CLOCKMOD) + +config X86_SPEEDSTEP_RELAXED_CAP_CHECK + bool "Relaxed speedstep capability checks" + depends on X86_32 && (X86_SPEEDSTEP_SMI || X86_SPEEDSTEP_ICH) + help + Don't perform all checks for a speedstep capable system which would + normally be done. Some ancient or strange systems, though speedstep + capable, don't always indicate that they are speedstep capable. This + option lets the probing code bypass some of those checks if the + parameter "relaxed_check=1" is passed to the module. + diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 71fc3b4173f..c7f1a6f16b6 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -13,3 +13,29 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o # CPUfreq cross-arch helpers obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o +##################################################################################d +# x86 drivers. +# Link order matters. K8 is preferred to ACPI because of firmware bugs in early +# K8 systems. ACPI is preferred to all other hardware-specific drivers. +# speedstep-* is preferred over p4-clockmod. + +obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o mperf.o +obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o +obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o +obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o +obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o +obj-$(CONFIG_X86_LONGHAUL) += longhaul.o +obj-$(CONFIG_X86_E_POWERSAVER) += e_powersaver.o +obj-$(CONFIG_ELAN_CPUFREQ) += elanfreq.o +obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o +obj-$(CONFIG_X86_LONGRUN) += longrun.o +obj-$(CONFIG_X86_GX_SUSPMOD) += gx-suspmod.o +obj-$(CONFIG_X86_SPEEDSTEP_ICH) += speedstep-ich.o +obj-$(CONFIG_X86_SPEEDSTEP_LIB) += speedstep-lib.o +obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o +obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o +obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o +obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o + +##################################################################################d + diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c new file mode 100644 index 00000000000..4e04e127438 --- /dev/null +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -0,0 +1,773 @@ +/* + * acpi-cpufreq.c - ACPI Processor P-States Driver + * + * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> + * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> + * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/sched.h> +#include <linux/cpufreq.h> +#include <linux/compiler.h> +#include <linux/dmi.h> +#include <linux/slab.h> + +#include <linux/acpi.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/uaccess.h> + +#include <acpi/processor.h> + +#include <asm/msr.h> +#include <asm/processor.h> +#include <asm/cpufeature.h> +#include "mperf.h" + +MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); +MODULE_DESCRIPTION("ACPI Processor P-States Driver"); +MODULE_LICENSE("GPL"); + +enum { + UNDEFINED_CAPABLE = 0, + SYSTEM_INTEL_MSR_CAPABLE, + SYSTEM_IO_CAPABLE, +}; + +#define INTEL_MSR_RANGE (0xffff) + +struct acpi_cpufreq_data { + struct acpi_processor_performance *acpi_data; + struct cpufreq_frequency_table *freq_table; + unsigned int resume; + unsigned int cpu_feature; +}; + +static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); + +/* acpi_perf_data is a pointer to percpu data. */ +static struct acpi_processor_performance __percpu *acpi_perf_data; + +static struct cpufreq_driver acpi_cpufreq_driver; + +static unsigned int acpi_pstate_strict; + +static int check_est_cpu(unsigned int cpuid) +{ + struct cpuinfo_x86 *cpu = &cpu_data(cpuid); + + return cpu_has(cpu, X86_FEATURE_EST); +} + +static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) +{ + struct acpi_processor_performance *perf; + int i; + + perf = data->acpi_data; + + for (i = 0; i < perf->state_count; i++) { + if (value == perf->states[i].status) + return data->freq_table[i].frequency; + } + return 0; +} + +static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) +{ + int i; + struct acpi_processor_performance *perf; + + msr &= INTEL_MSR_RANGE; + perf = data->acpi_data; + + for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (msr == perf->states[data->freq_table[i].index].status) + return data->freq_table[i].frequency; + } + return data->freq_table[0].frequency; +} + +static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) +{ + switch (data->cpu_feature) { + case SYSTEM_INTEL_MSR_CAPABLE: + return extract_msr(val, data); + case SYSTEM_IO_CAPABLE: + return extract_io(val, data); + default: + return 0; + } +} + +struct msr_addr { + u32 reg; +}; + +struct io_addr { + u16 port; + u8 bit_width; +}; + +struct drv_cmd { + unsigned int type; + const struct cpumask *mask; + union { + struct msr_addr msr; + struct io_addr io; + } addr; + u32 val; +}; + +/* Called via smp_call_function_single(), on the target CPU */ +static void do_drv_read(void *_cmd) +{ + struct drv_cmd *cmd = _cmd; + u32 h; + + switch (cmd->type) { + case SYSTEM_INTEL_MSR_CAPABLE: + rdmsr(cmd->addr.msr.reg, cmd->val, h); + break; + case SYSTEM_IO_CAPABLE: + acpi_os_read_port((acpi_io_address)cmd->addr.io.port, + &cmd->val, + (u32)cmd->addr.io.bit_width); + break; + default: + break; + } +} + +/* Called via 
smp_call_function_many(), on the target CPUs */ +static void do_drv_write(void *_cmd) +{ + struct drv_cmd *cmd = _cmd; + u32 lo, hi; + + switch (cmd->type) { + case SYSTEM_INTEL_MSR_CAPABLE: + rdmsr(cmd->addr.msr.reg, lo, hi); + lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); + wrmsr(cmd->addr.msr.reg, lo, hi); + break; + case SYSTEM_IO_CAPABLE: + acpi_os_write_port((acpi_io_address)cmd->addr.io.port, + cmd->val, + (u32)cmd->addr.io.bit_width); + break; + default: + break; + } +} + +static void drv_read(struct drv_cmd *cmd) +{ + int err; + cmd->val = 0; + + err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1); + WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */ +} + +static void drv_write(struct drv_cmd *cmd) +{ + int this_cpu; + + this_cpu = get_cpu(); + if (cpumask_test_cpu(this_cpu, cmd->mask)) + do_drv_write(cmd); + smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); + put_cpu(); +} + +static u32 get_cur_val(const struct cpumask *mask) +{ + struct acpi_processor_performance *perf; + struct drv_cmd cmd; + + if (unlikely(cpumask_empty(mask))) + return 0; + + switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { + case SYSTEM_INTEL_MSR_CAPABLE: + cmd.type = SYSTEM_INTEL_MSR_CAPABLE; + cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; + break; + case SYSTEM_IO_CAPABLE: + cmd.type = SYSTEM_IO_CAPABLE; + perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; + cmd.addr.io.port = perf->control_register.address; + cmd.addr.io.bit_width = perf->control_register.bit_width; + break; + default: + return 0; + } + + cmd.mask = mask; + drv_read(&cmd); + + pr_debug("get_cur_val = %u\n", cmd.val); + + return cmd.val; +} + +static unsigned int get_cur_freq_on_cpu(unsigned int cpu) +{ + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); + unsigned int freq; + unsigned int cached_freq; + + pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); + + if (unlikely(data == NULL || + data->acpi_data == NULL || data->freq_table == NULL)) { + return 0; + } + + cached_freq = data->freq_table[data->acpi_data->state].frequency; + freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); + if (freq != cached_freq) { + /* + * The dreaded BIOS frequency change behind our back. + * Force set the frequency on next target call. 
+ */ + data->resume = 1; + } + + pr_debug("cur freq = %u\n", freq); + + return freq; +} + +static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, + struct acpi_cpufreq_data *data) +{ + unsigned int cur_freq; + unsigned int i; + + for (i = 0; i < 100; i++) { + cur_freq = extract_freq(get_cur_val(mask), data); + if (cur_freq == freq) + return 1; + udelay(10); + } + return 0; +} + +static int acpi_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + struct acpi_processor_performance *perf; + struct cpufreq_freqs freqs; + struct drv_cmd cmd; + unsigned int next_state = 0; /* Index into freq_table */ + unsigned int next_perf_state = 0; /* Index into perf table */ + unsigned int i; + int result = 0; + + pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); + + if (unlikely(data == NULL || + data->acpi_data == NULL || data->freq_table == NULL)) { + return -ENODEV; + } + + perf = data->acpi_data; + result = cpufreq_frequency_table_target(policy, + data->freq_table, + target_freq, + relation, &next_state); + if (unlikely(result)) { + result = -ENODEV; + goto out; + } + + next_perf_state = data->freq_table[next_state].index; + if (perf->state == next_perf_state) { + if (unlikely(data->resume)) { + pr_debug("Called after resume, resetting to P%d\n", + next_perf_state); + data->resume = 0; + } else { + pr_debug("Already at target state (P%d)\n", + next_perf_state); + goto out; + } + } + + switch (data->cpu_feature) { + case SYSTEM_INTEL_MSR_CAPABLE: + cmd.type = SYSTEM_INTEL_MSR_CAPABLE; + cmd.addr.msr.reg = MSR_IA32_PERF_CTL; + cmd.val = (u32) perf->states[next_perf_state].control; + break; + case SYSTEM_IO_CAPABLE: + cmd.type = SYSTEM_IO_CAPABLE; + cmd.addr.io.port = perf->control_register.address; + cmd.addr.io.bit_width = perf->control_register.bit_width; + cmd.val = (u32) perf->states[next_perf_state].control; + break; + default: + result = -ENODEV; + goto out; + } + + /* cpufreq holds the hotplug lock, so we are safe from here on */ + if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) + cmd.mask = policy->cpus; + else + cmd.mask = cpumask_of(policy->cpu); + + freqs.old = perf->states[perf->state].core_frequency * 1000; + freqs.new = data->freq_table[next_state].frequency; + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + drv_write(&cmd); + + if (acpi_pstate_strict) { + if (!check_freqs(cmd.mask, freqs.new, data)) { + pr_debug("acpi_cpufreq_target failed (%d)\n", + policy->cpu); + result = -EAGAIN; + goto out; + } + } + + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + perf->state = next_perf_state; + +out: + return result; +} + +static int acpi_cpufreq_verify(struct cpufreq_policy *policy) +{ + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + + pr_debug("acpi_cpufreq_verify\n"); + + return cpufreq_frequency_table_verify(policy, data->freq_table); +} + +static unsigned long +acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) +{ + struct acpi_processor_performance *perf = data->acpi_data; + + if (cpu_khz) { + /* search the closest match to cpu_khz */ + unsigned int i; + unsigned long freq; + unsigned long freqn = perf->states[0].core_frequency * 1000; + + for (i = 0; i < (perf->state_count-1); i++) { + freq = freqn; + freqn = perf->states[i+1].core_frequency * 1000; + if 
((2 * cpu_khz) > (freqn + freq)) { + perf->state = i; + return freq; + } + } + perf->state = perf->state_count-1; + return freqn; + } else { + /* assume CPU is at P0... */ + perf->state = 0; + return perf->states[0].core_frequency * 1000; + } +} + +static void free_acpi_perf_data(void) +{ + unsigned int i; + + /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ + for_each_possible_cpu(i) + free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) + ->shared_cpu_map); + free_percpu(acpi_perf_data); +} + +/* + * acpi_cpufreq_early_init - initialize ACPI P-States library + * + * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) + * in order to determine correct frequency and voltage pairings. We can + * do _PDC and _PSD and find out the processor dependency for the + * actual init that will happen later... + */ +static int __init acpi_cpufreq_early_init(void) +{ + unsigned int i; + pr_debug("acpi_cpufreq_early_init\n"); + + acpi_perf_data = alloc_percpu(struct acpi_processor_performance); + if (!acpi_perf_data) { + pr_debug("Memory allocation error for acpi_perf_data.\n"); + return -ENOMEM; + } + for_each_possible_cpu(i) { + if (!zalloc_cpumask_var_node( + &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, + GFP_KERNEL, cpu_to_node(i))) { + + /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ + free_acpi_perf_data(); + return -ENOMEM; + } + } + + /* Do initialization in ACPI core */ + acpi_processor_preregister_performance(acpi_perf_data); + return 0; +} + +#ifdef CONFIG_SMP +/* + * Some BIOSes do SW_ANY coordination internally, either set it up in hw + * or do it in BIOS firmware and won't inform about it to OS. If not + * detected, this has a side effect of making CPU run at a different speed + * than OS intended it to run at. Detect it and handle it cleanly. + */ +static int bios_with_sw_any_bug; + +static int sw_any_bug_found(const struct dmi_system_id *d) +{ + bios_with_sw_any_bug = 1; + return 0; +} + +static const struct dmi_system_id sw_any_bug_dmi_table[] = { + { + .callback = sw_any_bug_found, + .ident = "Supermicro Server X6DLP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), + DMI_MATCH(DMI_BIOS_VERSION, "080010"), + DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), + }, + }, + { } +}; + +static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) +{ + /* Intel Xeon Processor 7100 Series Specification Update + * http://www.intel.com/Assets/PDF/specupdate/314554.pdf + * AL30: A Machine Check Exception (MCE) Occurring during an + * Enhanced Intel SpeedStep Technology Ratio Change May Cause + * Both Processor Cores to Lock Up. 
*/ + if (c->x86_vendor == X86_VENDOR_INTEL) { + if ((c->x86 == 15) && + (c->x86_model == 6) && + (c->x86_mask == 8)) { + printk(KERN_INFO "acpi-cpufreq: Intel(R) " + "Xeon(R) 7100 Errata AL30, processors may " + "lock up on frequency changes: disabling " + "acpi-cpufreq.\n"); + return -ENODEV; + } + } + return 0; +} +#endif + +static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int i; + unsigned int valid_states = 0; + unsigned int cpu = policy->cpu; + struct acpi_cpufreq_data *data; + unsigned int result = 0; + struct cpuinfo_x86 *c = &cpu_data(policy->cpu); + struct acpi_processor_performance *perf; +#ifdef CONFIG_SMP + static int blacklisted; +#endif + + pr_debug("acpi_cpufreq_cpu_init\n"); + +#ifdef CONFIG_SMP + if (blacklisted) + return blacklisted; + blacklisted = acpi_cpufreq_blacklist(c); + if (blacklisted) + return blacklisted; +#endif + + data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); + per_cpu(acfreq_data, cpu) = data; + + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) + acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; + + result = acpi_processor_register_performance(data->acpi_data, cpu); + if (result) + goto err_free; + + perf = data->acpi_data; + policy->shared_type = perf->shared_type; + + /* + * Will let policy->cpus know about dependency only when software + * coordination is required. + */ + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || + policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { + cpumask_copy(policy->cpus, perf->shared_cpu_map); + } + cpumask_copy(policy->related_cpus, perf->shared_cpu_map); + +#ifdef CONFIG_SMP + dmi_check_system(sw_any_bug_dmi_table); + if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) { + policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; + cpumask_copy(policy->cpus, cpu_core_mask(cpu)); + } +#endif + + /* capability check */ + if (perf->state_count <= 1) { + pr_debug("No P-States\n"); + result = -ENODEV; + goto err_unreg; + } + + if (perf->control_register.space_id != perf->status_register.space_id) { + result = -ENODEV; + goto err_unreg; + } + + switch (perf->control_register.space_id) { + case ACPI_ADR_SPACE_SYSTEM_IO: + pr_debug("SYSTEM IO addr space\n"); + data->cpu_feature = SYSTEM_IO_CAPABLE; + break; + case ACPI_ADR_SPACE_FIXED_HARDWARE: + pr_debug("HARDWARE addr space\n"); + if (!check_est_cpu(cpu)) { + result = -ENODEV; + goto err_unreg; + } + data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; + break; + default: + pr_debug("Unknown addr space %d\n", + (u32) (perf->control_register.space_id)); + result = -ENODEV; + goto err_unreg; + } + + data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * + (perf->state_count+1), GFP_KERNEL); + if (!data->freq_table) { + result = -ENOMEM; + goto err_unreg; + } + + /* detect transition latency */ + policy->cpuinfo.transition_latency = 0; + for (i = 0; i < perf->state_count; i++) { + if ((perf->states[i].transition_latency * 1000) > + policy->cpuinfo.transition_latency) + policy->cpuinfo.transition_latency = + perf->states[i].transition_latency * 1000; + } + + /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ + if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && + policy->cpuinfo.transition_latency > 20 * 1000) { + policy->cpuinfo.transition_latency = 20 * 1000; + printk_once(KERN_INFO + "P-state transition latency capped at 20 uS\n"); + } + + /* table init */ + for (i = 0; i < perf->state_count; i++) { + if (i > 
0 && perf->states[i].core_frequency >= + data->freq_table[valid_states-1].frequency / 1000) + continue; + + data->freq_table[valid_states].index = i; + data->freq_table[valid_states].frequency = + perf->states[i].core_frequency * 1000; + valid_states++; + } + data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; + perf->state = 0; + + result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); + if (result) + goto err_freqfree; + + if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) + printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); + + switch (perf->control_register.space_id) { + case ACPI_ADR_SPACE_SYSTEM_IO: + /* Current speed is unknown and not detectable by IO port */ + policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); + break; + case ACPI_ADR_SPACE_FIXED_HARDWARE: + acpi_cpufreq_driver.get = get_cur_freq_on_cpu; + policy->cur = get_cur_freq_on_cpu(cpu); + break; + default: + break; + } + + /* notify BIOS that we exist */ + acpi_processor_notify_smm(THIS_MODULE); + + /* Check for APERF/MPERF support in hardware */ + if (cpu_has(c, X86_FEATURE_APERFMPERF)) + acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; + + pr_debug("CPU%u - ACPI performance management activated.\n", cpu); + for (i = 0; i < perf->state_count; i++) + pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", + (i == perf->state ? '*' : ' '), i, + (u32) perf->states[i].core_frequency, + (u32) perf->states[i].power, + (u32) perf->states[i].transition_latency); + + cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); + + /* + * the first call to ->target() should result in us actually + * writing something to the appropriate registers. + */ + data->resume = 1; + + return result; + +err_freqfree: + kfree(data->freq_table); +err_unreg: + acpi_processor_unregister_performance(perf, cpu); +err_free: + kfree(data); + per_cpu(acfreq_data, cpu) = NULL; + + return result; +} + +static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + + pr_debug("acpi_cpufreq_cpu_exit\n"); + + if (data) { + cpufreq_frequency_table_put_attr(policy->cpu); + per_cpu(acfreq_data, policy->cpu) = NULL; + acpi_processor_unregister_performance(data->acpi_data, + policy->cpu); + kfree(data->freq_table); + kfree(data); + } + + return 0; +} + +static int acpi_cpufreq_resume(struct cpufreq_policy *policy) +{ + struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); + + pr_debug("acpi_cpufreq_resume\n"); + + data->resume = 1; + + return 0; +} + +static struct freq_attr *acpi_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver acpi_cpufreq_driver = { + .verify = acpi_cpufreq_verify, + .target = acpi_cpufreq_target, + .bios_limit = acpi_processor_get_bios_limit, + .init = acpi_cpufreq_cpu_init, + .exit = acpi_cpufreq_cpu_exit, + .resume = acpi_cpufreq_resume, + .name = "acpi-cpufreq", + .owner = THIS_MODULE, + .attr = acpi_cpufreq_attr, +}; + +static int __init acpi_cpufreq_init(void) +{ + int ret; + + if (acpi_disabled) + return 0; + + pr_debug("acpi_cpufreq_init\n"); + + ret = acpi_cpufreq_early_init(); + if (ret) + return ret; + + ret = cpufreq_register_driver(&acpi_cpufreq_driver); + if (ret) + free_acpi_perf_data(); + + return ret; +} + +static void __exit acpi_cpufreq_exit(void) +{ + pr_debug("acpi_cpufreq_exit\n"); + + cpufreq_unregister_driver(&acpi_cpufreq_driver); + + free_percpu(acpi_perf_data); +} + +module_param(acpi_pstate_strict, uint, 
0644); +MODULE_PARM_DESC(acpi_pstate_strict, + "value 0 or non-zero. non-zero -> strict ACPI checks are " + "performed during frequency changes."); + +late_initcall(acpi_cpufreq_init); +module_exit(acpi_cpufreq_exit); + +MODULE_ALIAS("acpi"); diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c new file mode 100644 index 00000000000..7bac808804f --- /dev/null +++ b/drivers/cpufreq/cpufreq-nforce2.c @@ -0,0 +1,444 @@ +/* + * (C) 2004-2006 Sebastian Witt <se.witt@gmx.net> + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon reverse engineered information + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/pci.h> +#include <linux/delay.h> + +#define NFORCE2_XTAL 25 +#define NFORCE2_BOOTFSB 0x48 +#define NFORCE2_PLLENABLE 0xa8 +#define NFORCE2_PLLREG 0xa4 +#define NFORCE2_PLLADR 0xa0 +#define NFORCE2_PLL(mul, div) (0x100000 | (mul << 8) | div) + +#define NFORCE2_MIN_FSB 50 +#define NFORCE2_SAFE_DISTANCE 50 + +/* Delay in ms between FSB changes */ +/* #define NFORCE2_DELAY 10 */ + +/* + * nforce2_chipset: + * FSB is changed using the chipset + */ +static struct pci_dev *nforce2_dev; + +/* fid: + * multiplier * 10 + */ +static int fid; + +/* min_fsb, max_fsb: + * minimum and maximum FSB (= FSB at boot time) + */ +static int min_fsb; +static int max_fsb; + +MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>"); +MODULE_DESCRIPTION("nForce2 FSB changing cpufreq driver"); +MODULE_LICENSE("GPL"); + +module_param(fid, int, 0444); +module_param(min_fsb, int, 0444); + +MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)"); +MODULE_PARM_DESC(min_fsb, + "Minimum FSB to use, if not defined: current FSB - 50"); + +#define PFX "cpufreq-nforce2: " + +/** + * nforce2_calc_fsb - calculate FSB + * @pll: PLL value + * + * Calculates FSB from PLL value + */ +static int nforce2_calc_fsb(int pll) +{ + unsigned char mul, div; + + mul = (pll >> 8) & 0xff; + div = pll & 0xff; + + if (div > 0) + return NFORCE2_XTAL * mul / div; + + return 0; +} + +/** + * nforce2_calc_pll - calculate PLL value + * @fsb: FSB + * + * Calculate PLL value for given FSB + */ +static int nforce2_calc_pll(unsigned int fsb) +{ + unsigned char xmul, xdiv; + unsigned char mul = 0, div = 0; + int tried = 0; + + /* Try to calculate multiplier and divider up to 4 times */ + while (((mul == 0) || (div == 0)) && (tried <= 3)) { + for (xdiv = 2; xdiv <= 0x80; xdiv++) + for (xmul = 1; xmul <= 0xfe; xmul++) + if (nforce2_calc_fsb(NFORCE2_PLL(xmul, xdiv)) == + fsb + tried) { + mul = xmul; + div = xdiv; + } + tried++; + } + + if ((mul == 0) || (div == 0)) + return -1; + + return NFORCE2_PLL(mul, div); +} + +/** + * nforce2_write_pll - write PLL value to chipset + * @pll: PLL value + * + * Writes new FSB PLL value to chipset + */ +static void nforce2_write_pll(int pll) +{ + int temp; + + /* Set the pll addr. 
to 0x00 */ + pci_write_config_dword(nforce2_dev, NFORCE2_PLLADR, 0); + + /* Now write the value in all 64 registers */ + for (temp = 0; temp <= 0x3f; temp++) + pci_write_config_dword(nforce2_dev, NFORCE2_PLLREG, pll); + + return; +} + +/** + * nforce2_fsb_read - Read FSB + * + * Read FSB from chipset + * If bootfsb != 0, return FSB at boot-time + */ +static unsigned int nforce2_fsb_read(int bootfsb) +{ + struct pci_dev *nforce2_sub5; + u32 fsb, temp = 0; + + /* Get chipset boot FSB from subdevice 5 (FSB at boot-time) */ + nforce2_sub5 = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, 0x01EF, + PCI_ANY_ID, PCI_ANY_ID, NULL); + if (!nforce2_sub5) + return 0; + + pci_read_config_dword(nforce2_sub5, NFORCE2_BOOTFSB, &fsb); + fsb /= 1000000; + + /* Check if PLL register is already set */ + pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp); + + if (bootfsb || !temp) + return fsb; + + /* Use PLL register FSB value */ + pci_read_config_dword(nforce2_dev, NFORCE2_PLLREG, &temp); + fsb = nforce2_calc_fsb(temp); + + return fsb; +} + +/** + * nforce2_set_fsb - set new FSB + * @fsb: New FSB + * + * Sets new FSB + */ +static int nforce2_set_fsb(unsigned int fsb) +{ + u32 temp = 0; + unsigned int tfsb; + int diff; + int pll = 0; + + if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) { + printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb); + return -EINVAL; + } + + tfsb = nforce2_fsb_read(0); + if (!tfsb) { + printk(KERN_ERR PFX "Error while reading the FSB\n"); + return -EINVAL; + } + + /* First write? Then set actual value */ + pci_read_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8 *)&temp); + if (!temp) { + pll = nforce2_calc_pll(tfsb); + + if (pll < 0) + return -EINVAL; + + nforce2_write_pll(pll); + } + + /* Enable write access */ + temp = 0x01; + pci_write_config_byte(nforce2_dev, NFORCE2_PLLENABLE, (u8)temp); + + diff = tfsb - fsb; + + if (!diff) + return 0; + + while ((tfsb != fsb) && (tfsb <= max_fsb) && (tfsb >= min_fsb)) { + if (diff < 0) + tfsb++; + else + tfsb--; + + /* Calculate the PLL reg. value */ + pll = nforce2_calc_pll(tfsb); + if (pll == -1) + return -EINVAL; + + nforce2_write_pll(pll); +#ifdef NFORCE2_DELAY + mdelay(NFORCE2_DELAY); +#endif + } + + temp = 0x40; + pci_write_config_byte(nforce2_dev, NFORCE2_PLLADR, (u8)temp); + + return 0; +} + +/** + * nforce2_get - get the CPU frequency + * @cpu: CPU number + * + * Returns the CPU frequency + */ +static unsigned int nforce2_get(unsigned int cpu) +{ + if (cpu) + return 0; + return nforce2_fsb_read(0) * fid * 100; +} + +/** + * nforce2_target - set a new CPUFreq policy + * @policy: new policy + * @target_freq: the target frequency + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * + * Sets a new CPUFreq policy. 
+ */ +static int nforce2_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ +/* unsigned long flags; */ + struct cpufreq_freqs freqs; + unsigned int target_fsb; + + if ((target_freq > policy->max) || (target_freq < policy->min)) + return -EINVAL; + + target_fsb = target_freq / (fid * 100); + + freqs.old = nforce2_get(policy->cpu); + freqs.new = target_fsb * fid * 100; + freqs.cpu = 0; /* Only one CPU on nForce2 platforms */ + + if (freqs.old == freqs.new) + return 0; + + pr_debug("Old CPU frequency %d kHz, new %d kHz\n", + freqs.old, freqs.new); + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* Disable IRQs */ + /* local_irq_save(flags); */ + + if (nforce2_set_fsb(target_fsb) < 0) + printk(KERN_ERR PFX "Changing FSB to %d failed\n", + target_fsb); + else + pr_debug("Changed FSB successfully to %d\n", + target_fsb); + + /* Enable IRQs */ + /* local_irq_restore(flags); */ + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +/** + * nforce2_verify - verifies a new CPUFreq policy + * @policy: new policy + */ +static int nforce2_verify(struct cpufreq_policy *policy) +{ + unsigned int fsb_pol_max; + + fsb_pol_max = policy->max / (fid * 100); + + if (policy->min < (fsb_pol_max * fid * 100)) + policy->max = (fsb_pol_max + 1) * fid * 100; + + cpufreq_verify_within_limits(policy, + policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + return 0; +} + +static int nforce2_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int fsb; + unsigned int rfid; + + /* capability check */ + if (policy->cpu != 0) + return -ENODEV; + + /* Get current FSB */ + fsb = nforce2_fsb_read(0); + + if (!fsb) + return -EIO; + + /* FIX: Get FID from CPU */ + if (!fid) { + if (!cpu_khz) { + printk(KERN_WARNING PFX + "cpu_khz not set, can't calculate multiplier!\n"); + return -ENODEV; + } + + fid = cpu_khz / (fsb * 100); + rfid = fid % 5; + + if (rfid) { + if (rfid > 2) + fid += 5 - rfid; + else + fid -= rfid; + } + } + + printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb, + fid / 10, fid % 10); + + /* Set maximum FSB to FSB at boot time */ + max_fsb = nforce2_fsb_read(1); + + if (!max_fsb) + return -EIO; + + if (!min_fsb) + min_fsb = max_fsb - NFORCE2_SAFE_DISTANCE; + + if (min_fsb < NFORCE2_MIN_FSB) + min_fsb = NFORCE2_MIN_FSB; + + /* cpuinfo and default policy values */ + policy->cpuinfo.min_freq = min_fsb * fid * 100; + policy->cpuinfo.max_freq = max_fsb * fid * 100; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cur = nforce2_get(policy->cpu); + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + + return 0; +} + +static int nforce2_cpu_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct cpufreq_driver nforce2_driver = { + .name = "nforce2", + .verify = nforce2_verify, + .target = nforce2_target, + .get = nforce2_get, + .init = nforce2_cpu_init, + .exit = nforce2_cpu_exit, + .owner = THIS_MODULE, +}; + +/** + * nforce2_detect_chipset - detect the Southbridge which contains FSB PLL logic + * + * Detects nForce2 A2 and C1 stepping + * + */ +static int nforce2_detect_chipset(void) +{ + nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA, + PCI_DEVICE_ID_NVIDIA_NFORCE2, + PCI_ANY_ID, PCI_ANY_ID, NULL); + + if (nforce2_dev == NULL) + return -ENODEV; + + printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n", + nforce2_dev->revision); + printk(KERN_INFO PFX + "FSB changing is maybe unstable and can lead to " + "crashes and data loss.\n"); + + return 0; +} 
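Worth spelling out the arithmetic in the nforce2 driver above: the PLL word holds a multiplier in bits 15:8 and a divider in bits 7:0 applied to the 25 MHz crystal (NFORCE2_XTAL), and nforce2_calc_pll() simply brute-forces (mul, div) pairs until nforce2_calc_fsb() reproduces the requested FSB, nudging the target up by 1 MHz per retry when no exact pair exists. A standalone userspace sketch of the round trip (calc_fsb mirrors the kernel function; the example values are made up):

#include <stdio.h>

#define NFORCE2_XTAL		25
#define NFORCE2_PLL(mul, div)	(0x100000 | ((mul) << 8) | (div))

/* Same arithmetic as nforce2_calc_fsb() above. */
static int calc_fsb(int pll)
{
	unsigned char mul = (pll >> 8) & 0xff;
	unsigned char div = pll & 0xff;

	return div ? NFORCE2_XTAL * mul / div : 0;
}

int main(void)
{
	/* mul = 16, div = 2: 25 MHz * 16 / 2 = 200 MHz FSB */
	int pll = NFORCE2_PLL(16, 2);

	printf("PLL word 0x%06x -> %d MHz\n", pll, calc_fsb(pll));
	return 0;
}

Note also that nforce2_set_fsb() walks tfsb toward the target one MHz at a time, recomputing and rewriting the PLL word at each step, so a large frequency change is executed as a sequence of small ones.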
+ +/** + * nforce2_init - initializes the nForce2 CPUFreq driver + * + * Initializes the nForce2 FSB support. Returns -ENODEV on unsupported + * devices, -EINVAL on problems during initiatization, and zero on + * success. + */ +static int __init nforce2_init(void) +{ + /* TODO: do we need to detect the processor? */ + + /* detect chipset */ + if (nforce2_detect_chipset()) { + printk(KERN_INFO PFX "No nForce2 chipset.\n"); + return -ENODEV; + } + + return cpufreq_register_driver(&nforce2_driver); +} + +/** + * nforce2_exit - unregisters cpufreq module + * + * Unregisters nForce2 FSB change support. + */ +static void __exit nforce2_exit(void) +{ + cpufreq_unregister_driver(&nforce2_driver); +} + +module_init(nforce2_init); +module_exit(nforce2_exit); + diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 2dafc5c38ae..0a5bea9e358 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -32,9 +32,6 @@ #include <trace/events/power.h> -#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ - "cpufreq-core", msg) - /** * The "cpufreq driver" - the arch- or hardware-dependent low * level driver of CPUFreq support, and its spinlock. This lock @@ -181,93 +178,6 @@ EXPORT_SYMBOL_GPL(cpufreq_cpu_put); /********************************************************************* - * UNIFIED DEBUG HELPERS * - *********************************************************************/ -#ifdef CONFIG_CPU_FREQ_DEBUG - -/* what part(s) of the CPUfreq subsystem are debugged? */ -static unsigned int debug; - -/* is the debug output ratelimit'ed using printk_ratelimit? User can - * set or modify this value. - */ -static unsigned int debug_ratelimit = 1; - -/* is the printk_ratelimit'ing enabled? It's enabled after a successful - * loading of a cpufreq driver, temporarily disabled when a new policy - * is set, and disabled upon cpufreq driver removal - */ -static unsigned int disable_ratelimit = 1; -static DEFINE_SPINLOCK(disable_ratelimit_lock); - -static void cpufreq_debug_enable_ratelimit(void) -{ - unsigned long flags; - - spin_lock_irqsave(&disable_ratelimit_lock, flags); - if (disable_ratelimit) - disable_ratelimit--; - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); -} - -static void cpufreq_debug_disable_ratelimit(void) -{ - unsigned long flags; - - spin_lock_irqsave(&disable_ratelimit_lock, flags); - disable_ratelimit++; - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); -} - -void cpufreq_debug_printk(unsigned int type, const char *prefix, - const char *fmt, ...) 
-{ - char s[256]; - va_list args; - unsigned int len; - unsigned long flags; - - WARN_ON(!prefix); - if (type & debug) { - spin_lock_irqsave(&disable_ratelimit_lock, flags); - if (!disable_ratelimit && debug_ratelimit - && !printk_ratelimit()) { - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); - return; - } - spin_unlock_irqrestore(&disable_ratelimit_lock, flags); - - len = snprintf(s, 256, KERN_DEBUG "%s: ", prefix); - - va_start(args, fmt); - len += vsnprintf(&s[len], (256 - len), fmt, args); - va_end(args); - - printk(s); - - WARN_ON(len < 5); - } -} -EXPORT_SYMBOL(cpufreq_debug_printk); - - -module_param(debug, uint, 0644); -MODULE_PARM_DESC(debug, "CPUfreq debugging: add 1 to debug core," - " 2 to debug drivers, and 4 to debug governors."); - -module_param(debug_ratelimit, uint, 0644); -MODULE_PARM_DESC(debug_ratelimit, "CPUfreq debugging:" - " set to 0 to disable ratelimiting."); - -#else /* !CONFIG_CPU_FREQ_DEBUG */ - -static inline void cpufreq_debug_enable_ratelimit(void) { return; } -static inline void cpufreq_debug_disable_ratelimit(void) { return; } - -#endif /* CONFIG_CPU_FREQ_DEBUG */ - - -/********************************************************************* * EXTERNALLY AFFECTING FREQUENCY CHANGES * *********************************************************************/ @@ -291,7 +201,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) if (!l_p_j_ref_freq) { l_p_j_ref = loops_per_jiffy; l_p_j_ref_freq = ci->old; - dprintk("saving %lu as reference value for loops_per_jiffy; " + pr_debug("saving %lu as reference value for loops_per_jiffy; " "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); } if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) || @@ -299,7 +209,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, ci->new); - dprintk("scaling loops_per_jiffy to %lu " + pr_debug("scaling loops_per_jiffy to %lu " "for frequency %u kHz\n", loops_per_jiffy, ci->new); } } @@ -326,7 +236,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) BUG_ON(irqs_disabled()); freqs->flags = cpufreq_driver->flags; - dprintk("notification %u of frequency transition to %u kHz\n", + pr_debug("notification %u of frequency transition to %u kHz\n", state, freqs->new); policy = per_cpu(cpufreq_cpu_data, freqs->cpu); @@ -340,7 +250,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { if ((policy) && (policy->cpu == freqs->cpu) && (policy->cur) && (policy->cur != freqs->old)) { - dprintk("Warning: CPU frequency is" + pr_debug("Warning: CPU frequency is" " %u, cpufreq assumed %u kHz.\n", freqs->old, policy->cur); freqs->old = policy->cur; @@ -353,7 +263,7 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) case CPUFREQ_POSTCHANGE: adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); - dprintk("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, + pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_power_frequency(POWER_PSTATE, freqs->new, freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); @@ -411,21 +321,14 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, t = __find_governor(str_governor); if (t == NULL) { - char *name = kasprintf(GFP_KERNEL, "cpufreq_%s", - str_governor); - - if (name) { - int ret; + int ret; - 
mutex_unlock(&cpufreq_governor_mutex); - ret = request_module("%s", name); - mutex_lock(&cpufreq_governor_mutex); + mutex_unlock(&cpufreq_governor_mutex); + ret = request_module("cpufreq_%s", str_governor); + mutex_lock(&cpufreq_governor_mutex); - if (ret == 0) - t = __find_governor(str_governor); - } - - kfree(name); + if (ret == 0) + t = __find_governor(str_governor); } if (t != NULL) { @@ -753,7 +656,7 @@ no_policy: static void cpufreq_sysfs_release(struct kobject *kobj) { struct cpufreq_policy *policy = to_policy(kobj); - dprintk("last reference is dropped\n"); + pr_debug("last reference is dropped\n"); complete(&policy->kobj_unregister); } @@ -788,7 +691,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); if (gov) { policy->governor = gov; - dprintk("Restoring governor %s for cpu %d\n", + pr_debug("Restoring governor %s for cpu %d\n", policy->governor->name, cpu); } #endif @@ -824,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu, per_cpu(cpufreq_cpu_data, cpu) = managed_policy; spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - dprintk("CPU already managed, adding link\n"); + pr_debug("CPU already managed, adding link\n"); ret = sysfs_create_link(&sys_dev->kobj, &managed_policy->kobj, "cpufreq"); @@ -865,7 +768,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu, if (!cpu_online(j)) continue; - dprintk("CPU %u already managed, adding link\n", j); + pr_debug("CPU %u already managed, adding link\n", j); managed_policy = cpufreq_cpu_get(cpu); cpu_sys_dev = get_cpu_sysdev(j); ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj, @@ -941,7 +844,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu, policy->user_policy.governor = policy->governor; if (ret) { - dprintk("setting policy failed\n"); + pr_debug("setting policy failed\n"); if (cpufreq_driver->exit) cpufreq_driver->exit(policy); } @@ -977,8 +880,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) if (cpu_is_offline(cpu)) return 0; - cpufreq_debug_disable_ratelimit(); - dprintk("adding CPU %u\n", cpu); + pr_debug("adding CPU %u\n", cpu); #ifdef CONFIG_SMP /* check whether a different CPU already registered this @@ -986,7 +888,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) policy = cpufreq_cpu_get(cpu); if (unlikely(policy)) { cpufreq_cpu_put(policy); - cpufreq_debug_enable_ratelimit(); return 0; } #endif @@ -1037,7 +938,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) */ ret = cpufreq_driver->init(policy); if (ret) { - dprintk("initialization failed\n"); + pr_debug("initialization failed\n"); goto err_unlock_policy; } policy->user_policy.min = policy->min; @@ -1063,8 +964,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev) kobject_uevent(&policy->kobj, KOBJ_ADD); module_put(cpufreq_driver->owner); - dprintk("initialization complete\n"); - cpufreq_debug_enable_ratelimit(); + pr_debug("initialization complete\n"); return 0; @@ -1088,7 +988,6 @@ err_free_policy: nomem_out: module_put(cpufreq_driver->owner); module_out: - cpufreq_debug_enable_ratelimit(); return ret; } @@ -1112,15 +1011,13 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) unsigned int j; #endif - cpufreq_debug_disable_ratelimit(); - dprintk("unregistering CPU %u\n", cpu); + pr_debug("unregistering CPU %u\n", cpu); spin_lock_irqsave(&cpufreq_driver_lock, flags); data = per_cpu(cpufreq_cpu_data, cpu); if (!data) { spin_unlock_irqrestore(&cpufreq_driver_lock, flags); - cpufreq_debug_enable_ratelimit(); 
unlock_policy_rwsem_write(cpu); return -EINVAL; } @@ -1132,12 +1029,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * only need to unlink, put and exit */ if (unlikely(cpu != data->cpu)) { - dprintk("removing link\n"); + pr_debug("removing link\n"); cpumask_clear_cpu(cpu, data->cpus); spin_unlock_irqrestore(&cpufreq_driver_lock, flags); kobj = &sys_dev->kobj; cpufreq_cpu_put(data); - cpufreq_debug_enable_ratelimit(); unlock_policy_rwsem_write(cpu); sysfs_remove_link(kobj, "cpufreq"); return 0; @@ -1170,7 +1066,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) for_each_cpu(j, data->cpus) { if (j == cpu) continue; - dprintk("removing link for cpu %u\n", j); + pr_debug("removing link for cpu %u\n", j); #ifdef CONFIG_HOTPLUG_CPU strncpy(per_cpu(cpufreq_cpu_governor, j), data->governor->name, CPUFREQ_NAME_LEN); @@ -1199,21 +1095,35 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev) * not referenced anymore by anybody before we proceed with * unloading. */ - dprintk("waiting for dropping of refcount\n"); + pr_debug("waiting for dropping of refcount\n"); wait_for_completion(cmp); - dprintk("wait complete\n"); + pr_debug("wait complete\n"); lock_policy_rwsem_write(cpu); if (cpufreq_driver->exit) cpufreq_driver->exit(data); unlock_policy_rwsem_write(cpu); +#ifdef CONFIG_HOTPLUG_CPU + /* when the CPU which is the parent of the kobj is hotplugged + * offline, check for siblings, and create cpufreq sysfs interface + * and symlinks + */ + if (unlikely(cpumask_weight(data->cpus) > 1)) { + /* first sibling now owns the new sysfs dir */ + cpumask_clear_cpu(cpu, data->cpus); + cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus))); + + /* finally remove our own symlink */ + lock_policy_rwsem_write(cpu); + __cpufreq_remove_dev(sys_dev); + } +#endif + free_cpumask_var(data->related_cpus); free_cpumask_var(data->cpus); kfree(data); - per_cpu(cpufreq_cpu_data, cpu) = NULL; - cpufreq_debug_enable_ratelimit(); return 0; } @@ -1239,7 +1149,7 @@ static void handle_update(struct work_struct *work) struct cpufreq_policy *policy = container_of(work, struct cpufreq_policy, update); unsigned int cpu = policy->cpu; - dprintk("handle_update for cpu %u called\n", cpu); + pr_debug("handle_update for cpu %u called\n", cpu); cpufreq_update_policy(cpu); } @@ -1257,7 +1167,7 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, { struct cpufreq_freqs freqs; - dprintk("Warning: CPU frequency out of sync: cpufreq and timing " + pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " "core thinks of %u, is %u kHz.\n", old_freq, new_freq); freqs.cpu = cpu; @@ -1360,7 +1270,7 @@ static int cpufreq_bp_suspend(void) int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - dprintk("suspending cpu %u\n", cpu); + pr_debug("suspending cpu %u\n", cpu); /* If there's no policy for the boot CPU, we have nothing to do. */ cpu_policy = cpufreq_cpu_get(cpu); @@ -1398,7 +1308,7 @@ static void cpufreq_bp_resume(void) int cpu = smp_processor_id(); struct cpufreq_policy *cpu_policy; - dprintk("resuming cpu %u\n", cpu); + pr_debug("resuming cpu %u\n", cpu); /* If there's no policy for the boot CPU, we have nothing to do. 
*/ cpu_policy = cpufreq_cpu_get(cpu); @@ -1510,7 +1420,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy, { int retval = -EINVAL; - dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu, + pr_debug("target for CPU %u: %u kHz, relation %u\n", policy->cpu, target_freq, relation); if (cpu_online(policy->cpu) && cpufreq_driver->target) retval = cpufreq_driver->target(policy, target_freq, relation); @@ -1596,7 +1506,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, if (!try_module_get(policy->governor->owner)) return -EINVAL; - dprintk("__cpufreq_governor for CPU %u, event %u\n", + pr_debug("__cpufreq_governor for CPU %u, event %u\n", policy->cpu, event); ret = policy->governor->governor(policy, event); @@ -1697,8 +1607,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, { int ret = 0; - cpufreq_debug_disable_ratelimit(); - dprintk("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, + pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, policy->min, policy->max); memcpy(&policy->cpuinfo, &data->cpuinfo, @@ -1735,19 +1644,19 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->min = policy->min; data->max = policy->max; - dprintk("new min and max freqs are %u - %u kHz\n", + pr_debug("new min and max freqs are %u - %u kHz\n", data->min, data->max); if (cpufreq_driver->setpolicy) { data->policy = policy->policy; - dprintk("setting range\n"); + pr_debug("setting range\n"); ret = cpufreq_driver->setpolicy(policy); } else { if (policy->governor != data->governor) { /* save old, working values */ struct cpufreq_governor *old_gov = data->governor; - dprintk("governor switch\n"); + pr_debug("governor switch\n"); /* end old governor */ if (data->governor) @@ -1757,7 +1666,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, data->governor = policy->governor; if (__cpufreq_governor(data, CPUFREQ_GOV_START)) { /* new governor failed, so re-start old one */ - dprintk("starting governor %s failed\n", + pr_debug("starting governor %s failed\n", data->governor->name); if (old_gov) { data->governor = old_gov; @@ -1769,12 +1678,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, } /* might be a policy change, too, so fall through */ } - dprintk("governor: change or update limits\n"); + pr_debug("governor: change or update limits\n"); __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); } error_out: - cpufreq_debug_enable_ratelimit(); return ret; } @@ -1801,7 +1709,7 @@ int cpufreq_update_policy(unsigned int cpu) goto fail; } - dprintk("updating policy for CPU %u\n", cpu); + pr_debug("updating policy for CPU %u\n", cpu); memcpy(&policy, data, sizeof(struct cpufreq_policy)); policy.min = data->user_policy.min; policy.max = data->user_policy.max; @@ -1813,7 +1721,7 @@ int cpufreq_update_policy(unsigned int cpu) if (cpufreq_driver->get) { policy.cur = cpufreq_driver->get(cpu); if (!data->cur) { - dprintk("Driver did not initialize current freq"); + pr_debug("Driver did not initialize current freq"); data->cur = policy.cur; } else { if (data->cur != policy.cur) @@ -1889,7 +1797,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) ((!driver_data->setpolicy) && (!driver_data->target))) return -EINVAL; - dprintk("trying to register driver %s\n", driver_data->name); + pr_debug("trying to register driver %s\n", driver_data->name); if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; @@ -1920,15 +1828,14 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) /* 
if all ->init() calls failed, unregister */ if (ret) { - dprintk("no CPU initialized for driver %s\n", + pr_debug("no CPU initialized for driver %s\n", driver_data->name); goto err_sysdev_unreg; } } register_hotcpu_notifier(&cpufreq_cpu_notifier); - dprintk("driver %s up and running\n", driver_data->name); - cpufreq_debug_enable_ratelimit(); + pr_debug("driver %s up and running\n", driver_data->name); return 0; err_sysdev_unreg: @@ -1955,14 +1862,10 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) { unsigned long flags; - cpufreq_debug_disable_ratelimit(); - - if (!cpufreq_driver || (driver != cpufreq_driver)) { - cpufreq_debug_enable_ratelimit(); + if (!cpufreq_driver || (driver != cpufreq_driver)) return -EINVAL; - } - dprintk("unregistering driver %s\n", driver->name); + pr_debug("unregistering driver %s\n", driver->name); sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c index 7e2e515087f..f13a8a9af6a 100644 --- a/drivers/cpufreq/cpufreq_performance.c +++ b/drivers/cpufreq/cpufreq_performance.c @@ -15,9 +15,6 @@ #include <linux/cpufreq.h> #include <linux/init.h> -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "performance", msg) - static int cpufreq_governor_performance(struct cpufreq_policy *policy, unsigned int event) @@ -25,7 +22,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy, switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - dprintk("setting to %u kHz because of event %u\n", + pr_debug("setting to %u kHz because of event %u\n", policy->max, event); __cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H); diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c index e6db5faf3eb..4c2eb512f2b 100644 --- a/drivers/cpufreq/cpufreq_powersave.c +++ b/drivers/cpufreq/cpufreq_powersave.c @@ -15,16 +15,13 @@ #include <linux/cpufreq.h> #include <linux/init.h> -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "powersave", msg) - static int cpufreq_governor_powersave(struct cpufreq_policy *policy, unsigned int event) { switch (event) { case CPUFREQ_GOV_START: case CPUFREQ_GOV_LIMITS: - dprintk("setting to %u kHz because of event %u\n", + pr_debug("setting to %u kHz because of event %u\n", policy->min, event); __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 00d73fc8e4e..b60a4c26368 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -165,17 +165,27 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) return -1; } +/* should be called late in the CPU removal sequence so that the stats + * memory is still available in case someone tries to use it. + */ static void cpufreq_stats_free_table(unsigned int cpu) { struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); - struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); - if (policy && policy->cpu == cpu) - sysfs_remove_group(&policy->kobj, &stats_attr_group); if (stat) { kfree(stat->time_in_state); kfree(stat); } per_cpu(cpufreq_stats_table, cpu) = NULL; +} + +/* must be called early in the CPU removal sequence (before + * cpufreq_remove_dev) so that policy is still valid. 
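+ * The hotplug notifier below therefore calls this from CPU_DOWN_PREPARE,
+ * while cpufreq_stats_free_table() is deferred until CPU_DEAD.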
+ */ +static void cpufreq_stats_free_sysfs(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); + if (policy && policy->cpu == cpu) + sysfs_remove_group(&policy->kobj, &stats_attr_group); if (policy) cpufreq_cpu_put(policy); } @@ -316,6 +326,9 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, case CPU_ONLINE_FROZEN: cpufreq_update_policy(cpu); break; + case CPU_DOWN_PREPARE: + cpufreq_stats_free_sysfs(cpu); + break; case CPU_DEAD: case CPU_DEAD_FROZEN: cpufreq_stats_free_table(cpu); @@ -324,9 +337,10 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, return NOTIFY_OK; } -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = -{ +/* priority=1 so this will get called before cpufreq_remove_dev */ +static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { .notifier_call = cpufreq_stat_cpu_callback, + .priority = 1, }; static struct notifier_block notifier_policy_block = { diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index 66d2d1d6c80..f231015904c 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c @@ -37,9 +37,6 @@ static DEFINE_PER_CPU(unsigned int, cpu_is_managed); static DEFINE_MUTEX(userspace_mutex); static int cpus_using_userspace_governor; -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg) - /* keep track of frequency transitions */ static int userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, @@ -50,7 +47,7 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val, if (!per_cpu(cpu_is_managed, freq->cpu)) return 0; - dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", + pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new); per_cpu(cpu_cur_freq, freq->cpu) = freq->new; @@ -73,7 +70,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq) { int ret = -EINVAL; - dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); + pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq); mutex_lock(&userspace_mutex); if (!per_cpu(cpu_is_managed, policy->cpu)) @@ -134,7 +131,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_max_freq, cpu) = policy->max; per_cpu(cpu_cur_freq, cpu) = policy->cur; per_cpu(cpu_set_freq, cpu) = policy->cur; - dprintk("managing cpu %u started " + pr_debug("managing cpu %u started " "(%u - %u kHz, currently %u kHz)\n", cpu, per_cpu(cpu_min_freq, cpu), @@ -156,12 +153,12 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, per_cpu(cpu_min_freq, cpu) = 0; per_cpu(cpu_max_freq, cpu) = 0; per_cpu(cpu_set_freq, cpu) = 0; - dprintk("managing cpu %u stopped\n", cpu); + pr_debug("managing cpu %u stopped\n", cpu); mutex_unlock(&userspace_mutex); break; case CPUFREQ_GOV_LIMITS: mutex_lock(&userspace_mutex); - dprintk("limit event for cpu %u: %u - %u kHz, " + pr_debug("limit event for cpu %u: %u - %u kHz, " "currently %u kHz, last set to %u kHz\n", cpu, policy->min, policy->max, per_cpu(cpu_cur_freq, cpu), diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c new file mode 100644 index 00000000000..35a257dd4bb --- /dev/null +++ b/drivers/cpufreq/e_powersaver.c @@ -0,0 +1,367 @@ +/* + * Based on documentation provided by Dave Jones. Thanks! + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/timex.h> +#include <linux/io.h> +#include <linux/delay.h> + +#include <asm/msr.h> +#include <asm/tsc.h> + +#define EPS_BRAND_C7M 0 +#define EPS_BRAND_C7 1 +#define EPS_BRAND_EDEN 2 +#define EPS_BRAND_C3 3 +#define EPS_BRAND_C7D 4 + +struct eps_cpu_data { + u32 fsb; + struct cpufreq_frequency_table freq_table[]; +}; + +static struct eps_cpu_data *eps_cpu[NR_CPUS]; + + +static unsigned int eps_get(unsigned int cpu) +{ + struct eps_cpu_data *centaur; + u32 lo, hi; + + if (cpu) + return 0; + centaur = eps_cpu[cpu]; + if (centaur == NULL) + return 0; + + /* Return current frequency */ + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + return centaur->fsb * ((lo >> 8) & 0xff); +} + +static int eps_set_state(struct eps_cpu_data *centaur, + unsigned int cpu, + u32 dest_state) +{ + struct cpufreq_freqs freqs; + u32 lo, hi; + int err = 0; + int i; + + freqs.old = eps_get(cpu); + freqs.new = centaur->fsb * ((dest_state >> 8) & 0xff); + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* Wait while CPU is busy */ + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + i = 0; + while (lo & ((1 << 16) | (1 << 17))) { + udelay(16); + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + i++; + if (unlikely(i > 64)) { + err = -ENODEV; + goto postchange; + } + } + /* Set new multiplier and voltage */ + wrmsr(MSR_IA32_PERF_CTL, dest_state & 0xffff, 0); + /* Wait until transition end */ + i = 0; + do { + udelay(16); + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + i++; + if (unlikely(i > 64)) { + err = -ENODEV; + goto postchange; + } + } while (lo & ((1 << 16) | (1 << 17))); + + /* Return current frequency */ +postchange: + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + freqs.new = centaur->fsb * ((lo >> 8) & 0xff); + +#ifdef DEBUG + { + u8 current_multiplier, current_voltage; + + /* Print voltage and multiplier */ + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + current_voltage = lo & 0xff; + printk(KERN_INFO "eps: Current voltage = %dmV\n", + current_voltage * 16 + 700); + current_multiplier = (lo >> 8) & 0xff; + printk(KERN_INFO "eps: Current multiplier = %d\n", + current_multiplier); + } +#endif + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + return err; +} + +static int eps_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct eps_cpu_data *centaur; + unsigned int newstate = 0; + unsigned int cpu = policy->cpu; + unsigned int dest_state; + int ret; + + if (unlikely(eps_cpu[cpu] == NULL)) + return -ENODEV; + centaur = eps_cpu[cpu]; + + if (unlikely(cpufreq_frequency_table_target(policy, + &eps_cpu[cpu]->freq_table[0], + target_freq, + relation, + &newstate))) { + return -EINVAL; + } + + /* Make frequency transition */ + dest_state = centaur->freq_table[newstate].index & 0xffff; + ret = eps_set_state(centaur, cpu, dest_state); + if (ret) + printk(KERN_ERR "eps: Timeout!\n"); + return ret; +} + +static int eps_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, + &eps_cpu[policy->cpu]->freq_table[0]); +} + +static int eps_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int i; + u32 lo, hi; + u64 val; + u8 current_multiplier, current_voltage; + u8 max_multiplier, max_voltage; + u8 min_multiplier, min_voltage; + u8 brand = 0; + u32 fsb; + struct eps_cpu_data *centaur; + struct cpuinfo_x86 *c = &cpu_data(0); + struct cpufreq_frequency_table 
*f_table;
+	int k, step, voltage;
+	int ret;
+	int states;
+
+	if (policy->cpu != 0)
+		return -ENODEV;
+
+	/* Check brand */
+	printk(KERN_INFO "eps: Detected VIA ");
+
+	switch (c->x86_model) {
+	case 10:
+		rdmsr(0x1153, lo, hi);
+		brand = (((lo >> 2) ^ lo) >> 18) & 3;
+		printk(KERN_CONT "Model A ");
+		break;
+	case 13:
+		rdmsr(0x1154, lo, hi);
+		brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
+		printk(KERN_CONT "Model D ");
+		break;
+	}
+
+	switch (brand) {
+	case EPS_BRAND_C7M:
+		printk(KERN_CONT "C7-M\n");
+		break;
+	case EPS_BRAND_C7:
+		printk(KERN_CONT "C7\n");
+		break;
+	case EPS_BRAND_EDEN:
+		printk(KERN_CONT "Eden\n");
+		break;
+	case EPS_BRAND_C7D:
+		printk(KERN_CONT "C7-D\n");
+		break;
+	case EPS_BRAND_C3:
+		printk(KERN_CONT "C3\n");
+		return -ENODEV;
+		break;
+	}
+	/* Enable Enhanced PowerSaver */
+	rdmsrl(MSR_IA32_MISC_ENABLE, val);
+	if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+		val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
+		wrmsrl(MSR_IA32_MISC_ENABLE, val);
+		/* Can be locked at 0 */
+		rdmsrl(MSR_IA32_MISC_ENABLE, val);
+		if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+			printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
+			return -ENODEV;
+		}
+	}
+
+	/* Print voltage and multiplier */
+	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+	current_voltage = lo & 0xff;
+	printk(KERN_INFO "eps: Current voltage = %dmV\n",
+			current_voltage * 16 + 700);
+	current_multiplier = (lo >> 8) & 0xff;
+	printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
+
+	/* Print limits */
+	max_voltage = hi & 0xff;
+	printk(KERN_INFO "eps: Highest voltage = %dmV\n",
+			max_voltage * 16 + 700);
+	max_multiplier = (hi >> 8) & 0xff;
+	printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
+	min_voltage = (hi >> 16) & 0xff;
+	printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
+			min_voltage * 16 + 700);
+	min_multiplier = (hi >> 24) & 0xff;
+	printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
+
+	/* Sanity checks */
+	if (current_multiplier == 0 || max_multiplier == 0
+	    || min_multiplier == 0)
+		return -EINVAL;
+	if (current_multiplier > max_multiplier
+	    || max_multiplier <= min_multiplier)
+		return -EINVAL;
+	if (current_voltage > 0x1f || max_voltage > 0x1f)
+		return -EINVAL;
+	if (max_voltage < min_voltage)
+		return -EINVAL;
+
+	/* Calc FSB speed */
+	fsb = cpu_khz / current_multiplier;
+	/* Calc number of p-states supported */
+	if (brand == EPS_BRAND_C7M)
+		states = max_multiplier - min_multiplier + 1;
+	else
+		states = 2;
+
+	/* Allocate private data and frequency table for current cpu */
+	centaur = kzalloc(sizeof(struct eps_cpu_data)
+		    + (states + 1) * sizeof(struct cpufreq_frequency_table),
+		    GFP_KERNEL);
+	if (!centaur)
+		return -ENOMEM;
+	eps_cpu[0] = centaur;
+
+	/* Copy basic values */
+	centaur->fsb = fsb;
+
+	/* Fill frequency and MSR value table */
+	f_table = &centaur->freq_table[0];
+	if (brand != EPS_BRAND_C7M) {
+		f_table[0].frequency = fsb * min_multiplier;
+		f_table[0].index = (min_multiplier << 8) | min_voltage;
+		f_table[1].frequency = fsb * max_multiplier;
+		f_table[1].index = (max_multiplier << 8) | max_voltage;
+		f_table[2].frequency = CPUFREQ_TABLE_END;
+	} else {
+		k = 0;
+		step = ((max_voltage - min_voltage) * 256)
+			/ (max_multiplier - min_multiplier);
+		for (i = min_multiplier; i <= max_multiplier; i++) {
+			voltage = (k * step) / 256 + min_voltage;
+			f_table[k].frequency = fsb * i;
+			f_table[k].index = (i << 8) | voltage;
+			k++;
+		}
+		f_table[k].frequency = CPUFREQ_TABLE_END;
+	}
+
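+	/*
+	 * For the C7-M branch above, the VID is interpolated linearly
+	 * across the multiplier range: e.g. with multipliers 5..16 and
+	 * VIDs 0x04..0x14 (illustrative values, not from a datasheet),
+	 * step = (16 * 256) / 11 = 372, so each additional multiplier
+	 * raises the VID by one or two steps.
+	 */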
+	policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */
+	policy->cur = fsb * current_multiplier;
+
+	ret = cpufreq_frequency_table_cpuinfo(policy, &centaur->freq_table[0]);
+	if (ret) {
+		kfree(centaur);
+		return ret;
+	}
+
+	cpufreq_frequency_table_get_attr(&centaur->freq_table[0], policy->cpu);
+	return 0;
+}
+
+static int eps_cpu_exit(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+	struct eps_cpu_data *centaur;
+	u32 lo, hi;
+
+	if (eps_cpu[cpu] == NULL)
+		return -ENODEV;
+	centaur = eps_cpu[cpu];
+
+	/* Get max frequency */
+	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+	/* Set max frequency */
+	eps_set_state(centaur, cpu, hi & 0xffff);
+	/* Bye */
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	kfree(eps_cpu[cpu]);
+	eps_cpu[cpu] = NULL;
+	return 0;
+}
+
+static struct freq_attr *eps_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver eps_driver = {
+	.verify = eps_verify,
+	.target = eps_target,
+	.init = eps_cpu_init,
+	.exit = eps_cpu_exit,
+	.get = eps_get,
+	.name = "e_powersaver",
+	.owner = THIS_MODULE,
+	.attr = eps_attr,
+};
+
+static int __init eps_init(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	/* This driver will work only on Centaur C7 processors with
+	 * Enhanced SpeedStep/PowerSaver registers */
+	if (c->x86_vendor != X86_VENDOR_CENTAUR
+	    || c->x86 != 6 || c->x86_model < 10)
+		return -ENODEV;
+	if (!cpu_has(c, X86_FEATURE_EST))
+		return -ENODEV;
+
+	if (cpufreq_register_driver(&eps_driver))
+		return -EINVAL;
+	return 0;
+}
+
+static void __exit eps_exit(void)
+{
+	cpufreq_unregister_driver(&eps_driver);
+}
+
+MODULE_AUTHOR("Rafal Bilski <rafalbilski@interia.pl>");
+MODULE_DESCRIPTION("Enhanced PowerSaver driver for VIA C7 CPUs.");
+MODULE_LICENSE("GPL");
+
+module_init(eps_init);
+module_exit(eps_exit);
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
new file mode 100644
index 00000000000..c587db472a7
--- /dev/null
+++ b/drivers/cpufreq/elanfreq.c
@@ -0,0 +1,309 @@
+/*
+ * elanfreq: cpufreq driver for the AMD ELAN family
+ *
+ * (c) Copyright 2002 Robert Schwebel <r.schwebel@pengutronix.de>
+ *
+ * Parts of this code are (c) Sven Geggus <sven@geggus.net>
+ *
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 2002-02-13: - initial revision for 2.4.18-pre9 by Robert Schwebel
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include <linux/delay.h>
+#include <linux/cpufreq.h>
+
+#include <asm/msr.h>
+#include <linux/timex.h>
+#include <linux/io.h>
+
+#define REG_CSCIR 0x22 /* Chip Setup and Control Index Register */
+#define REG_CSCDR 0x23 /* Chip Setup and Control Data Register */
+
+/* Module parameter */
+static int max_freq;
+
+struct s_elan_multiplier {
+	int clock;	/* frequency in kHz */
+	int val40h;	/* PMU Force Mode register */
+	int val80h;	/* CPU Clock Speed Register */
+};
+
+/*
+ * It is important that the frequencies
+ * are listed in ascending order here!
+ */
+static struct s_elan_multiplier elan_multiplier[] = {
+	{1000, 0x02, 0x18},
+	{2000, 0x02, 0x10},
+	{4000, 0x02, 0x08},
+	{8000, 0x00, 0x00},
+	{16000, 0x00, 0x02},
+	{33000, 0x00, 0x04},
+	{66000, 0x01, 0x04},
+	{99000, 0x01, 0x05}
+};
+
+static struct cpufreq_frequency_table elanfreq_table[] = {
+	{0, 1000},
+	{1, 2000},
+	{2, 4000},
+	{3, 8000},
+	{4, 16000},
+	{5, 33000},
+	{6, 66000},
+	{7, 99000},
+	{0, CPUFREQ_TABLE_END},
+};
+
+
+/**
+ * elanfreq_get_cpu_frequency: determine current cpu speed
+ *
+ * Finds out at which frequency the CPU of the Elan SOC runs
+ * at the moment. Frequencies from 1 to 33 MHz are generated
+ * the normal way, 66 and 99 MHz are called "Hyperspeed Mode"
+ * and have the rest of the chip running with 33 MHz.
+ */
+
+static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
+{
+	u8 clockspeed_reg; /* Clock Speed Register */
+
+	local_irq_disable();
+	outb_p(0x80, REG_CSCIR);
+	clockspeed_reg = inb_p(REG_CSCDR);
+	local_irq_enable();
+
+	if ((clockspeed_reg & 0xE0) == 0xE0)
+		return 0;
+
+	/* Are we in CPU clock multiplied mode (66/99 MHz)? */
+	if ((clockspeed_reg & 0xE0) == 0xC0) {
+		if ((clockspeed_reg & 0x01) == 0)
+			return 66000;
+		else
+			return 99000;
+	}
+
+	/* 33 MHz is not 32 MHz... */
+	if ((clockspeed_reg & 0xE0) == 0xA0)
+		return 33000;
+
+	return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
+}
+
+
+/**
+ * elanfreq_set_cpu_state: Change the CPU core frequency
+ * @state: index into the elan_multiplier table
+ *
+ * This function takes a table index and changes the CPU frequency
+ * according to it. Note that the frequency has to be checked by
+ * elanfreq_verify() for correctness!
+ *
+ * There is no return value.
+ */
+
+static void elanfreq_set_cpu_state(unsigned int state)
+{
+	struct cpufreq_freqs freqs;
+
+	freqs.old = elanfreq_get_cpu_frequency(0);
+	freqs.new = elan_multiplier[state].clock;
+	freqs.cpu = 0; /* elanfreq.c is UP only driver */
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+	printk(KERN_INFO "elanfreq: attempting to set frequency to %i kHz\n",
+			elan_multiplier[state].clock);
+
+
+	/*
+	 * Access to the Elan's internal registers is indexed via
+	 * 0x22: Chip Setup & Control Register Index Register (CSCI)
+	 * 0x23: Chip Setup & Control Register Data Register (CSCD)
+	 *
+	 */
+
+	/*
+	 * 0x40 is the Power Management Unit's Force Mode Register.
+	 * Bit 6 enables Hyperspeed Mode (66/100 MHz core frequency)
+	 */
+
+	local_irq_disable();
+	outb_p(0x40, REG_CSCIR);	/* Disable hyperspeed mode */
+	outb_p(0x00, REG_CSCDR);
+	local_irq_enable();		/* wait till internal pipelines and */
+	udelay(1000);			/* buffers have cleaned up */
+
+	local_irq_disable();
+
+	/* now, set the CPU clock speed register (0x80) */
+	outb_p(0x80, REG_CSCIR);
+	outb_p(elan_multiplier[state].val80h, REG_CSCDR);
+
+	/* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
+	outb_p(0x40, REG_CSCIR);
+	outb_p(elan_multiplier[state].val40h, REG_CSCDR);
+	udelay(10000);
+	local_irq_enable();
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+}
+
+
+/**
+ * elanfreq_verify: test if frequency range is valid
+ * @policy: the policy to validate
+ *
+ * This function checks if a given frequency range in kHz is valid
+ * for the hardware supported by the driver.
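+ * If no table entry falls inside the requested range, the generic
+ * cpufreq_frequency_table_verify() helper widens policy->max to the
+ * next higher supported frequency, so e.g. a 40000-60000 kHz request
+ * against the table above should come back as 40000-66000 kHz.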
+ */ + +static int elanfreq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]); +} + +static int elanfreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target(policy, &elanfreq_table[0], + target_freq, relation, &newstate)) + return -EINVAL; + + elanfreq_set_cpu_state(newstate); + + return 0; +} + + +/* + * Module init and exit code + */ + +static int elanfreq_cpu_init(struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + unsigned int i; + int result; + + /* capability check */ + if ((c->x86_vendor != X86_VENDOR_AMD) || + (c->x86 != 4) || (c->x86_model != 10)) + return -ENODEV; + + /* max freq */ + if (!max_freq) + max_freq = elanfreq_get_cpu_frequency(0); + + /* table init */ + for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { + if (elanfreq_table[i].frequency > max_freq) + elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; + } + + /* cpuinfo and default policy values */ + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cur = elanfreq_get_cpu_frequency(0); + + result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table); + if (result) + return result; + + cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu); + return 0; +} + + +static int elanfreq_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + + +#ifndef MODULE +/** + * elanfreq_setup - elanfreq command line parameter parsing + * + * elanfreq command line parameter. Use: + * elanfreq=66000 + * to set the maximum CPU frequency to 66 MHz. Note that in + * case you do not give this boot parameter, the maximum + * frequency will fall back to _current_ CPU frequency which + * might be lower. If you build this as a module, use the + * max_freq module parameter instead. + */ +static int __init elanfreq_setup(char *str) +{ + max_freq = simple_strtoul(str, &str, 0); + printk(KERN_WARNING "You're using the deprecated elanfreq command line option. 
Use elanfreq.max_freq instead, please!\n"); + return 1; +} +__setup("elanfreq=", elanfreq_setup); +#endif + + +static struct freq_attr *elanfreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + + +static struct cpufreq_driver elanfreq_driver = { + .get = elanfreq_get_cpu_frequency, + .verify = elanfreq_verify, + .target = elanfreq_target, + .init = elanfreq_cpu_init, + .exit = elanfreq_cpu_exit, + .name = "elanfreq", + .owner = THIS_MODULE, + .attr = elanfreq_attr, +}; + + +static int __init elanfreq_init(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + + /* Test if we have the right hardware */ + if ((c->x86_vendor != X86_VENDOR_AMD) || + (c->x86 != 4) || (c->x86_model != 10)) { + printk(KERN_INFO "elanfreq: error: no Elan processor found!\n"); + return -ENODEV; + } + return cpufreq_register_driver(&elanfreq_driver); +} + + +static void __exit elanfreq_exit(void) +{ + cpufreq_unregister_driver(&elanfreq_driver); +} + + +module_param(max_freq, int, 0444); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, " + "Sven Geggus <sven@geggus.net>"); +MODULE_DESCRIPTION("cpufreq driver for AMD's Elan CPUs"); + +module_init(elanfreq_init); +module_exit(elanfreq_exit); diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 05432216e22..90431cb9280 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -14,9 +14,6 @@ #include <linux/init.h> #include <linux/cpufreq.h> -#define dprintk(msg...) \ - cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "freq-table", msg) - /********************************************************************* * FREQUENCY TABLE HELPERS * *********************************************************************/ @@ -31,11 +28,11 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { unsigned int freq = table[i].frequency; if (freq == CPUFREQ_ENTRY_INVALID) { - dprintk("table entry %u is invalid, skipping\n", i); + pr_debug("table entry %u is invalid, skipping\n", i); continue; } - dprintk("table entry %u: %u kHz, %u index\n", + pr_debug("table entry %u: %u kHz, %u index\n", i, freq, table[i].index); if (freq < min_freq) min_freq = freq; @@ -61,7 +58,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, unsigned int i; unsigned int count = 0; - dprintk("request for verification of policy (%u - %u kHz) for cpu %u\n", + pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); if (!cpu_online(policy->cpu)) @@ -86,7 +83,7 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq); - dprintk("verification lead to (%u - %u kHz) for cpu %u\n", + pr_debug("verification lead to (%u - %u kHz) for cpu %u\n", policy->min, policy->max, policy->cpu); return 0; @@ -110,7 +107,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, }; unsigned int i; - dprintk("request for target %u kHz (relation: %u) for cpu %u\n", + pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", target_freq, relation, policy->cpu); switch (relation) { @@ -167,7 +164,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, } else *index = optimal.index; - dprintk("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, + pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency, table[*index].index); return 0; @@ 
-216,14 +213,14 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs); void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, unsigned int cpu) { - dprintk("setting show_table for cpu %u to %p\n", cpu, table); + pr_debug("setting show_table for cpu %u to %p\n", cpu, table); per_cpu(cpufreq_show_table, cpu) = table; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_attr); void cpufreq_frequency_table_put_attr(unsigned int cpu) { - dprintk("clearing show_table for cpu %u\n", cpu); + pr_debug("clearing show_table for cpu %u\n", cpu); per_cpu(cpufreq_show_table, cpu) = NULL; } EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c new file mode 100644 index 00000000000..ffe1f2c92ed --- /dev/null +++ b/drivers/cpufreq/gx-suspmod.c @@ -0,0 +1,514 @@ +/* + * Cyrix MediaGX and NatSemi Geode Suspend Modulation + * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com> + * (C) 2002 Hiroshi Miura <miura@da-cha.org> + * All Rights Reserved + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation + * + * The author(s) of this software shall not be held liable for damages + * of any nature resulting due to the use of this software. This + * software is provided AS-IS with no warranties. + * + * Theoretical note: + * + * (see Geode(tm) CS5530 manual (rev.4.1) page.56) + * + * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0 + * are based on Suspend Modulation. + * + * Suspend Modulation works by asserting and de-asserting the SUSP# pin + * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP# + * the CPU enters an idle state. GX1 stops its core clock when SUSP# is + * asserted then power consumption is reduced. + * + * Suspend Modulation's OFF/ON duration are configurable + * with 'Suspend Modulation OFF Count Register' + * and 'Suspend Modulation ON Count Register'. + * These registers are 8bit counters that represent the number of + * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF) + * to the processor. + * + * These counters define a ratio which is the effective frequency + * of operation of the system. + * + * OFF Count + * F_eff = Fgx * ---------------------- + * OFF Count + ON Count + * + * 0 <= On Count, Off Count <= 255 + * + * From these limits, we can get register values + * + * off_duration + on_duration <= MAX_DURATION + * on_duration = off_duration * (stock_freq - freq) / freq + * + * off_duration = (freq * DURATION) / stock_freq + * on_duration = DURATION - off_duration + * + * + *--------------------------------------------------------------------------- + * + * ChangeLog: + * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org> + * - fix on/off register mistake + * - fix cpu_khz calc when it stops cpu modulation. + * + * Dec. 11, 2002 Hiroshi Miura <miura@da-cha.org> + * - rewrite for Cyrix MediaGX Cx5510/5520 and + * NatSemi Geode Cs5530(A). + * + * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com> + * - cs5530_mod patch for 2.4.19-rc1. 
+ * + *--------------------------------------------------------------------------- + * + * Todo + * Test on machines with 5510, 5530, 5530A + */ + +/************************************************************************ + * Suspend Modulation - Definitions * + ************************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/cpufreq.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/slab.h> + +#include <asm/processor-cyrix.h> + +/* PCI config registers, all at F0 */ +#define PCI_PMER1 0x80 /* power management enable register 1 */ +#define PCI_PMER2 0x81 /* power management enable register 2 */ +#define PCI_PMER3 0x82 /* power management enable register 3 */ +#define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */ +#define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ +#define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ +#define PCI_MODON 0x95 /* suspend modulation ON counter register */ +#define PCI_SUSCFG 0x96 /* suspend configuration register */ + +/* PMER1 bits */ +#define GPM (1<<0) /* global power management */ +#define GIT (1<<1) /* globally enable PM device idle timers */ +#define GTR (1<<2) /* globally enable IO traps */ +#define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ +#define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ + +/* SUSCFG bits */ +#define SUSMOD (1<<0) /* enable/disable suspend modulation */ +/* the below is supported only with cs5530 (after rev.1.2)/cs5530A */ +#define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ + /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ +#define SUSCFG (1<<2) /* enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ +/* the below is supported only with cs5530A */ +#define PWRSVE_ISA (1<<3) /* stop ISA clock */ +#define PWRSVE (1<<4) /* active idle */ + +struct gxfreq_params { + u8 on_duration; + u8 off_duration; + u8 pci_suscfg; + u8 pci_pmer1; + u8 pci_pmer2; + struct pci_dev *cs55x0; +}; + +static struct gxfreq_params *gx_params; +static int stock_freq; + +/* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ +static int pci_busclk; +module_param(pci_busclk, int, 0444); + +/* maximum duration for which the cpu may be suspended + * (32us * MAX_DURATION). If no parameter is given, this defaults + * to 255. + * Note that this leads to a maximum of 8 ms(!) where the CPU clock + * is suspended -- processing power is just 0.39% of what it used to be, + * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ +static int max_duration = 255; +module_param(max_duration, int, 0444); + +/* For the default policy, we want at least some processing power + * - let's say 5%. 
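+ * For a 233 MHz part (maxfreq = 233000 kHz), the default divider of
+ * 20 therefore puts the floor at 233000 / 20 = 11650 kHz.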
(min = maxfreq / POLICY_MIN_DIV)
+ */
+#define POLICY_MIN_DIV 20
+
+
+/**
+ * we can detect a core multiplier from dir0_lsb
+ * from GX1 datasheet p.56,
+ *	MULT[3:0]:
+ *	0000 = SYSCLK multiplied by 4 (test only)
+ *	0001 = SYSCLK multiplied by 10
+ *	0010 = SYSCLK multiplied by 4
+ *	0011 = SYSCLK multiplied by 6
+ *	0100 = SYSCLK multiplied by 9
+ *	0101 = SYSCLK multiplied by 5
+ *	0110 = SYSCLK multiplied by 7
+ *	0111 = SYSCLK multiplied by 8
+ *		of 33.3MHz
+ **/
+static int gx_freq_mult[16] = {
+	4, 10, 4, 6, 9, 5, 7, 8,
+	0, 0, 0, 0, 0, 0, 0, 0
+};
+
+
+/****************************************************************
+ *	Low Level chipset interface				*
+ ****************************************************************/
+static struct pci_device_id gx_chipset_tbl[] __initdata = {
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY), },
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
+	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
+	{ 0, },
+};
+
+static void gx_write_byte(int reg, int value)
+{
+	pci_write_config_byte(gx_params->cs55x0, reg, value);
+}
+
+/**
+ * gx_detect_chipset:
+ *
+ **/
+static __init struct pci_dev *gx_detect_chipset(void)
+{
+	struct pci_dev *gx_pci = NULL;
+
+	/* check if CPU is a MediaGX or a Geode. */
+	if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) &&
+	    (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) {
+		pr_debug("error: no MediaGX/Geode processor found!\n");
+		return NULL;
+	}
+
+	/* detect which companion chip is used */
+	for_each_pci_dev(gx_pci) {
+		if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL)
+			return gx_pci;
+	}
+
+	pr_debug("error: no supported chipset found!\n");
+	return NULL;
+}
+
+/**
+ * gx_get_cpuspeed:
+ *
+ * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi
+ * Geode CPU runs.
+ */
+static unsigned int gx_get_cpuspeed(unsigned int cpu)
+{
+	if ((gx_params->pci_suscfg & SUSMOD) == 0)
+		return stock_freq;
+
+	return (stock_freq * gx_params->off_duration)
+		/ (gx_params->on_duration + gx_params->off_duration);
+}
+
+/**
+ * gx_validate_speed:
+ * determine the closest supported cpu speed to the one requested,
+ * and the matching on/off durations
+ *
+ **/
+
+static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration,
+		u8 *off_duration)
+{
+	unsigned int i;
+	u8 tmp_on, tmp_off;
+	int old_tmp_freq = stock_freq;
+	int tmp_freq;
+
+	*off_duration = 1;
+	*on_duration = 0;
+
+	for (i = max_duration; i > 0; i--) {
+		tmp_off = ((khz * i) / stock_freq) & 0xff;
+		tmp_on = i - tmp_off;
+		tmp_freq = (stock_freq * tmp_off) / i;
+		/* if this relation is closer to khz, use this. If it's equal,
+		 * prefer it, too - lower latency */
+		if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) {
+			*on_duration = tmp_on;
+			*off_duration = tmp_off;
+			old_tmp_freq = tmp_freq;
+		}
+	}
+
+	return old_tmp_freq;
+}
+
+
+/**
+ * gx_set_cpuspeed:
+ * set cpu speed in khz.
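+ * e.g. a request for 150000 kHz on a 200000 kHz part resolves to
+ * off_duration = 3, on_duration = 1: SUSP# is then asserted for one
+ * 32us interval out of every four, giving F_eff = 200000 * 3/4 =
+ * 150000 kHz per the formula in the header above.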
+ **/
+
+static void gx_set_cpuspeed(unsigned int khz)
+{
+	u8 suscfg, pmer1;
+	unsigned int new_khz;
+	unsigned long flags;
+	struct cpufreq_freqs freqs;
+
+	freqs.cpu = 0;
+	freqs.old = gx_get_cpuspeed(0);
+
+	new_khz = gx_validate_speed(khz, &gx_params->on_duration,
+			&gx_params->off_duration);
+
+	freqs.new = new_khz;
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	local_irq_save(flags);
+
+
+
+	if (new_khz != stock_freq) {
+		/* if new khz == 100% of CPU speed, it is special case */
+		switch (gx_params->cs55x0->device) {
+		case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
+			pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP;
+			/* FIXME: need to test other values -- Zwane,Miura */
+			/* typical 2 to 4ms */
+			gx_write_byte(PCI_IRQTC, 4);
+			/* typical 50 to 100ms */
+			gx_write_byte(PCI_VIDTC, 100);
+			gx_write_byte(PCI_PMER1, pmer1);
+
+			if (gx_params->cs55x0->revision < 0x10) {
+				/* CS5530(rev 1.2, 1.3) */
+				suscfg = gx_params->pci_suscfg|SUSMOD;
+			} else {
+				/* CS5530A,B.. */
+				suscfg = gx_params->pci_suscfg|SUSMOD|PWRSVE;
+			}
+			break;
+		case PCI_DEVICE_ID_CYRIX_5520:
+		case PCI_DEVICE_ID_CYRIX_5510:
+			suscfg = gx_params->pci_suscfg | SUSMOD;
+			break;
+		default:
+			local_irq_restore(flags);
+			pr_debug("fatal: try to set unknown chipset.\n");
+			return;
+		}
+	} else {
+		suscfg = gx_params->pci_suscfg & ~(SUSMOD);
+		gx_params->off_duration = 0;
+		gx_params->on_duration = 0;
+		pr_debug("suspend modulation disabled: cpu runs 100%% speed.\n");
+	}
+
+	gx_write_byte(PCI_MODOFF, gx_params->off_duration);
+	gx_write_byte(PCI_MODON, gx_params->on_duration);
+
+	gx_write_byte(PCI_SUSCFG, suscfg);
+	pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg);
+
+	local_irq_restore(flags);
+
+	gx_params->pci_suscfg = suscfg;
+
+	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
+		gx_params->on_duration * 32, gx_params->off_duration * 32);
+	pr_debug("suspend modulation w/ clock speed: %d kHz.\n", freqs.new);
}
+
+/****************************************************************
+ *             High level functions                             *
+ ****************************************************************/
+
+/*
+ *	cpufreq_gx_verify: test if frequency range is valid
+ *
+ *	This function checks if a given frequency range in kHz is valid
+ *	for the hardware supported by the driver.
+ */
+
+static int cpufreq_gx_verify(struct cpufreq_policy *policy)
+{
+	unsigned int tmp_freq = 0;
+	u8 tmp1, tmp2;
+
+	if (!stock_freq || !policy)
+		return -EINVAL;
+
+	policy->cpu = 0;
+	cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
+			stock_freq);
+
+	/* it needs to be assured that at least one supported frequency is
+	 * within policy->min and policy->max. If it is not, policy->max
+	 * needs to be increased until one frequency is supported.
+	 * policy->min may not be decreased, though. This way we guarantee a
+	 * specific processing capacity.
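+	 * The adjustment granularity below is stock_freq / max_duration,
+	 * e.g. roughly 784 kHz for a 200000 kHz part at the default
+	 * max_duration of 255.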
+ */ + tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); + if (tmp_freq < policy->min) + tmp_freq += stock_freq / max_duration; + policy->min = tmp_freq; + if (policy->min > policy->max) + policy->max = tmp_freq; + tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); + if (tmp_freq > policy->max) + tmp_freq -= stock_freq / max_duration; + policy->max = tmp_freq; + if (policy->max < policy->min) + policy->max = policy->min; + cpufreq_verify_within_limits(policy, (stock_freq / max_duration), + stock_freq); + + return 0; +} + +/* + * cpufreq_gx_target: + * + */ +static int cpufreq_gx_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + u8 tmp1, tmp2; + unsigned int tmp_freq; + + if (!stock_freq || !policy) + return -EINVAL; + + policy->cpu = 0; + + tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2); + while (tmp_freq < policy->min) { + tmp_freq += stock_freq / max_duration; + tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); + } + while (tmp_freq > policy->max) { + tmp_freq -= stock_freq / max_duration; + tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); + } + + gx_set_cpuspeed(tmp_freq); + + return 0; +} + +static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int maxfreq, curfreq; + + if (!policy || policy->cpu != 0) + return -ENODEV; + + /* determine maximum frequency */ + if (pci_busclk) + maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + else if (cpu_khz) + maxfreq = cpu_khz; + else + maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; + + stock_freq = maxfreq; + curfreq = gx_get_cpuspeed(0); + + pr_debug("cpu max frequency is %d.\n", maxfreq); + pr_debug("cpu current frequency is %dkHz.\n", curfreq); + + /* setup basic struct for cpufreq API */ + policy->cpu = 0; + + if (max_duration < POLICY_MIN_DIV) + policy->min = maxfreq / max_duration; + else + policy->min = maxfreq / POLICY_MIN_DIV; + policy->max = maxfreq; + policy->cur = curfreq; + policy->cpuinfo.min_freq = maxfreq / max_duration; + policy->cpuinfo.max_freq = maxfreq; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + + return 0; +} + +/* + * cpufreq_gx_init: + * MediaGX/Geode GX initialize cpufreq driver + */ +static struct cpufreq_driver gx_suspmod_driver = { + .get = gx_get_cpuspeed, + .verify = cpufreq_gx_verify, + .target = cpufreq_gx_target, + .init = cpufreq_gx_cpu_init, + .name = "gx-suspmod", + .owner = THIS_MODULE, +}; + +static int __init cpufreq_gx_init(void) +{ + int ret; + struct gxfreq_params *params; + struct pci_dev *gx_pci; + + /* Test if we have the right hardware */ + gx_pci = gx_detect_chipset(); + if (gx_pci == NULL) + return -ENODEV; + + /* check whether module parameters are sane */ + if (max_duration > 0xff) + max_duration = 0xff; + + pr_debug("geode suspend modulation available.\n"); + + params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); + if (params == NULL) + return -ENOMEM; + + params->cs55x0 = gx_pci; + gx_params = params; + + /* keep cs55x0 configurations */ + pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg)); + pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); + pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); + pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); + pci_read_config_byte(params->cs55x0, PCI_MODOFF, + &(params->off_duration)); + + ret = cpufreq_register_driver(&gx_suspmod_driver); + if (ret) { + kfree(params); + return ret; /* register error! 
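e.g. -EBUSY when another cpufreq driver is already registered.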
*/ + } + + return 0; +} + +static void __exit cpufreq_gx_exit(void) +{ + cpufreq_unregister_driver(&gx_suspmod_driver); + pci_dev_put(gx_params->cs55x0); + kfree(gx_params); +} + +MODULE_AUTHOR("Hiroshi Miura <miura@da-cha.org>"); +MODULE_DESCRIPTION("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); +MODULE_LICENSE("GPL"); + +module_init(cpufreq_gx_init); +module_exit(cpufreq_gx_exit); + diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c new file mode 100644 index 00000000000..f47d26e2a13 --- /dev/null +++ b/drivers/cpufreq/longhaul.c @@ -0,0 +1,1024 @@ +/* + * (C) 2001-2004 Dave Jones. <davej@redhat.com> + * (C) 2002 Padraig Brady. <padraig@antefacto.com> + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon datasheets & sample CPUs kindly provided by VIA. + * + * VIA have currently 3 different versions of Longhaul. + * Version 1 (Longhaul) uses the BCR2 MSR at 0x1147. + * It is present only in Samuel 1 (C5A), Samuel 2 (C5B) stepping 0. + * Version 2 of longhaul is backward compatible with v1, but adds + * LONGHAUL MSR for purpose of both frequency and voltage scaling. + * Present in Samuel 2 (steppings 1-7 only) (C5B), and Ezra (C5C). + * Version 3 of longhaul got renamed to Powersaver and redesigned + * to use only the POWERSAVER MSR at 0x110a. + * It is present in Ezra-T (C5M), Nehemiah (C5X) and above. + * It's pretty much the same feature wise to longhaul v2, though + * there is provision for scaling FSB too, but this doesn't work + * too well in practice so we don't even try to use this. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <linux/timex.h> +#include <linux/io.h> +#include <linux/acpi.h> + +#include <asm/msr.h> +#include <acpi/processor.h> + +#include "longhaul.h" + +#define PFX "longhaul: " + +#define TYPE_LONGHAUL_V1 1 +#define TYPE_LONGHAUL_V2 2 +#define TYPE_POWERSAVER 3 + +#define CPU_SAMUEL 1 +#define CPU_SAMUEL2 2 +#define CPU_EZRA 3 +#define CPU_EZRA_T 4 +#define CPU_NEHEMIAH 5 +#define CPU_NEHEMIAH_C 6 + +/* Flags */ +#define USE_ACPI_C3 (1 << 1) +#define USE_NORTHBRIDGE (1 << 2) + +static int cpu_model; +static unsigned int numscales = 16; +static unsigned int fsb; + +static const struct mV_pos *vrm_mV_table; +static const unsigned char *mV_vrm_table; + +static unsigned int highest_speed, lowest_speed; /* kHz */ +static unsigned int minmult, maxmult; +static int can_scale_voltage; +static struct acpi_processor *pr; +static struct acpi_processor_cx *cx; +static u32 acpi_regs_addr; +static u8 longhaul_flags; +static unsigned int longhaul_index; + +/* Module parameters */ +static int scale_voltage; +static int disable_acpi_c3; +static int revid_errata; + + +/* Clock ratios multiplied by 10 */ +static int mults[32]; +static int eblcr[32]; +static int longhaul_version; +static struct cpufreq_frequency_table *longhaul_table; + +static char speedbuffer[8]; + +static char *print_speed(int speed) +{ + if (speed < 1000) { + snprintf(speedbuffer, sizeof(speedbuffer), "%dMHz", speed); + return speedbuffer; + } + + if (speed%1000 == 0) + snprintf(speedbuffer, sizeof(speedbuffer), + "%dGHz", speed/1000); + else + snprintf(speedbuffer, sizeof(speedbuffer), + "%d.%dGHz", speed/1000, (speed%1000)/100); + + return speedbuffer; +} + + +static unsigned int 
calc_speed(int mult) +{ + int khz; + khz = (mult/10)*fsb; + if (mult%10) + khz += fsb/2; + khz *= 1000; + return khz; +} + + +static int longhaul_get_cpu_mult(void) +{ + unsigned long invalue = 0, lo, hi; + + rdmsr(MSR_IA32_EBL_CR_POWERON, lo, hi); + invalue = (lo & (1<<22|1<<23|1<<24|1<<25))>>22; + if (longhaul_version == TYPE_LONGHAUL_V2 || + longhaul_version == TYPE_POWERSAVER) { + if (lo & (1<<27)) + invalue += 16; + } + return eblcr[invalue]; +} + +/* For processor with BCR2 MSR */ + +static void do_longhaul1(unsigned int mults_index) +{ + union msr_bcr2 bcr2; + + rdmsrl(MSR_VIA_BCR2, bcr2.val); + /* Enable software clock multiplier */ + bcr2.bits.ESOFTBF = 1; + bcr2.bits.CLOCKMUL = mults_index & 0xff; + + /* Sync to timer tick */ + safe_halt(); + /* Change frequency on next halt or sleep */ + wrmsrl(MSR_VIA_BCR2, bcr2.val); + /* Invoke transition */ + ACPI_FLUSH_CPU_CACHE(); + halt(); + + /* Disable software clock multiplier */ + local_irq_disable(); + rdmsrl(MSR_VIA_BCR2, bcr2.val); + bcr2.bits.ESOFTBF = 0; + wrmsrl(MSR_VIA_BCR2, bcr2.val); +} + +/* For processor with Longhaul MSR */ + +static void do_powersaver(int cx_address, unsigned int mults_index, + unsigned int dir) +{ + union msr_longhaul longhaul; + u32 t; + + rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + /* Setup new frequency */ + if (!revid_errata) + longhaul.bits.RevisionKey = longhaul.bits.RevisionID; + else + longhaul.bits.RevisionKey = 0; + longhaul.bits.SoftBusRatio = mults_index & 0xf; + longhaul.bits.SoftBusRatio4 = (mults_index & 0x10) >> 4; + /* Setup new voltage */ + if (can_scale_voltage) + longhaul.bits.SoftVID = (mults_index >> 8) & 0x1f; + /* Sync to timer tick */ + safe_halt(); + /* Raise voltage if necessary */ + if (can_scale_voltage && dir) { + longhaul.bits.EnableSoftVID = 1; + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + /* Change voltage */ + if (!cx_address) { + ACPI_FLUSH_CPU_CACHE(); + halt(); + } else { + ACPI_FLUSH_CPU_CACHE(); + /* Invoke C3 */ + inb(cx_address); + /* Dummy op - must do something useless after P_LVL3 + * read */ + t = inl(acpi_gbl_FADT.xpm_timer_block.address); + } + longhaul.bits.EnableSoftVID = 0; + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + } + + /* Change frequency on next halt or sleep */ + longhaul.bits.EnableSoftBusRatio = 1; + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + if (!cx_address) { + ACPI_FLUSH_CPU_CACHE(); + halt(); + } else { + ACPI_FLUSH_CPU_CACHE(); + /* Invoke C3 */ + inb(cx_address); + /* Dummy op - must do something useless after P_LVL3 read */ + t = inl(acpi_gbl_FADT.xpm_timer_block.address); + } + /* Disable bus ratio bit */ + longhaul.bits.EnableSoftBusRatio = 0; + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + + /* Reduce voltage if necessary */ + if (can_scale_voltage && !dir) { + longhaul.bits.EnableSoftVID = 1; + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + /* Change voltage */ + if (!cx_address) { + ACPI_FLUSH_CPU_CACHE(); + halt(); + } else { + ACPI_FLUSH_CPU_CACHE(); + /* Invoke C3 */ + inb(cx_address); + /* Dummy op - must do something useless after P_LVL3 + * read */ + t = inl(acpi_gbl_FADT.xpm_timer_block.address); + } + longhaul.bits.EnableSoftVID = 0; + wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); + } +} + +/** + * longhaul_set_cpu_frequency() + * @mults_index : bitpattern of the new multiplier. + * + * Sets a new clock ratio. 
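+ * Encoding, as consumed by do_powersaver() above: bits 0-4 select the
+ * multiplier (bit 4 via SoftBusRatio4) and, once voltage scaling is
+ * enabled, bits 8-12 carry the VID.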
+ */ + +static void longhaul_setstate(unsigned int table_index) +{ + unsigned int mults_index; + int speed, mult; + struct cpufreq_freqs freqs; + unsigned long flags; + unsigned int pic1_mask, pic2_mask; + u16 bm_status = 0; + u32 bm_timeout = 1000; + unsigned int dir = 0; + + mults_index = longhaul_table[table_index].index; + /* Safety precautions */ + mult = mults[mults_index & 0x1f]; + if (mult == -1) + return; + speed = calc_speed(mult); + if ((speed > highest_speed) || (speed < lowest_speed)) + return; + /* Voltage transition before frequency transition? */ + if (can_scale_voltage && longhaul_index < table_index) + dir = 1; + + freqs.old = calc_speed(longhaul_get_cpu_mult()); + freqs.new = speed; + freqs.cpu = 0; /* longhaul.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n", + fsb, mult/10, mult%10, print_speed(speed/1000)); +retry_loop: + preempt_disable(); + local_irq_save(flags); + + pic2_mask = inb(0xA1); + pic1_mask = inb(0x21); /* works on C3. save mask. */ + outb(0xFF, 0xA1); /* Overkill */ + outb(0xFE, 0x21); /* TMR0 only */ + + /* Wait while PCI bus is busy. */ + if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE + || ((pr != NULL) && pr->flags.bm_control))) { + bm_status = inw(acpi_regs_addr); + bm_status &= 1 << 4; + while (bm_status && bm_timeout) { + outw(1 << 4, acpi_regs_addr); + bm_timeout--; + bm_status = inw(acpi_regs_addr); + bm_status &= 1 << 4; + } + } + + if (longhaul_flags & USE_NORTHBRIDGE) { + /* Disable AGP and PCI arbiters */ + outb(3, 0x22); + } else if ((pr != NULL) && pr->flags.bm_control) { + /* Disable bus master arbitration */ + acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); + } + switch (longhaul_version) { + + /* + * Longhaul v1. (Samuel[C5A] and Samuel2 stepping 0[C5B]) + * Software controlled multipliers only. + */ + case TYPE_LONGHAUL_V1: + do_longhaul1(mults_index); + break; + + /* + * Longhaul v2 appears in Samuel2 Steppings 1->7 [C5B] and Ezra [C5C] + * + * Longhaul v3 (aka Powersaver). (Ezra-T [C5M] & Nehemiah [C5N]) + * Nehemiah can do FSB scaling too, but this has never been proven + * to work in practice. + */ + case TYPE_LONGHAUL_V2: + case TYPE_POWERSAVER: + if (longhaul_flags & USE_ACPI_C3) { + /* Don't allow wakeup */ + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 0); + do_powersaver(cx->address, mults_index, dir); + } else { + do_powersaver(0, mults_index, dir); + } + break; + } + + if (longhaul_flags & USE_NORTHBRIDGE) { + /* Enable arbiters */ + outb(0, 0x22); + } else if ((pr != NULL) && pr->flags.bm_control) { + /* Enable bus master arbitration */ + acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); + } + outb(pic2_mask, 0xA1); /* restore mask */ + outb(pic1_mask, 0x21); + + local_irq_restore(flags); + preempt_enable(); + + freqs.new = calc_speed(longhaul_get_cpu_mult()); + /* Check if requested frequency is set. */ + if (unlikely(freqs.new != speed)) { + printk(KERN_INFO PFX "Failed to set requested frequency!\n"); + /* Revision ID = 1 but processor is expecting revision key + * equal to 0. Jumpers at the bottom of processor will change + * multiplier and FSB, but will not change bits in Longhaul + * MSR nor enable voltage scaling. */ + if (!revid_errata) { + printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" " + "option.\n"); + revid_errata = 1; + msleep(200); + goto retry_loop; + } + /* Why ACPI C3 sometimes doesn't work is a mystery for me. + * But it does happen. 
Processor is entering ACPI C3 state, + * but it doesn't change frequency. I tried poking various + * bits in northbridge registers, but without success. */ + if (longhaul_flags & USE_ACPI_C3) { + printk(KERN_INFO PFX "Disabling ACPI C3 support.\n"); + longhaul_flags &= ~USE_ACPI_C3; + if (revid_errata) { + printk(KERN_INFO PFX "Disabling \"Ignore " + "Revision ID\" option.\n"); + revid_errata = 0; + } + msleep(200); + goto retry_loop; + } + /* This shouldn't happen. Longhaul ver. 2 was reported not + * working on processors without voltage scaling, but with + * RevID = 1. RevID errata will make things right. Just + * to be 100% sure. */ + if (longhaul_version == TYPE_LONGHAUL_V2) { + printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n"); + longhaul_version = TYPE_LONGHAUL_V1; + msleep(200); + goto retry_loop; + } + } + /* Report true CPU frequency */ + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + if (!bm_timeout) + printk(KERN_INFO PFX "Warning: Timeout while waiting for " + "idle PCI bus.\n"); +} + +/* + * Centaur decided to make life a little more tricky. + * Only longhaul v1 is allowed to read EBLCR BSEL[0:1]. + * Samuel2 and above have to try and guess what the FSB is. + * We do this by assuming we booted at maximum multiplier, and interpolate + * between that value multiplied by possible FSBs and cpu_mhz which + * was calculated at boot time. Really ugly, but no other way to do this. + */ + +#define ROUNDING 0xf + +static int guess_fsb(int mult) +{ + int speed = cpu_khz / 1000; + int i; + int speeds[] = { 666, 1000, 1333, 2000 }; + int f_max, f_min; + + for (i = 0; i < 4; i++) { + f_max = ((speeds[i] * mult) + 50) / 100; + f_max += (ROUNDING / 2); + f_min = f_max - ROUNDING; + if ((speed <= f_max) && (speed >= f_min)) + return speeds[i] / 10; + } + return 0; +} + + +static int __cpuinit longhaul_get_ranges(void) +{ + unsigned int i, j, k = 0; + unsigned int ratio; + int mult; + + /* Get current frequency */ + mult = longhaul_get_cpu_mult(); + if (mult == -1) { + printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n"); + return -EINVAL; + } + fsb = guess_fsb(mult); + if (fsb == 0) { + printk(KERN_INFO PFX "Invalid (reserved) FSB!\n"); + return -EINVAL; + } + /* Get max multiplier - as we always did. + * Longhaul MSR is useful only when voltage scaling is enabled. + * C3 is booting at max anyway. */ + maxmult = mult; + /* Get min multiplier */ + switch (cpu_model) { + case CPU_NEHEMIAH: + minmult = 50; + break; + case CPU_NEHEMIAH_C: + minmult = 40; + break; + default: + minmult = 30; + break; + } + + pr_debug("MinMult:%d.%dx MaxMult:%d.%dx\n", + minmult/10, minmult%10, maxmult/10, maxmult%10); + + highest_speed = calc_speed(maxmult); + lowest_speed = calc_speed(minmult); + pr_debug("FSB:%dMHz Lowest speed: %s Highest speed:%s\n", fsb, + print_speed(lowest_speed/1000), + print_speed(highest_speed/1000)); + + if (lowest_speed == highest_speed) { + printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n"); + return -EINVAL; + } + if (lowest_speed > highest_speed) { + printk(KERN_INFO PFX "nonsense! 
lowest (%d > %d) !\n", + lowest_speed, highest_speed); + return -EINVAL; + } + + longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table), + GFP_KERNEL); + if (!longhaul_table) + return -ENOMEM; + + for (j = 0; j < numscales; j++) { + ratio = mults[j]; + if (ratio == -1) + continue; + if (ratio > maxmult || ratio < minmult) + continue; + longhaul_table[k].frequency = calc_speed(ratio); + longhaul_table[k].index = j; + k++; + } + if (k <= 1) { + kfree(longhaul_table); + return -ENODEV; + } + /* Sort */ + for (j = 0; j < k - 1; j++) { + unsigned int min_f, min_i; + min_f = longhaul_table[j].frequency; + min_i = j; + for (i = j + 1; i < k; i++) { + if (longhaul_table[i].frequency < min_f) { + min_f = longhaul_table[i].frequency; + min_i = i; + } + } + if (min_i != j) { + swap(longhaul_table[j].frequency, + longhaul_table[min_i].frequency); + swap(longhaul_table[j].index, + longhaul_table[min_i].index); + } + } + + longhaul_table[k].frequency = CPUFREQ_TABLE_END; + + /* Find index we are running on */ + for (j = 0; j < k; j++) { + if (mults[longhaul_table[j].index & 0x1f] == mult) { + longhaul_index = j; + break; + } + } + return 0; +} + + +static void __cpuinit longhaul_setup_voltagescaling(void) +{ + union msr_longhaul longhaul; + struct mV_pos minvid, maxvid, vid; + unsigned int j, speed, pos, kHz_step, numvscales; + int min_vid_speed; + + rdmsrl(MSR_VIA_LONGHAUL, longhaul.val); + if (!(longhaul.bits.RevisionID & 1)) { + printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n"); + return; + } + + if (!longhaul.bits.VRMRev) { + printk(KERN_INFO PFX "VRM 8.5\n"); + vrm_mV_table = &vrm85_mV[0]; + mV_vrm_table = &mV_vrm85[0]; + } else { + printk(KERN_INFO PFX "Mobile VRM\n"); + if (cpu_model < CPU_NEHEMIAH) + return; + vrm_mV_table = &mobilevrm_mV[0]; + mV_vrm_table = &mV_mobilevrm[0]; + } + + minvid = vrm_mV_table[longhaul.bits.MinimumVID]; + maxvid = vrm_mV_table[longhaul.bits.MaximumVID]; + + if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) { + printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. " + "Voltage scaling disabled.\n", + minvid.mV/1000, minvid.mV%1000, + maxvid.mV/1000, maxvid.mV%1000); + return; + } + + if (minvid.mV == maxvid.mV) { + printk(KERN_INFO PFX "Claims to support voltage scaling but " + "min & max are both %d.%03d. 
" + "Voltage scaling disabled\n", + maxvid.mV/1000, maxvid.mV%1000); + return; + } + + /* How many voltage steps*/ + numvscales = maxvid.pos - minvid.pos + 1; + printk(KERN_INFO PFX + "Max VID=%d.%03d " + "Min VID=%d.%03d, " + "%d possible voltage scales\n", + maxvid.mV/1000, maxvid.mV%1000, + minvid.mV/1000, minvid.mV%1000, + numvscales); + + /* Calculate max frequency at min voltage */ + j = longhaul.bits.MinMHzBR; + if (longhaul.bits.MinMHzBR4) + j += 16; + min_vid_speed = eblcr[j]; + if (min_vid_speed == -1) + return; + switch (longhaul.bits.MinMHzFSB) { + case 0: + min_vid_speed *= 13333; + break; + case 1: + min_vid_speed *= 10000; + break; + case 3: + min_vid_speed *= 6666; + break; + default: + return; + break; + } + if (min_vid_speed >= highest_speed) + return; + /* Calculate kHz for one voltage step */ + kHz_step = (highest_speed - min_vid_speed) / numvscales; + + j = 0; + while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { + speed = longhaul_table[j].frequency; + if (speed > min_vid_speed) + pos = (speed - min_vid_speed) / kHz_step + minvid.pos; + else + pos = minvid.pos; + longhaul_table[j].index |= mV_vrm_table[pos] << 8; + vid = vrm_mV_table[mV_vrm_table[pos]]; + printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", + speed, j, vid.mV); + j++; + } + + can_scale_voltage = 1; + printk(KERN_INFO PFX "Voltage scaling enabled.\n"); +} + + +static int longhaul_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, longhaul_table); +} + + +static int longhaul_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + unsigned int table_index = 0; + unsigned int i; + unsigned int dir = 0; + u8 vid, current_vid; + + if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, + relation, &table_index)) + return -EINVAL; + + /* Don't set same frequency again */ + if (longhaul_index == table_index) + return 0; + + if (!can_scale_voltage) + longhaul_setstate(table_index); + else { + /* On test system voltage transitions exceeding single + * step up or down were turning motherboard off. Both + * "ondemand" and "userspace" are unsafe. C7 is doing + * this in hardware, C3 is old and we need to do this + * in software. 
*/ + i = longhaul_index; + current_vid = (longhaul_table[longhaul_index].index >> 8); + current_vid &= 0x1f; + if (table_index > longhaul_index) + dir = 1; + while (i != table_index) { + vid = (longhaul_table[i].index >> 8) & 0x1f; + if (vid != current_vid) { + longhaul_setstate(i); + current_vid = vid; + msleep(200); + } + if (dir) + i++; + else + i--; + } + longhaul_setstate(table_index); + } + longhaul_index = table_index; + return 0; +} + + +static unsigned int longhaul_get(unsigned int cpu) +{ + if (cpu) + return 0; + return calc_speed(longhaul_get_cpu_mult()); +} + +static acpi_status longhaul_walk_callback(acpi_handle obj_handle, + u32 nesting_level, + void *context, void **return_value) +{ + struct acpi_device *d; + + if (acpi_bus_get_device(obj_handle, &d)) + return 0; + + *return_value = acpi_driver_data(d); + return 1; +} + +/* VIA don't support PM2 reg, but have something similar */ +static int enable_arbiter_disable(void) +{ + struct pci_dev *dev; + int status = 1; + int reg; + u8 pci_cmd; + + /* Find PLE133 host bridge */ + reg = 0x78; + dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0, + NULL); + /* Find PM133/VT8605 host bridge */ + if (dev == NULL) + dev = pci_get_device(PCI_VENDOR_ID_VIA, + PCI_DEVICE_ID_VIA_8605_0, NULL); + /* Find CLE266 host bridge */ + if (dev == NULL) { + reg = 0x76; + dev = pci_get_device(PCI_VENDOR_ID_VIA, + PCI_DEVICE_ID_VIA_862X_0, NULL); + /* Find CN400 V-Link host bridge */ + if (dev == NULL) + dev = pci_get_device(PCI_VENDOR_ID_VIA, 0x7259, NULL); + } + if (dev != NULL) { + /* Enable access to port 0x22 */ + pci_read_config_byte(dev, reg, &pci_cmd); + if (!(pci_cmd & 1<<7)) { + pci_cmd |= 1<<7; + pci_write_config_byte(dev, reg, pci_cmd); + pci_read_config_byte(dev, reg, &pci_cmd); + if (!(pci_cmd & 1<<7)) { + printk(KERN_ERR PFX + "Can't enable access to port 0x22.\n"); + status = 0; + } + } + pci_dev_put(dev); + return status; + } + return 0; +} + +static int longhaul_setup_southbridge(void) +{ + struct pci_dev *dev; + u8 pci_cmd; + + /* Find VT8235 southbridge */ + dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL); + if (dev == NULL) + /* Find VT8237 southbridge */ + dev = pci_get_device(PCI_VENDOR_ID_VIA, + PCI_DEVICE_ID_VIA_8237, NULL); + if (dev != NULL) { + /* Set transition time to max */ + pci_read_config_byte(dev, 0xec, &pci_cmd); + pci_cmd &= ~(1 << 2); + pci_write_config_byte(dev, 0xec, pci_cmd); + pci_read_config_byte(dev, 0xe4, &pci_cmd); + pci_cmd &= ~(1 << 7); + pci_write_config_byte(dev, 0xe4, pci_cmd); + pci_read_config_byte(dev, 0xe5, &pci_cmd); + pci_cmd |= 1 << 7; + pci_write_config_byte(dev, 0xe5, pci_cmd); + /* Get address of ACPI registers block*/ + pci_read_config_byte(dev, 0x81, &pci_cmd); + if (pci_cmd & 1 << 7) { + pci_read_config_dword(dev, 0x88, &acpi_regs_addr); + acpi_regs_addr &= 0xff00; + printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", + acpi_regs_addr); + } + + pci_dev_put(dev); + return 1; + } + return 0; +} + +static int __cpuinit longhaul_cpu_init(struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + char *cpuname = NULL; + int ret; + u32 lo, hi; + + /* Check what we have on this motherboard */ + switch (c->x86_model) { + case 6: + cpu_model = CPU_SAMUEL; + cpuname = "C3 'Samuel' [C5A]"; + longhaul_version = TYPE_LONGHAUL_V1; + memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); + memcpy(eblcr, samuel1_eblcr, sizeof(samuel1_eblcr)); + break; + + case 7: + switch (c->x86_mask) { + case 0: + longhaul_version = TYPE_LONGHAUL_V1; + cpu_model = CPU_SAMUEL2; + 
cpuname = "C3 'Samuel 2' [C5B]"; + /* Note, this is not a typo, early Samuel2's had + * Samuel1 ratios. */ + memcpy(mults, samuel1_mults, sizeof(samuel1_mults)); + memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr)); + break; + case 1 ... 15: + longhaul_version = TYPE_LONGHAUL_V2; + if (c->x86_mask < 8) { + cpu_model = CPU_SAMUEL2; + cpuname = "C3 'Samuel 2' [C5B]"; + } else { + cpu_model = CPU_EZRA; + cpuname = "C3 'Ezra' [C5C]"; + } + memcpy(mults, ezra_mults, sizeof(ezra_mults)); + memcpy(eblcr, ezra_eblcr, sizeof(ezra_eblcr)); + break; + } + break; + + case 8: + cpu_model = CPU_EZRA_T; + cpuname = "C3 'Ezra-T' [C5M]"; + longhaul_version = TYPE_POWERSAVER; + numscales = 32; + memcpy(mults, ezrat_mults, sizeof(ezrat_mults)); + memcpy(eblcr, ezrat_eblcr, sizeof(ezrat_eblcr)); + break; + + case 9: + longhaul_version = TYPE_POWERSAVER; + numscales = 32; + memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults)); + memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr)); + switch (c->x86_mask) { + case 0 ... 1: + cpu_model = CPU_NEHEMIAH; + cpuname = "C3 'Nehemiah A' [C5XLOE]"; + break; + case 2 ... 4: + cpu_model = CPU_NEHEMIAH; + cpuname = "C3 'Nehemiah B' [C5XLOH]"; + break; + case 5 ... 15: + cpu_model = CPU_NEHEMIAH_C; + cpuname = "C3 'Nehemiah C' [C5P]"; + break; + } + break; + + default: + cpuname = "Unknown"; + break; + } + /* Check Longhaul ver. 2 */ + if (longhaul_version == TYPE_LONGHAUL_V2) { + rdmsr(MSR_VIA_LONGHAUL, lo, hi); + if (lo == 0 && hi == 0) + /* Looks like MSR isn't present */ + longhaul_version = TYPE_LONGHAUL_V1; + } + + printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname); + switch (longhaul_version) { + case TYPE_LONGHAUL_V1: + case TYPE_LONGHAUL_V2: + printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version); + break; + case TYPE_POWERSAVER: + printk(KERN_CONT "Powersaver supported.\n"); + break; + }; + + /* Doesn't hurt */ + longhaul_setup_southbridge(); + + /* Find ACPI data for processor */ + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, + ACPI_UINT32_MAX, &longhaul_walk_callback, NULL, + NULL, (void *)&pr); + + /* Check ACPI support for C3 state */ + if (pr != NULL && longhaul_version == TYPE_POWERSAVER) { + cx = &pr->power.states[ACPI_STATE_C3]; + if (cx->address > 0 && cx->latency <= 1000) + longhaul_flags |= USE_ACPI_C3; + } + /* Disable if it isn't working */ + if (disable_acpi_c3) + longhaul_flags &= ~USE_ACPI_C3; + /* Check if northbridge is friendly */ + if (enable_arbiter_disable()) + longhaul_flags |= USE_NORTHBRIDGE; + + /* Check ACPI support for bus master arbiter disable */ + if (!(longhaul_flags & USE_ACPI_C3 + || longhaul_flags & USE_NORTHBRIDGE) + && ((pr == NULL) || !(pr->flags.bm_control))) { + printk(KERN_ERR PFX + "No ACPI support. 
Unsupported northbridge.\n");
+		return -ENODEV;
+	}
+
+	if (longhaul_flags & USE_NORTHBRIDGE)
+		printk(KERN_INFO PFX "Using northbridge support.\n");
+	if (longhaul_flags & USE_ACPI_C3)
+		printk(KERN_INFO PFX "Using ACPI support.\n");
+
+	ret = longhaul_get_ranges();
+	if (ret != 0)
+		return ret;
+
+	if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0))
+		longhaul_setup_voltagescaling();
+
+	policy->cpuinfo.transition_latency = 200000;	/* nsec */
+	policy->cur = calc_speed(longhaul_get_cpu_mult());
+
+	ret = cpufreq_frequency_table_cpuinfo(policy, longhaul_table);
+	if (ret)
+		return ret;
+
+	cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
+
+	return 0;
+}
+
+static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	return 0;
+}
+
+static struct freq_attr *longhaul_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver longhaul_driver = {
+	.verify	= longhaul_verify,
+	.target	= longhaul_target,
+	.get	= longhaul_get,
+	.init	= longhaul_cpu_init,
+	.exit	= __devexit_p(longhaul_cpu_exit),
+	.name	= "longhaul",
+	.owner	= THIS_MODULE,
+	.attr	= longhaul_attr,
+};
+
+
+static int __init longhaul_init(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
+		return -ENODEV;
+
+#ifdef CONFIG_SMP
+	if (num_online_cpus() > 1) {
+		printk(KERN_ERR PFX "More than 1 CPU detected, "
+				"longhaul disabled.\n");
+		return -ENODEV;
+	}
+#endif
+#ifdef CONFIG_X86_IO_APIC
+	if (cpu_has_apic) {
+		printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
+				"broken in this configuration.\n");
+		return -ENODEV;
+	}
+#endif
+	switch (c->x86_model) {
+	case 6 ... 9:
+		return cpufreq_register_driver(&longhaul_driver);
+	case 10:
+		printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
+		/* fall through */
+	default:
+		;
+	}
+
+	return -ENODEV;
+}
+
+
+static void __exit longhaul_exit(void)
+{
+	int i;
+
+	for (i = 0; i < numscales; i++) {
+		if (mults[i] == maxmult) {
+			longhaul_setstate(i);
+			break;
+		}
+	}
+
+	cpufreq_unregister_driver(&longhaul_driver);
+	kfree(longhaul_table);
+}
+
+/* Even if the BIOS exports an ACPI C3 state, and it is entered
+ * successfully while the CPU is idle, that state doesn't
+ * trigger frequency transitions in some cases. */
+module_param(disable_acpi_c3, int, 0644);
+MODULE_PARM_DESC(disable_acpi_c3, "Don't use ACPI C3 support");
+/* Change CPU voltage with frequency. Very useful to save
+ * power, but most VIA C3 processors don't support it. */
+module_param(scale_voltage, int, 0644);
+MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
+/* Force revision key to 0 for processors which don't
+ * support voltage scaling, but advertise themselves as
+ * such. */
+module_param(revid_errata, int, 0644);
+MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
+
+MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
+MODULE_LICENSE("GPL");
+
+late_initcall(longhaul_init);
+module_exit(longhaul_exit);
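The new longhaul.h below stores every multiplier as the ratio times ten (65 means 6.5x). calc_speed() is defined earlier in longhaul.c and is not visible in this hunk, so the sketch below is a reconstruction of that arithmetic, for illustration only; the in-tree helper may differ in detail.

/* Hypothetical re-creation of the ratio math. mult is the
 * multiplier times ten, fsb_mhz the front-side bus in MHz. */
static int calc_speed_demo(int mult, int fsb_mhz)
{
	int khz = (mult / 10) * fsb_mhz;	/* whole multiplier steps */

	if (mult % 10)
		khz += fsb_mhz / 2;		/* half step for x.5 ratios */
	return khz * 1000;			/* MHz -> kHz */
}

/* calc_speed_demo(45, 133) == 598000 kHz, i.e. a C3 marketed as
 * "600 MHz" running 4.5 x 133 MHz. */

diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h
new file mode 100644
index 00000000000..cbf48fbca88
--- /dev/null
+++ b/drivers/cpufreq/longhaul.h
@@ -0,0 +1,353 @@
+/*
+ *  longhaul.h
+ *  (C) 2003 Dave Jones.
+ *
+ *  Licensed under the terms of the GNU GPL License version 2.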
+ * + * VIA-specific information + */ + +union msr_bcr2 { + struct { + unsigned Reseved:19, // 18:0 + ESOFTBF:1, // 19 + Reserved2:3, // 22:20 + CLOCKMUL:4, // 26:23 + Reserved3:5; // 31:27 + } bits; + unsigned long val; +}; + +union msr_longhaul { + struct { + unsigned RevisionID:4, // 3:0 + RevisionKey:4, // 7:4 + EnableSoftBusRatio:1, // 8 + EnableSoftVID:1, // 9 + EnableSoftBSEL:1, // 10 + Reserved:3, // 11:13 + SoftBusRatio4:1, // 14 + VRMRev:1, // 15 + SoftBusRatio:4, // 19:16 + SoftVID:5, // 24:20 + Reserved2:3, // 27:25 + SoftBSEL:2, // 29:28 + Reserved3:2, // 31:30 + MaxMHzBR:4, // 35:32 + MaximumVID:5, // 40:36 + MaxMHzFSB:2, // 42:41 + MaxMHzBR4:1, // 43 + Reserved4:4, // 47:44 + MinMHzBR:4, // 51:48 + MinimumVID:5, // 56:52 + MinMHzFSB:2, // 58:57 + MinMHzBR4:1, // 59 + Reserved5:4; // 63:60 + } bits; + unsigned long long val; +}; + +/* + * Clock ratio tables. Div/Mod by 10 to get ratio. + * The eblcr values specify the ratio read from the CPU. + * The mults values specify what to write to the CPU. + */ + +/* + * VIA C3 Samuel 1 & Samuel 2 (stepping 0) + */ +static const int __cpuinitdata samuel1_mults[16] = { + -1, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + -1, /* 0100 -> RESERVED */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + -1, /* 1111 -> RESERVED */ +}; + +static const int __cpuinitdata samuel1_eblcr[16] = { + 50, /* 0000 -> RESERVED */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + -1, /* 0011 -> RESERVED */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + -1, /* 0111 -> RESERVED */ + -1, /* 1000 -> RESERVED */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + -1, /* 1100 -> RESERVED */ + 75, /* 1101 -> 7.5x */ + -1, /* 1110 -> RESERVED */ + 65, /* 1111 -> 6.5x */ +}; + +/* + * VIA C3 Samuel2 Stepping 1->15 + */ +static const int __cpuinitdata samuel2_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 110, /* 0111 -> 11.0x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 130, /* 1110 -> 13.0x */ + 65, /* 1111 -> 6.5x */ +}; + +/* + * VIA C3 Ezra + */ +static const int __cpuinitdata ezra_mults[16] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ +}; + +static const int __cpuinitdata ezra_eblcr[16] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ +}; + +/* + * VIA C3 (Ezra-T) [C5M]. 
+ */ +static const int __cpuinitdata ezrat_mults[32] = { + 100, /* 0000 -> 10.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ + + -1, /* 0000 -> RESERVED (10.0x) */ + 110, /* 0001 -> 11.0x */ + -1, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (9.0x)*/ + 105, /* 0100 -> 10.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 135, /* 0111 -> 13.5x */ + 140, /* 1000 -> 14.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 130, /* 1011 -> 13.0x */ + 145, /* 1100 -> 14.5x */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + -1, /* 1111 -> RESERVED (12.0x) */ +}; + +static const int __cpuinitdata ezrat_eblcr[32] = { + 50, /* 0000 -> 5.0x */ + 30, /* 0001 -> 3.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + 35, /* 0101 -> 3.5x */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ + + -1, /* 0000 -> RESERVED (9.0x) */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + -1, /* 0011 -> RESERVED (10.0x)*/ + 135, /* 0100 -> 13.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 105, /* 0111 -> 10.5x */ + 130, /* 1000 -> 13.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 140, /* 1011 -> 14.0x */ + -1, /* 1100 -> RESERVED (12.0x) */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + 145, /* 1111 -> 14.5x */ +}; + +/* + * VIA C3 Nehemiah */ + +static const int __cpuinitdata nehemiah_mults[32] = { + 100, /* 0000 -> 10.0x */ + -1, /* 0001 -> 16.0x */ + 40, /* 0010 -> 4.0x */ + 90, /* 0011 -> 9.0x */ + 95, /* 0100 -> 9.5x */ + -1, /* 0101 -> RESERVED */ + 45, /* 0110 -> 4.5x */ + 55, /* 0111 -> 5.5x */ + 60, /* 1000 -> 6.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 50, /* 1011 -> 5.0x */ + 65, /* 1100 -> 6.5x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 120, /* 1111 -> 12.0x */ + -1, /* 0000 -> 10.0x */ + 110, /* 0001 -> 11.0x */ + -1, /* 0010 -> 12.0x */ + -1, /* 0011 -> 9.0x */ + 105, /* 0100 -> 10.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 135, /* 0111 -> 13.5x */ + 140, /* 1000 -> 14.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 1010 -> 16.0x */ + 130, /* 1011 -> 13.0x */ + 145, /* 1100 -> 14.5x */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + -1, /* 1111 -> 12.0x */ +}; + +static const int __cpuinitdata nehemiah_eblcr[32] = { + 50, /* 0000 -> 5.0x */ + 160, /* 0001 -> 16.0x */ + 40, /* 0010 -> 4.0x */ + 100, /* 0011 -> 10.0x */ + 55, /* 0100 -> 5.5x */ + -1, /* 0101 -> RESERVED */ + 45, /* 0110 -> 4.5x */ + 95, /* 0111 -> 9.5x */ + 90, /* 1000 -> 9.0x */ + 70, /* 1001 -> 7.0x */ + 80, /* 1010 -> 8.0x */ + 60, /* 1011 -> 6.0x */ + 120, /* 1100 -> 12.0x */ + 75, /* 1101 -> 7.5x */ + 85, /* 1110 -> 8.5x */ + 65, /* 1111 -> 6.5x */ + 90, /* 0000 -> 9.0x */ + 110, /* 0001 -> 11.0x */ + 120, /* 0010 -> 12.0x */ + 100, /* 0011 -> 10.0x */ + 135, /* 0100 -> 13.5x */ + 115, /* 0101 -> 11.5x */ + 125, /* 0110 -> 12.5x */ + 105, /* 0111 -> 10.5x */ + 130, /* 1000 -> 13.0x */ + 150, /* 1001 -> 15.0x */ + 160, /* 
1010 -> 16.0x */ + 140, /* 1011 -> 14.0x */ + 120, /* 1100 -> 12.0x */ + 155, /* 1101 -> 15.5x */ + -1, /* 1110 -> RESERVED (13.0x) */ + 145 /* 1111 -> 14.5x */ +}; + +/* + * Voltage scales. Div/Mod by 1000 to get actual voltage. + * Which scale to use depends on the VRM type in use. + */ + +struct mV_pos { + unsigned short mV; + unsigned short pos; +}; + +static const struct mV_pos __cpuinitdata vrm85_mV[32] = { + {1250, 8}, {1200, 6}, {1150, 4}, {1100, 2}, + {1050, 0}, {1800, 30}, {1750, 28}, {1700, 26}, + {1650, 24}, {1600, 22}, {1550, 20}, {1500, 18}, + {1450, 16}, {1400, 14}, {1350, 12}, {1300, 10}, + {1275, 9}, {1225, 7}, {1175, 5}, {1125, 3}, + {1075, 1}, {1825, 31}, {1775, 29}, {1725, 27}, + {1675, 25}, {1625, 23}, {1575, 21}, {1525, 19}, + {1475, 17}, {1425, 15}, {1375, 13}, {1325, 11} +}; + +static const unsigned char __cpuinitdata mV_vrm85[32] = { + 0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11, + 0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d, + 0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19, + 0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15 +}; + +static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = { + {1750, 31}, {1700, 30}, {1650, 29}, {1600, 28}, + {1550, 27}, {1500, 26}, {1450, 25}, {1400, 24}, + {1350, 23}, {1300, 22}, {1250, 21}, {1200, 20}, + {1150, 19}, {1100, 18}, {1050, 17}, {1000, 16}, + {975, 15}, {950, 14}, {925, 13}, {900, 12}, + {875, 11}, {850, 10}, {825, 9}, {800, 8}, + {775, 7}, {750, 6}, {725, 5}, {700, 4}, + {675, 3}, {650, 2}, {625, 1}, {600, 0} +}; + +static const unsigned char __cpuinitdata mV_mobilevrm[32] = { + 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, + 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, + 0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, + 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 +}; + diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c new file mode 100644 index 00000000000..34ea359b370 --- /dev/null +++ b/drivers/cpufreq/longrun.c @@ -0,0 +1,324 @@ +/* + * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/timex.h> + +#include <asm/msr.h> +#include <asm/processor.h> + +static struct cpufreq_driver longrun_driver; + +/** + * longrun_{low,high}_freq is needed for the conversion of cpufreq kHz + * values into per cent values. 
In TMTA microcode, the following is valid:
+ * performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
+ */
+static unsigned int longrun_low_freq, longrun_high_freq;
+
+
+/**
+ * longrun_get_policy - get the current LongRun policy
+ * @policy: struct cpufreq_policy where current policy is written into
+ *
+ * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
+ * and MSR_TMTA_LONGRUN_CTRL
+ */
+static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
+{
+	u32 msr_lo, msr_hi;
+
+	rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+	pr_debug("longrun flags are %x - %x\n", msr_lo, msr_hi);
+	if (msr_lo & 0x01)
+		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
+	else
+		policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+	rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+	pr_debug("longrun ctrl is %x - %x\n", msr_lo, msr_hi);
+	msr_lo &= 0x0000007F;
+	msr_hi &= 0x0000007F;
+
+	if (longrun_high_freq <= longrun_low_freq) {
+		/* Assume degenerate Longrun table */
+		policy->min = policy->max = longrun_high_freq;
+	} else {
+		policy->min = longrun_low_freq + msr_lo *
+			((longrun_high_freq - longrun_low_freq) / 100);
+		policy->max = longrun_low_freq + msr_hi *
+			((longrun_high_freq - longrun_low_freq) / 100);
+	}
+	policy->cpu = 0;
+}
+
+
+/**
+ * longrun_set_policy - sets a new CPUFreq policy
+ * @policy: new policy
+ *
+ * Sets a new CPUFreq policy on LongRun-capable processors. This function
+ * has to be called with cpufreq_driver locked.
+ */
+static int longrun_set_policy(struct cpufreq_policy *policy)
+{
+	u32 msr_lo, msr_hi;
+	u32 pctg_lo, pctg_hi;
+
+	if (!policy)
+		return -EINVAL;
+
+	if (longrun_high_freq <= longrun_low_freq) {
+		/* Assume degenerate Longrun table */
+		pctg_lo = pctg_hi = 100;
+	} else {
+		pctg_lo = (policy->min - longrun_low_freq) /
+			((longrun_high_freq - longrun_low_freq) / 100);
+		pctg_hi = (policy->max - longrun_low_freq) /
+			((longrun_high_freq - longrun_low_freq) / 100);
+	}
+
+	if (pctg_hi > 100)
+		pctg_hi = 100;
+	if (pctg_lo > pctg_hi)
+		pctg_lo = pctg_hi;
+
+	/* performance or economy mode */
+	rdmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+	msr_lo &= 0xFFFFFFFE;
+	switch (policy->policy) {
+	case CPUFREQ_POLICY_PERFORMANCE:
+		msr_lo |= 0x00000001;
+		break;
+	case CPUFREQ_POLICY_POWERSAVE:
+		break;
+	}
+	wrmsr(MSR_TMTA_LONGRUN_FLAGS, msr_lo, msr_hi);
+
+	/* lower and upper boundary */
+	rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+	msr_lo &= 0xFFFFFF80;
+	msr_hi &= 0xFFFFFF80;
+	msr_lo |= pctg_lo;
+	msr_hi |= pctg_hi;
+	wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+
+	return 0;
+}
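As a quick illustration of the kHz-to-percent mapping used by longrun_set_policy() above (a standalone sketch, not part of the patch):

/* Mirror of the pctg_lo/pctg_hi computation, illustration only. */
static unsigned int khz_to_pctg(unsigned int freq,
				unsigned int low_freq,
				unsigned int high_freq)
{
	if (high_freq <= low_freq)
		return 100;	/* degenerate LongRun table */
	return (freq - low_freq) / ((high_freq - low_freq) / 100);
}

/* With low_freq = 300000 kHz and high_freq = 600000 kHz, a request
 * for 450000 kHz becomes (450000 - 300000) / 3000 == 50 percent. */

+
+
+/**
+ * longrun_verify_policy - verifies a new CPUFreq policy
+ * @policy: the policy to verify
+ *
+ * Validates a new CPUFreq policy. This function has to be called with
+ * cpufreq_driver locked.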
+ */
+static int longrun_verify_policy(struct cpufreq_policy *policy)
+{
+	if (!policy)
+		return -EINVAL;
+
+	policy->cpu = 0;
+	cpufreq_verify_within_limits(policy,
+		policy->cpuinfo.min_freq,
+		policy->cpuinfo.max_freq);
+
+	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
+	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+		return -EINVAL;
+
+	return 0;
+}
+
+static unsigned int longrun_get(unsigned int cpu)
+{
+	u32 eax, ebx, ecx, edx;
+
+	if (cpu)
+		return 0;
+
+	cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+	pr_debug("cpuid eax is %u\n", eax);
+
+	return eax * 1000;
+}
+
+/**
+ * longrun_determine_freqs - determines the lowest and highest possible core frequency
+ * @low_freq: an int to put the lowest frequency into
+ * @high_freq: an int to put the highest frequency into
+ *
+ * Determines the lowest and highest possible core frequencies on this CPU.
+ * This is necessary to calculate the performance percentage according to
+ * TMTA rules:
+ * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
+ */
+static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+					     unsigned int *high_freq)
+{
+	u32 msr_lo, msr_hi;
+	u32 save_lo, save_hi;
+	u32 eax, ebx, ecx, edx;
+	u32 try_hi;
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (!low_freq || !high_freq)
+		return -EINVAL;
+
+	if (cpu_has(c, X86_FEATURE_LRTI)) {
+		/* if the LongRun Table Interface is present, the
+		 * detection is a bit easier:
+		 * For minimum frequency, read out the maximum
+		 * level (msr_hi), write that into "currently
+		 * selected level", and read out the frequency.
+		 * For maximum frequency, read out level zero.
+		 */
+		/* minimum */
+		rdmsr(MSR_TMTA_LRTI_READOUT, msr_lo, msr_hi);
+		wrmsr(MSR_TMTA_LRTI_READOUT, msr_hi, msr_hi);
+		rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
+		*low_freq = msr_lo * 1000; /* to kHz */
+
+		/* maximum */
+		wrmsr(MSR_TMTA_LRTI_READOUT, 0, msr_hi);
+		rdmsr(MSR_TMTA_LRTI_VOLT_MHZ, msr_lo, msr_hi);
+		*high_freq = msr_lo * 1000; /* to kHz */
+
+		pr_debug("longrun table interface told %u - %u kHz\n",
+			 *low_freq, *high_freq);
+
+		if (*low_freq > *high_freq)
+			*low_freq = *high_freq;
+		return 0;
+	}
+
+	/* set the upper border to the value determined during TSC init */
+	*high_freq = (cpu_khz / 1000);
+	*high_freq = *high_freq * 1000;
+	pr_debug("high frequency is %u kHz\n", *high_freq);
+
+	/* get current borders */
+	rdmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+	save_lo = msr_lo & 0x0000007F;
+	save_hi = msr_hi & 0x0000007F;
+
+	/* if current perf_pctg is larger than 90%, we need to decrease the
+	 * upper limit to make the calculation more accurate.
+	 */
+	cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+	/* try decreasing in 10% steps, some processors react only
+	 * on some barrier values */
+	for (try_hi = 80; try_hi > 0 && ecx > 90; try_hi -= 10) {
+		/* set to 0 to try_hi perf_pctg */
+		msr_lo &= 0xFFFFFF80;
+		msr_hi &= 0xFFFFFF80;
+		msr_hi |= try_hi;
+		wrmsr(MSR_TMTA_LONGRUN_CTRL, msr_lo, msr_hi);
+
+		/* read out current core MHz and current perf_pctg */
+		cpuid(0x80860007, &eax, &ebx, &ecx, &edx);
+
+		/* restore values */
+		wrmsr(MSR_TMTA_LONGRUN_CTRL, save_lo, save_hi);
+	}
+	pr_debug("percentage is %u %%, freq is %u MHz\n", ecx, eax);
+
+	/* performance_pctg = (current_freq - low_freq)/(high_freq - low_freq)
+	 * equals
+	 * low_freq * (1 - perf_pctg) = cur_freq - high_freq * perf_pctg,
+	 * i.e. low_freq = (cur_freq - high_freq * perf_pctg) / (1 - perf_pctg)
+	 *
+	 * high_freq * perf_pctg is stored temporarily into "ebx".
+ */ + ebx = (((cpu_khz / 1000) * ecx) / 100); /* to MHz */ + + if ((ecx > 95) || (ecx == 0) || (eax < ebx)) + return -EIO; + + edx = ((eax - ebx) * 100) / (100 - ecx); + *low_freq = edx * 1000; /* back to kHz */ + + pr_debug("low frequency is %u kHz\n", *low_freq); + + if (*low_freq > *high_freq) + *low_freq = *high_freq; + + return 0; +} + + +static int __cpuinit longrun_cpu_init(struct cpufreq_policy *policy) +{ + int result = 0; + + /* capability check */ + if (policy->cpu != 0) + return -ENODEV; + + /* detect low and high frequency */ + result = longrun_determine_freqs(&longrun_low_freq, &longrun_high_freq); + if (result) + return result; + + /* cpuinfo and default policy values */ + policy->cpuinfo.min_freq = longrun_low_freq; + policy->cpuinfo.max_freq = longrun_high_freq; + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + longrun_get_policy(policy); + + return 0; +} + + +static struct cpufreq_driver longrun_driver = { + .flags = CPUFREQ_CONST_LOOPS, + .verify = longrun_verify_policy, + .setpolicy = longrun_set_policy, + .get = longrun_get, + .init = longrun_cpu_init, + .name = "longrun", + .owner = THIS_MODULE, +}; + + +/** + * longrun_init - initializes the Transmeta Crusoe LongRun CPUFreq driver + * + * Initializes the LongRun support. + */ +static int __init longrun_init(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + + if (c->x86_vendor != X86_VENDOR_TRANSMETA || + !cpu_has(c, X86_FEATURE_LONGRUN)) + return -ENODEV; + + return cpufreq_register_driver(&longrun_driver); +} + + +/** + * longrun_exit - unregisters LongRun support + */ +static void __exit longrun_exit(void) +{ + cpufreq_unregister_driver(&longrun_driver); +} + + +MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); +MODULE_DESCRIPTION("LongRun driver for Transmeta Crusoe and " + "Efficeon processors."); +MODULE_LICENSE("GPL"); + +module_init(longrun_init); +module_exit(longrun_exit); diff --git a/drivers/cpufreq/mperf.c b/drivers/cpufreq/mperf.c new file mode 100644 index 00000000000..911e193018a --- /dev/null +++ b/drivers/cpufreq/mperf.c @@ -0,0 +1,51 @@ +#include <linux/kernel.h> +#include <linux/smp.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/slab.h> + +#include "mperf.h" + +static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf); + +/* Called via smp_call_function_single(), on the target CPU */ +static void read_measured_perf_ctrs(void *_cur) +{ + struct aperfmperf *am = _cur; + + get_aperfmperf(am); +} + +/* + * Return the measured active (C0) frequency on this CPU since last call + * to this function. + * Input: cpu number + * Return: Average CPU frequency in terms of max frequency (zero on error) + * + * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance + * over a period of time, while CPU is in C0 state. + * IA32_MPERF counts at the rate of max advertised frequency + * IA32_APERF counts at the rate of actual CPU frequency + * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and + * no meaning should be associated with absolute values of these MSRs. 
+ */
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+				       unsigned int cpu)
+{
+	struct aperfmperf perf;
+	unsigned long ratio;
+	unsigned int retval;
+
+	if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
+		return 0;
+
+	ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
+	per_cpu(acfreq_old_perf, cpu) = perf;
+
+	retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mperf.h b/drivers/cpufreq/mperf.h
new file mode 100644
index 00000000000..5dbf2950dc2
--- /dev/null
+++ b/drivers/cpufreq/mperf.h
@@ -0,0 +1,9 @@
+/*
+ *  (c) 2010 Advanced Micro Devices, Inc.
+ *  Your use of this code is subject to the terms and conditions of the
+ *  GNU general public license version 2. See "COPYING" or
+ *  http://www.gnu.org/licenses/gpl.html
+ */
+
+unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
+				       unsigned int cpu);
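The APERF/MPERF ratio above is fixed-point arithmetic. calc_aperfmperf_ratio() and APERFMPERF_SHIFT live in arch/x86 and are not shown in this patch, so the following is an assumed simplification (shift of 10, no overflow handling) just to make the scaling visible:

#define DEMO_SHIFT 10	/* stand-in for APERFMPERF_SHIFT, assumed value */

/* Simplified delta ratio: actual cycles per reference cycle while
 * in C0, as a fixed-point fraction. */
static unsigned long demo_ratio(unsigned long long aperf_delta,
				unsigned long long mperf_delta)
{
	if (!mperf_delta)
		return 0;
	return (unsigned long)((aperf_delta << DEMO_SHIFT) / mperf_delta);
}

/* A core that averaged half of its maximum clock while in C0:
 * demo_ratio(500, 1000) == 512, and (max_freq * 512) >> 10 is
 * exactly max_freq / 2, matching cpufreq_get_measured_perf(). */

diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
new file mode 100644
index 00000000000..6be3e0760c2
--- /dev/null
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -0,0 +1,329 @@
+/*
+ *	Pentium 4/Xeon CPU on demand clock modulation/speed scaling
+ *	(C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
+ *	(C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
+ *	(C) 2002 Arjan van de Ven <arjanv@redhat.com>
+ *	(C) 2002 Tora T. Engstad
+ *	All Rights Reserved
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	The author(s) of this software shall not be held liable for damages
+ *	of any nature resulting due to the use of this software. This
+ *	software is provided AS-IS with no warranties.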
+ * + * Date Errata Description + * 20020525 N44, O17 12.5% or 25% DC causes lockup + * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/cpufreq.h> +#include <linux/cpumask.h> +#include <linux/timex.h> + +#include <asm/processor.h> +#include <asm/msr.h> +#include <asm/timer.h> + +#include "speedstep-lib.h" + +#define PFX "p4-clockmod: " + +/* + * Duty Cycle (3bits), note DC_DISABLE is not specified in + * intel docs i just use it to mean disable + */ +enum { + DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT, + DC_64PT, DC_75PT, DC_88PT, DC_DISABLE +}; + +#define DC_ENTRIES 8 + + +static int has_N44_O17_errata[NR_CPUS]; +static unsigned int stock_freq; +static struct cpufreq_driver p4clockmod_driver; +static unsigned int cpufreq_p4_get(unsigned int cpu); + +static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate) +{ + u32 l, h; + + if (!cpu_online(cpu) || + (newstate > DC_DISABLE) || (newstate == DC_RESV)) + return -EINVAL; + + rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h); + + if (l & 0x01) + pr_debug("CPU#%d currently thermal throttled\n", cpu); + + if (has_N44_O17_errata[cpu] && + (newstate == DC_25PT || newstate == DC_DFLT)) + newstate = DC_38PT; + + rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h); + if (newstate == DC_DISABLE) { + pr_debug("CPU#%d disabling modulation\n", cpu); + wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h); + } else { + pr_debug("CPU#%d setting duty cycle to %d%%\n", + cpu, ((125 * newstate) / 10)); + /* bits 63 - 5 : reserved + * bit 4 : enable/disable + * bits 3-1 : duty cycle + * bit 0 : reserved + */ + l = (l & ~14); + l = l | (1<<4) | ((newstate & 0x7)<<1); + wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h); + } + + return 0; +} + + +static struct cpufreq_frequency_table p4clockmod_table[] = { + {DC_RESV, CPUFREQ_ENTRY_INVALID}, + {DC_DFLT, 0}, + {DC_25PT, 0}, + {DC_38PT, 0}, + {DC_50PT, 0}, + {DC_64PT, 0}, + {DC_75PT, 0}, + {DC_88PT, 0}, + {DC_DISABLE, 0}, + {DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static int cpufreq_p4_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = DC_RESV; + struct cpufreq_freqs freqs; + int i; + + if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], + target_freq, relation, &newstate)) + return -EINVAL; + + freqs.old = cpufreq_p4_get(policy->cpu); + freqs.new = stock_freq * p4clockmod_table[newstate].index / 8; + + if (freqs.new == freqs.old) + return 0; + + /* notifiers */ + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + /* run on each logical CPU, + * see section 13.15.3 of IA32 Intel Architecture Software + * Developer's Manual, Volume 3 + */ + for_each_cpu(i, policy->cpus) + cpufreq_p4_setdc(i, p4clockmod_table[newstate].index); + + /* notifiers */ + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + return 0; +} + + +static int cpufreq_p4_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]); +} + + +static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) +{ + if (c->x86 == 0x06) { + if (cpu_has(c, X86_FEATURE_EST)) + printk_once(KERN_WARNING PFX "Warning: EST-capable " + "CPU detected. The acpi-cpufreq module offers " + "voltage scaling in addition to frequency " + "scaling. 
You should use that instead of "
+				"p4-clockmod, if possible.\n");
+		switch (c->x86_model) {
+		case 0x0E: /* Core */
+		case 0x0F: /* Core Duo */
+		case 0x16: /* Celeron Core */
+		case 0x1C: /* Atom */
+			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+			return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
+		case 0x0D: /* Pentium M (Dothan) */
+			p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+			/* fall through */
+		case 0x09: /* Pentium M (Banias) */
+			return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
+		}
+	}
+
+	if (c->x86 != 0xF)
+		return 0;
+
+	/* on P-4s, the TSC runs with constant frequency independently of
+	 * whether throttling is active or not. */
+	p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
+
+	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
+		printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
+		       "The speedstep-ich or acpi cpufreq modules offer "
+		       "voltage scaling in addition to frequency scaling. "
+		       "You should use either one instead of p4-clockmod, "
+		       "if possible.\n");
+		return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
+	}
+
+	return speedstep_get_frequency(SPEEDSTEP_CPU_P4D);
+}
+
+
+
+static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
+{
+	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
+	int cpuid = 0;
+	unsigned int i;
+
+#ifdef CONFIG_SMP
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+#endif
+
+	/* Errata workaround */
+	cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+	switch (cpuid) {
+	case 0x0f07:
+	case 0x0f0a:
+	case 0x0f11:
+	case 0x0f12:
+		has_N44_O17_errata[policy->cpu] = 1;
+		pr_debug("has errata -- disabling low frequencies\n");
+	}
+
+	if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4D &&
+	    c->x86_model < 2) {
+		/* switch to maximum frequency and measure result */
+		cpufreq_p4_setdc(policy->cpu, DC_DISABLE);
+		recalibrate_cpu_khz();
+	}
+	/* get max frequency */
+	stock_freq = cpufreq_p4_get_frequency(c);
+	if (!stock_freq)
+		return -EINVAL;
+
+	/* table init */
+	for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+		if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
+			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else
+			p4clockmod_table[i].frequency = (stock_freq * i)/8;
+	}
+	cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);
+
+	/* cpuinfo and default policy values */
+
+	/* the transition latency is set to be 1 higher than the maximum
+	 * transition latency of the ondemand governor */
+	policy->cpuinfo.transition_latency = 10000001;
+	policy->cur = stock_freq;
+
+	return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
+}
+
+
+static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+	return 0;
+}
+
+static unsigned int cpufreq_p4_get(unsigned int cpu)
+{
+	u32 l, h;
+
+	rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
+
+	if (l & 0x10) {
+		l = l >> 1;
+		l &= 0x7;
+	} else
+		l = DC_DISABLE;
+
+	if (l != DC_DISABLE)
+		return stock_freq * l / 8;
+
+	return stock_freq;
+}
+
+static struct freq_attr *p4clockmod_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver p4clockmod_driver = {
+	.verify		= cpufreq_p4_verify,
+	.target		= cpufreq_p4_target,
+	.init		= cpufreq_p4_cpu_init,
+	.exit		= cpufreq_p4_cpu_exit,
+	.get		= cpufreq_p4_get,
+	.name		= "p4-clockmod",
+	.owner		= THIS_MODULE,
+	.attr		= p4clockmod_attr,
+};
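To make the duty-cycle encoding concrete (a sketch only, mirroring cpufreq_p4_setdc() and the table init above; the numbers are worked by hand):

/* Effective frequency for a modulation state: 12.5% per step. */
static unsigned int p4_state_to_khz(unsigned int stock_khz,
				    unsigned int state) /* DC_DFLT..DC_88PT */
{
	return stock_khz * state / 8;
}

/* Low word written to IA32_THERM_CONTROL for that state:
 * bit 4 enables modulation, bits 3:1 hold the duty cycle value. */
static unsigned int p4_state_to_msr(unsigned int state)
{
	return (1u << 4) | ((state & 0x7) << 1);
}

/* For a 2400000 kHz P4, state DC_50PT (4) yields 1200000 kHz and an
 * MSR low word of 0x18. */

+
+
+static int __init cpufreq_p4_init(void)
+{
+	struct cpuinfo_x86 *c = &cpu_data(0);
+	int ret;
+
+	/*
+	 * THERM_CONTROL is 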
architectural for IA32 now, so + * we can rely on the capability checks + */ + if (c->x86_vendor != X86_VENDOR_INTEL) + return -ENODEV; + + if (!test_cpu_cap(c, X86_FEATURE_ACPI) || + !test_cpu_cap(c, X86_FEATURE_ACC)) + return -ENODEV; + + ret = cpufreq_register_driver(&p4clockmod_driver); + if (!ret) + printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock " + "Modulation available\n"); + + return ret; +} + + +static void __exit cpufreq_p4_exit(void) +{ + cpufreq_unregister_driver(&p4clockmod_driver); +} + + +MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>"); +MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)"); +MODULE_LICENSE("GPL"); + +late_initcall(cpufreq_p4_init); +module_exit(cpufreq_p4_exit); diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c new file mode 100644 index 00000000000..7b0603eb012 --- /dev/null +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -0,0 +1,621 @@ +/* + * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface + * + * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com> + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. + * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com> + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON + * INFRINGEMENT. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/sched.h> +#include <linux/cpufreq.h> +#include <linux/compiler.h> +#include <linux/slab.h> + +#include <linux/acpi.h> +#include <linux/io.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> + +#include <acpi/processor.h> + +#define PCC_VERSION "1.10.00" +#define POLL_LOOPS 300 + +#define CMD_COMPLETE 0x1 +#define CMD_GET_FREQ 0x0 +#define CMD_SET_FREQ 0x1 + +#define BUF_SZ 4 + +struct pcc_register_resource { + u8 descriptor; + u16 length; + u8 space_id; + u8 bit_width; + u8 bit_offset; + u8 access_size; + u64 address; +} __attribute__ ((packed)); + +struct pcc_memory_resource { + u8 descriptor; + u16 length; + u8 space_id; + u8 resource_usage; + u8 type_specific; + u64 granularity; + u64 minimum; + u64 maximum; + u64 translation_offset; + u64 address_length; +} __attribute__ ((packed)); + +static struct cpufreq_driver pcc_cpufreq_driver; + +struct pcc_header { + u32 signature; + u16 length; + u8 major; + u8 minor; + u32 features; + u16 command; + u16 status; + u32 latency; + u32 minimum_time; + u32 maximum_time; + u32 nominal; + u32 throttled_frequency; + u32 minimum_frequency; +}; + +static void __iomem *pcch_virt_addr; +static struct pcc_header __iomem *pcch_hdr; + +static DEFINE_SPINLOCK(pcc_lock); + +static struct acpi_generic_address doorbell; + +static u64 doorbell_preserve; +static u64 doorbell_write; + +static u8 OSC_UUID[16] = {0x9F, 0x2C, 0x9B, 0x63, 0x91, 0x70, 0x1f, 0x49, + 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46}; + +struct pcc_cpu { + u32 input_offset; + u32 output_offset; +}; + +static struct pcc_cpu __percpu *pcc_cpu_info; + +static int pcc_cpufreq_verify(struct cpufreq_policy *policy) +{ + cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, + policy->cpuinfo.max_freq); + return 0; +} + +static inline void pcc_cmd(void) +{ + u64 doorbell_value; + int i; + + acpi_read(&doorbell_value, &doorbell); + acpi_write((doorbell_value & doorbell_preserve) | doorbell_write, + &doorbell); + + for (i = 0; i < POLL_LOOPS; i++) { + if (ioread16(&pcch_hdr->status) & CMD_COMPLETE) + break; + } +} + +static inline void pcc_clear_mapping(void) +{ + if (pcch_virt_addr) + iounmap(pcch_virt_addr); + pcch_virt_addr = NULL; +} + +static unsigned int pcc_get_freq(unsigned int cpu) +{ + struct pcc_cpu *pcc_cpu_data; + unsigned int curr_freq; + unsigned int freq_limit; + u16 status; + u32 input_buffer; + u32 output_buffer; + + spin_lock(&pcc_lock); + + pr_debug("get: get_freq for CPU %d\n", cpu); + pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); + + input_buffer = 0x1; + iowrite32(input_buffer, + (pcch_virt_addr + pcc_cpu_data->input_offset)); + iowrite16(CMD_GET_FREQ, &pcch_hdr->command); + + pcc_cmd(); + + output_buffer = + ioread32(pcch_virt_addr + pcc_cpu_data->output_offset); + + /* Clear the input buffer - we are done with the current command */ + memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); + + status = ioread16(&pcch_hdr->status); + if (status != CMD_COMPLETE) { + pr_debug("get: FAILED: for CPU %d, status is %d\n", + cpu, status); + goto cmd_incomplete; + } + iowrite16(0, &pcch_hdr->status); + curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff)) + / 100) * 1000); + + pr_debug("get: SUCCESS: (virtual) output_offset for cpu %d is " + "0x%p, contains a value of: 0x%x. 
Speed is: %d MHz\n", + cpu, (pcch_virt_addr + pcc_cpu_data->output_offset), + output_buffer, curr_freq); + + freq_limit = (output_buffer >> 8) & 0xff; + if (freq_limit != 0xff) { + pr_debug("get: frequency for cpu %d is being temporarily" + " capped at %d\n", cpu, curr_freq); + } + + spin_unlock(&pcc_lock); + return curr_freq; + +cmd_incomplete: + iowrite16(0, &pcch_hdr->status); + spin_unlock(&pcc_lock); + return 0; +} + +static int pcc_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct pcc_cpu *pcc_cpu_data; + struct cpufreq_freqs freqs; + u16 status; + u32 input_buffer; + int cpu; + + spin_lock(&pcc_lock); + cpu = policy->cpu; + pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); + + pr_debug("target: CPU %d should go to target freq: %d " + "(virtual) input_offset is 0x%p\n", + cpu, target_freq, + (pcch_virt_addr + pcc_cpu_data->input_offset)); + + freqs.new = target_freq; + freqs.cpu = cpu; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + input_buffer = 0x1 | (((target_freq * 100) + / (ioread32(&pcch_hdr->nominal) * 1000)) << 8); + iowrite32(input_buffer, + (pcch_virt_addr + pcc_cpu_data->input_offset)); + iowrite16(CMD_SET_FREQ, &pcch_hdr->command); + + pcc_cmd(); + + /* Clear the input buffer - we are done with the current command */ + memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); + + status = ioread16(&pcch_hdr->status); + if (status != CMD_COMPLETE) { + pr_debug("target: FAILED for cpu %d, with status: 0x%x\n", + cpu, status); + goto cmd_incomplete; + } + iowrite16(0, &pcch_hdr->status); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + pr_debug("target: was SUCCESSFUL for cpu %d\n", cpu); + spin_unlock(&pcc_lock); + + return 0; + +cmd_incomplete: + iowrite16(0, &pcch_hdr->status); + spin_unlock(&pcc_lock); + return -EINVAL; +} + +static int pcc_get_offset(int cpu) +{ + acpi_status status; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *pccp, *offset; + struct pcc_cpu *pcc_cpu_data; + struct acpi_processor *pr; + int ret = 0; + + pr = per_cpu(processors, cpu); + pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); + + status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -ENODEV; + + pccp = buffer.pointer; + if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) { + ret = -ENODEV; + goto out_free; + }; + + offset = &(pccp->package.elements[0]); + if (!offset || offset->type != ACPI_TYPE_INTEGER) { + ret = -ENODEV; + goto out_free; + } + + pcc_cpu_data->input_offset = offset->integer.value; + + offset = &(pccp->package.elements[1]); + if (!offset || offset->type != ACPI_TYPE_INTEGER) { + ret = -ENODEV; + goto out_free; + } + + pcc_cpu_data->output_offset = offset->integer.value; + + memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ); + memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ); + + pr_debug("pcc_get_offset: for CPU %d: pcc_cpu_data " + "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n", + cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset); +out_free: + kfree(buffer.pointer); + return ret; +} + +static int __init pcc_cpufreq_do_osc(acpi_handle *handle) +{ + acpi_status status; + struct acpi_object_list input; + struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object in_params[4]; + union acpi_object *out_obj; + u32 capabilities[2]; + u32 errors; + u32 supported; + int ret = 0; + + input.count = 4; + input.pointer = in_params; + 
in_params[0].type = ACPI_TYPE_BUFFER; + in_params[0].buffer.length = 16; + in_params[0].buffer.pointer = OSC_UUID; + in_params[1].type = ACPI_TYPE_INTEGER; + in_params[1].integer.value = 1; + in_params[2].type = ACPI_TYPE_INTEGER; + in_params[2].integer.value = 2; + in_params[3].type = ACPI_TYPE_BUFFER; + in_params[3].buffer.length = 8; + in_params[3].buffer.pointer = (u8 *)&capabilities; + + capabilities[0] = OSC_QUERY_ENABLE; + capabilities[1] = 0x1; + + status = acpi_evaluate_object(*handle, "_OSC", &input, &output); + if (ACPI_FAILURE(status)) + return -ENODEV; + + if (!output.length) + return -ENODEV; + + out_obj = output.pointer; + if (out_obj->type != ACPI_TYPE_BUFFER) { + ret = -ENODEV; + goto out_free; + } + + errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); + if (errors) { + ret = -ENODEV; + goto out_free; + } + + supported = *((u32 *)(out_obj->buffer.pointer + 4)); + if (!(supported & 0x1)) { + ret = -ENODEV; + goto out_free; + } + + kfree(output.pointer); + capabilities[0] = 0x0; + capabilities[1] = 0x1; + + status = acpi_evaluate_object(*handle, "_OSC", &input, &output); + if (ACPI_FAILURE(status)) + return -ENODEV; + + if (!output.length) + return -ENODEV; + + out_obj = output.pointer; + if (out_obj->type != ACPI_TYPE_BUFFER) { + ret = -ENODEV; + goto out_free; + } + + errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0); + if (errors) { + ret = -ENODEV; + goto out_free; + } + + supported = *((u32 *)(out_obj->buffer.pointer + 4)); + if (!(supported & 0x1)) { + ret = -ENODEV; + goto out_free; + } + +out_free: + kfree(output.pointer); + return ret; +} + +static int __init pcc_cpufreq_probe(void) +{ + acpi_status status; + struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; + struct pcc_memory_resource *mem_resource; + struct pcc_register_resource *reg_resource; + union acpi_object *out_obj, *member; + acpi_handle handle, osc_handle, pcch_handle; + int ret = 0; + + status = acpi_get_handle(NULL, "\\_SB", &handle); + if (ACPI_FAILURE(status)) + return -ENODEV; + + status = acpi_get_handle(handle, "PCCH", &pcch_handle); + if (ACPI_FAILURE(status)) + return -ENODEV; + + status = acpi_get_handle(handle, "_OSC", &osc_handle); + if (ACPI_SUCCESS(status)) { + ret = pcc_cpufreq_do_osc(&osc_handle); + if (ret) + pr_debug("probe: _OSC evaluation did not succeed\n"); + /* Firmware's use of _OSC is optional */ + ret = 0; + } + + status = acpi_evaluate_object(handle, "PCCH", NULL, &output); + if (ACPI_FAILURE(status)) + return -ENODEV; + + out_obj = output.pointer; + if (out_obj->type != ACPI_TYPE_PACKAGE) { + ret = -ENODEV; + goto out_free; + } + + member = &out_obj->package.elements[0]; + if (member->type != ACPI_TYPE_BUFFER) { + ret = -ENODEV; + goto out_free; + } + + mem_resource = (struct pcc_memory_resource *)member->buffer.pointer; + + pr_debug("probe: mem_resource descriptor: 0x%x," + " length: %d, space_id: %d, resource_usage: %d," + " type_specific: %d, granularity: 0x%llx," + " minimum: 0x%llx, maximum: 0x%llx," + " translation_offset: 0x%llx, address_length: 0x%llx\n", + mem_resource->descriptor, mem_resource->length, + mem_resource->space_id, mem_resource->resource_usage, + mem_resource->type_specific, mem_resource->granularity, + mem_resource->minimum, mem_resource->maximum, + mem_resource->translation_offset, + mem_resource->address_length); + + if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { + ret = -ENODEV; + goto out_free; + } + + pcch_virt_addr = ioremap_nocache(mem_resource->minimum, + mem_resource->address_length); + if (pcch_virt_addr == 
NULL) { + pr_debug("probe: could not map shared mem region\n"); + goto out_free; + } + pcch_hdr = pcch_virt_addr; + + pr_debug("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr); + pr_debug("probe: PCCH header is at physical address: 0x%llx," + " signature: 0x%x, length: %d bytes, major: %d, minor: %d," + " supported features: 0x%x, command field: 0x%x," + " status field: 0x%x, nominal latency: %d us\n", + mem_resource->minimum, ioread32(&pcch_hdr->signature), + ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major), + ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features), + ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status), + ioread32(&pcch_hdr->latency)); + + pr_debug("probe: min time between commands: %d us," + " max time between commands: %d us," + " nominal CPU frequency: %d MHz," + " minimum CPU frequency: %d MHz," + " minimum CPU frequency without throttling: %d MHz\n", + ioread32(&pcch_hdr->minimum_time), + ioread32(&pcch_hdr->maximum_time), + ioread32(&pcch_hdr->nominal), + ioread32(&pcch_hdr->throttled_frequency), + ioread32(&pcch_hdr->minimum_frequency)); + + member = &out_obj->package.elements[1]; + if (member->type != ACPI_TYPE_BUFFER) { + ret = -ENODEV; + goto pcch_free; + } + + reg_resource = (struct pcc_register_resource *)member->buffer.pointer; + + doorbell.space_id = reg_resource->space_id; + doorbell.bit_width = reg_resource->bit_width; + doorbell.bit_offset = reg_resource->bit_offset; + doorbell.access_width = 64; + doorbell.address = reg_resource->address; + + pr_debug("probe: doorbell: space_id is %d, bit_width is %d, " + "bit_offset is %d, access_width is %d, address is 0x%llx\n", + doorbell.space_id, doorbell.bit_width, doorbell.bit_offset, + doorbell.access_width, reg_resource->address); + + member = &out_obj->package.elements[2]; + if (member->type != ACPI_TYPE_INTEGER) { + ret = -ENODEV; + goto pcch_free; + } + + doorbell_preserve = member->integer.value; + + member = &out_obj->package.elements[3]; + if (member->type != ACPI_TYPE_INTEGER) { + ret = -ENODEV; + goto pcch_free; + } + + doorbell_write = member->integer.value; + + pr_debug("probe: doorbell_preserve: 0x%llx," + " doorbell_write: 0x%llx\n", + doorbell_preserve, doorbell_write); + + pcc_cpu_info = alloc_percpu(struct pcc_cpu); + if (!pcc_cpu_info) { + ret = -ENOMEM; + goto pcch_free; + } + + printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency" + " limits: %d MHz, %d MHz\n", PCC_VERSION, + ioread32(&pcch_hdr->minimum_frequency), + ioread32(&pcch_hdr->nominal)); + kfree(output.pointer); + return ret; +pcch_free: + pcc_clear_mapping(); +out_free: + kfree(output.pointer); + return ret; +} + +static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int cpu = policy->cpu; + unsigned int result = 0; + + if (!pcch_virt_addr) { + result = -1; + goto out; + } + + result = pcc_get_offset(cpu); + if (result) { + pr_debug("init: PCCP evaluation failed\n"); + goto out; + } + + policy->max = policy->cpuinfo.max_freq = + ioread32(&pcch_hdr->nominal) * 1000; + policy->min = policy->cpuinfo.min_freq = + ioread32(&pcch_hdr->minimum_frequency) * 1000; + policy->cur = pcc_get_freq(cpu); + + if (!policy->cur) { + pr_debug("init: Unable to get current CPU frequency\n"); + result = -EINVAL; + goto out; + } + + pr_debug("init: policy->max is %d, policy->min is %d\n", + policy->max, policy->min); +out: + return result; +} + +static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct cpufreq_driver pcc_cpufreq_driver = { + .flags = 
CPUFREQ_CONST_LOOPS, + .get = pcc_get_freq, + .verify = pcc_cpufreq_verify, + .target = pcc_cpufreq_target, + .init = pcc_cpufreq_cpu_init, + .exit = pcc_cpufreq_cpu_exit, + .name = "pcc-cpufreq", + .owner = THIS_MODULE, +}; + +static int __init pcc_cpufreq_init(void) +{ + int ret; + + if (acpi_disabled) + return 0; + + ret = pcc_cpufreq_probe(); + if (ret) { + pr_debug("pcc_cpufreq_init: PCCH evaluation failed\n"); + return ret; + } + + ret = cpufreq_register_driver(&pcc_cpufreq_driver); + + return ret; +} + +static void __exit pcc_cpufreq_exit(void) +{ + cpufreq_unregister_driver(&pcc_cpufreq_driver); + + pcc_clear_mapping(); + + free_percpu(pcc_cpu_info); +} + +MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar"); +MODULE_VERSION(PCC_VERSION); +MODULE_DESCRIPTION("Processor Clocking Control interface driver"); +MODULE_LICENSE("GPL"); + +late_initcall(pcc_cpufreq_init); +module_exit(pcc_cpufreq_exit); diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c new file mode 100644 index 00000000000..b3379d6a5c5 --- /dev/null +++ b/drivers/cpufreq/powernow-k6.c @@ -0,0 +1,261 @@ +/* + * This file was based upon code in Powertweak Linux (http://powertweak.sf.net) + * (C) 2000-2003 Dave Jones, Arjan van de Ven, Janne Pänkälä, + * Dominik Brodowski. + * + * Licensed under the terms of the GNU GPL License version 2. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/ioport.h> +#include <linux/timex.h> +#include <linux/io.h> + +#include <asm/msr.h> + +#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long + as it is unused */ + +#define PFX "powernow-k6: " +static unsigned int busfreq; /* FSB, in 10 kHz */ +static unsigned int max_multiplier; + + +/* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */ +static struct cpufreq_frequency_table clock_ratio[] = { + {45, /* 000 -> 4.5x */ 0}, + {50, /* 001 -> 5.0x */ 0}, + {40, /* 010 -> 4.0x */ 0}, + {55, /* 011 -> 5.5x */ 0}, + {20, /* 100 -> 2.0x */ 0}, + {30, /* 101 -> 3.0x */ 0}, + {60, /* 110 -> 6.0x */ 0}, + {35, /* 111 -> 3.5x */ 0}, + {0, CPUFREQ_TABLE_END} +}; + + +/** + * powernow_k6_get_cpu_multiplier - returns the current FSB multiplier + * + * Returns the current setting of the frequency multiplier. Core clock + * speed is frequency of the Front-Side Bus multiplied with this value. + */ +static int powernow_k6_get_cpu_multiplier(void) +{ + u64 invalue = 0; + u32 msrval; + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue = inl(POWERNOW_IOPORT + 0x8); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + return clock_ratio[(invalue >> 5)&7].index; +} + + +/** + * powernow_k6_set_state - set the PowerNow! multiplier + * @best_i: clock_ratio[best_i] is the target multiplier + * + * Tries to change the PowerNow! 
multiplier + */ +static void powernow_k6_set_state(unsigned int best_i) +{ + unsigned long outvalue = 0, invalue = 0; + unsigned long msrval; + struct cpufreq_freqs freqs; + + if (clock_ratio[best_i].index > max_multiplier) { + printk(KERN_ERR PFX "invalid target frequency\n"); + return; + } + + freqs.old = busfreq * powernow_k6_get_cpu_multiplier(); + freqs.new = busfreq * clock_ratio[best_i].index; + freqs.cpu = 0; /* powernow-k6.c is UP only driver */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* we now need to transform best_i to the BVC format, see AMD#23446 */ + + outvalue = (1<<12) | (1<<10) | (1<<9) | (best_i<<5); + + msrval = POWERNOW_IOPORT + 0x1; + wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */ + invalue = inl(POWERNOW_IOPORT + 0x8); + invalue = invalue & 0xf; + outvalue = outvalue | invalue; + outl(outvalue , (POWERNOW_IOPORT + 0x8)); + msrval = POWERNOW_IOPORT + 0x0; + wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */ + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return; +} + + +/** + * powernow_k6_verify - verifies a new CPUfreq policy + * @policy: new policy + * + * Policy must be within lowest and highest possible CPU Frequency, + * and at least one possible state must be within min and max. + */ +static int powernow_k6_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &clock_ratio[0]); +} + + +/** + * powernow_k6_setpolicy - sets a new CPUFreq policy + * @policy: new policy + * @target_freq: the target frequency + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * + * sets a new CPUFreq policy + */ +static int powernow_k6_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target(policy, &clock_ratio[0], + target_freq, relation, &newstate)) + return -EINVAL; + + powernow_k6_set_state(newstate); + + return 0; +} + + +static int powernow_k6_cpu_init(struct cpufreq_policy *policy) +{ + unsigned int i, f; + int result; + + if (policy->cpu != 0) + return -ENODEV; + + /* get frequencies */ + max_multiplier = powernow_k6_get_cpu_multiplier(); + busfreq = cpu_khz / max_multiplier; + + /* table init */ + for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { + f = clock_ratio[i].index; + if (f > max_multiplier) + clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; + else + clock_ratio[i].frequency = busfreq * f; + } + + /* cpuinfo and default policy values */ + policy->cpuinfo.transition_latency = 200000; + policy->cur = busfreq * max_multiplier; + + result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio); + if (result) + return result; + + cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu); + + return 0; +} + + +static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) +{ + unsigned int i; + for (i = 0; i < 8; i++) { + if (i == max_multiplier) + powernow_k6_set_state(i); + } + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static unsigned int powernow_k6_get(unsigned int cpu) +{ + unsigned int ret; + ret = (busfreq * powernow_k6_get_cpu_multiplier()); + return ret; +} + +static struct freq_attr *powernow_k6_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver powernow_k6_driver = { + .verify = powernow_k6_verify, + .target = powernow_k6_target, + .init = powernow_k6_cpu_init, + .exit = powernow_k6_cpu_exit, + .get = powernow_k6_get, 
+ .name = "powernow-k6", + .owner = THIS_MODULE, + .attr = powernow_k6_attr, +}; + + +/** + * powernow_k6_init - initializes the k6 PowerNow! CPUFreq driver + * + * Initializes the K6 PowerNow! support. Returns -ENODEV on unsupported + * devices, -EIO or -EINVAL on problems during initialization, and zero + * on success. + */ +static int __init powernow_k6_init(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + + if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || + ((c->x86_model != 12) && (c->x86_model != 13))) + return -ENODEV; + + if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) { + printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n"); + return -EIO; + } + + if (cpufreq_register_driver(&powernow_k6_driver)) { + release_region(POWERNOW_IOPORT, 16); + return -EINVAL; + } + + return 0; +} + + +/** + * powernow_k6_exit - unregisters AMD K6-2+/3+ PowerNow! support + * + * Unregisters AMD K6-2+ / K6-3+ PowerNow! support. + */ +static void __exit powernow_k6_exit(void) +{ + cpufreq_unregister_driver(&powernow_k6_driver); + release_region(POWERNOW_IOPORT, 16); +} + + +MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, " + "Dominik Brodowski <linux@brodo.de>"); +MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors."); +MODULE_LICENSE("GPL"); + +module_init(powernow_k6_init); +module_exit(powernow_k6_exit); diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c new file mode 100644 index 00000000000..d71d9f37235 --- /dev/null +++ b/drivers/cpufreq/powernow-k7.c @@ -0,0 +1,747 @@ +/* + * AMD K7 Powernow driver. + * (C) 2003 Dave Jones on behalf of SuSE Labs. + * (C) 2003-2004 Dave Jones <davej@redhat.com> + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon datasheets & sample CPUs kindly provided by AMD. + * + * Errata 5: + * The CPU may fail to execute a FID/VID change in the presence of an interrupt. + * - We cli/sti on stepping A0 CPUs around the FID/VID transition. + * Errata 15: + * CPUs with half-frequency multipliers may hang upon wakeup from disconnect. + * - We disable half multipliers if ACPI is used on A0 stepping CPUs. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/dmi.h> +#include <linux/timex.h> +#include <linux/io.h> + +#include <asm/timer.h> /* Needed for recalibrate_cpu_khz() */ +#include <asm/msr.h> +#include <asm/system.h> + +#ifdef CONFIG_X86_POWERNOW_K7_ACPI +#include <linux/acpi.h> +#include <acpi/processor.h> +#endif + +#include "powernow-k7.h" + +#define PFX "powernow: " + + +struct psb_s { + u8 signature[10]; + u8 tableversion; + u8 flags; + u16 settlingtime; + u8 reserved1; + u8 numpst; +}; + +struct pst_s { + u32 cpuid; + u8 fsbspeed; + u8 maxfid; + u8 startvid; + u8 numpstates; +}; + +#ifdef CONFIG_X86_POWERNOW_K7_ACPI +union powernow_acpi_control_t { + struct { + unsigned long fid:5, + vid:5, + sgtc:20, + res1:2; + } bits; + unsigned long val; +}; +#endif + +/* divide by 1000 to get VCore voltage in V. */ +static const int mobile_vid_table[32] = { + 2000, 1950, 1900, 1850, 1800, 1750, 1700, 1650, + 1600, 1550, 1500, 1450, 1400, 1350, 1300, 0, + 1275, 1250, 1225, 1200, 1175, 1150, 1125, 1100, + 1075, 1050, 1025, 1000, 975, 950, 925, 0, +}; + +/* divide by 10 to get FID. 
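Entries of -1 below are reserved encodings with no valid multiplier. 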
*/ +static const int fid_codes[32] = { + 110, 115, 120, 125, 50, 55, 60, 65, + 70, 75, 80, 85, 90, 95, 100, 105, + 30, 190, 40, 200, 130, 135, 140, 210, + 150, 225, 160, 165, 170, 180, -1, -1, +}; + +/* This parameter is used in order to force ACPI instead of legacy method for + * configuration purpose. + */ + +static int acpi_force; + +static struct cpufreq_frequency_table *powernow_table; + +static unsigned int can_scale_bus; +static unsigned int can_scale_vid; +static unsigned int minimum_speed = -1; +static unsigned int maximum_speed; +static unsigned int number_scales; +static unsigned int fsb; +static unsigned int latency; +static char have_a0; + +static int check_fsb(unsigned int fsbspeed) +{ + int delta; + unsigned int f = fsb / 1000; + + delta = (fsbspeed > f) ? fsbspeed - f : f - fsbspeed; + return delta < 5; +} + +static int check_powernow(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + unsigned int maxei, eax, ebx, ecx, edx; + + if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 6)) { +#ifdef MODULE + printk(KERN_INFO PFX "This module only works with " + "AMD K7 CPUs\n"); +#endif + return 0; + } + + /* Get maximum capabilities */ + maxei = cpuid_eax(0x80000000); + if (maxei < 0x80000007) { /* Any powernow info ? */ +#ifdef MODULE + printk(KERN_INFO PFX "No powernow capabilities detected\n"); +#endif + return 0; + } + + if ((c->x86_model == 6) && (c->x86_mask == 0)) { + printk(KERN_INFO PFX "K7 660[A0] core detected, " + "enabling errata workarounds\n"); + have_a0 = 1; + } + + cpuid(0x80000007, &eax, &ebx, &ecx, &edx); + + /* Check we can actually do something before we say anything.*/ + if (!(edx & (1 << 1 | 1 << 2))) + return 0; + + printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: "); + + if (edx & 1 << 1) { + printk("frequency"); + can_scale_bus = 1; + } + + if ((edx & (1 << 1 | 1 << 2)) == 0x6) + printk(" and "); + + if (edx & 1 << 2) { + printk("voltage"); + can_scale_vid = 1; + } + + printk(".\n"); + return 1; +} + +#ifdef CONFIG_X86_POWERNOW_K7_ACPI +static void invalidate_entry(unsigned int entry) +{ + powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; +} +#endif + +static int get_ranges(unsigned char *pst) +{ + unsigned int j; + unsigned int speed; + u8 fid, vid; + + powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * + (number_scales + 1)), GFP_KERNEL); + if (!powernow_table) + return -ENOMEM; + + for (j = 0 ; j < number_scales; j++) { + fid = *pst++; + + powernow_table[j].frequency = (fsb * fid_codes[fid]) / 10; + powernow_table[j].index = fid; /* lower 8 bits */ + + speed = powernow_table[j].frequency; + + if ((fid_codes[fid] % 10) == 5) { +#ifdef CONFIG_X86_POWERNOW_K7_ACPI + if (have_a0 == 1) + invalidate_entry(j); +#endif + } + + if (speed < minimum_speed) + minimum_speed = speed; + if (speed > maximum_speed) + maximum_speed = speed; + + vid = *pst++; + powernow_table[j].index |= (vid << 8); /* upper 8 bits */ + + pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " + "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, + fid_codes[fid] % 10, speed/1000, vid, + mobile_vid_table[vid]/1000, + mobile_vid_table[vid]%1000); + } + powernow_table[number_scales].frequency = CPUFREQ_TABLE_END; + powernow_table[number_scales].index = 0; + + return 0; +} + + +static void change_FID(int fid) +{ + union msr_fidvidctl fidvidctl; + + rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + if (fidvidctl.bits.FID != fid) { + fidvidctl.bits.SGTC = latency; + fidvidctl.bits.FID = fid; + fidvidctl.bits.VIDC = 0; + fidvidctl.bits.FIDC = 1; + wrmsrl(MSR_K7_FID_VID_CTL, 
fidvidctl.val); + } +} + + +static void change_VID(int vid) +{ + union msr_fidvidctl fidvidctl; + + rdmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + if (fidvidctl.bits.VID != vid) { + fidvidctl.bits.SGTC = latency; + fidvidctl.bits.VID = vid; + fidvidctl.bits.FIDC = 0; + fidvidctl.bits.VIDC = 1; + wrmsrl(MSR_K7_FID_VID_CTL, fidvidctl.val); + } +} + + +static void change_speed(unsigned int index) +{ + u8 fid, vid; + struct cpufreq_freqs freqs; + union msr_fidvidstatus fidvidstatus; + int cfid; + + /* fid are the lower 8 bits of the index we stored into + * the cpufreq frequency table in powernow_decode_bios, + * vid are the upper 8 bits. + */ + + fid = powernow_table[index].index & 0xFF; + vid = (powernow_table[index].index & 0xFF00) >> 8; + + freqs.cpu = 0; + + rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + cfid = fidvidstatus.bits.CFID; + freqs.old = fsb * fid_codes[cfid] / 10; + + freqs.new = powernow_table[index].frequency; + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + /* Now do the magic poking into the MSRs. */ + + if (have_a0 == 1) /* A0 errata 5 */ + local_irq_disable(); + + if (freqs.old > freqs.new) { + /* Going down, so change FID first */ + change_FID(fid); + change_VID(vid); + } else { + /* Going up, so change VID first */ + change_VID(vid); + change_FID(fid); + } + + + if (have_a0 == 1) + local_irq_enable(); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +} + + +#ifdef CONFIG_X86_POWERNOW_K7_ACPI + +static struct acpi_processor_performance *acpi_processor_perf; + +static int powernow_acpi_init(void) +{ + int i; + int retval = 0; + union powernow_acpi_control_t pc; + + if (acpi_processor_perf != NULL && powernow_table != NULL) { + retval = -EINVAL; + goto err0; + } + + acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance), + GFP_KERNEL); + if (!acpi_processor_perf) { + retval = -ENOMEM; + goto err0; + } + + if (!zalloc_cpumask_var(&acpi_processor_perf->shared_cpu_map, + GFP_KERNEL)) { + retval = -ENOMEM; + goto err05; + } + + if (acpi_processor_register_performance(acpi_processor_perf, 0)) { + retval = -EIO; + goto err1; + } + + if (acpi_processor_perf->control_register.space_id != + ACPI_ADR_SPACE_FIXED_HARDWARE) { + retval = -ENODEV; + goto err2; + } + + if (acpi_processor_perf->status_register.space_id != + ACPI_ADR_SPACE_FIXED_HARDWARE) { + retval = -ENODEV; + goto err2; + } + + number_scales = acpi_processor_perf->state_count; + + if (number_scales < 2) { + retval = -ENODEV; + goto err2; + } + + powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * + (number_scales + 1)), GFP_KERNEL); + if (!powernow_table) { + retval = -ENOMEM; + goto err2; + } + + pc.val = (unsigned long) acpi_processor_perf->states[0].control; + for (i = 0; i < number_scales; i++) { + u8 fid, vid; + struct acpi_processor_px *state = + &acpi_processor_perf->states[i]; + unsigned int speed, speed_mhz; + + pc.val = (unsigned long) state->control; + pr_debug("acpi: P%d: %d MHz %d mW %d uS control %08x SGTC %d\n", + i, + (u32) state->core_frequency, + (u32) state->power, + (u32) state->transition_latency, + (u32) state->control, + pc.bits.sgtc); + + vid = pc.bits.vid; + fid = pc.bits.fid; + + powernow_table[i].frequency = fsb * fid_codes[fid] / 10; + powernow_table[i].index = fid; /* lower 8 bits */ + powernow_table[i].index |= (vid << 8); /* upper 8 bits */ + + speed = powernow_table[i].frequency; + speed_mhz = speed / 1000; + + /* processor_perflib will multiply the MHz value by 1000 to + * get a KHz value (e.g. 1266000). 
However, powernow-k7 works + * with true KHz values (e.g. 1266768). To ensure that all + * powernow frequencies are available, we must ensure that + * ACPI doesn't restrict them, so we round up the MHz value + * to ensure that perflib's computed KHz value is greater than + * or equal to powernow's KHz value. + */ + if (speed % 1000 > 0) + speed_mhz++; + + if ((fid_codes[fid] % 10) == 5) { + if (have_a0 == 1) + invalidate_entry(i); + } + + pr_debug(" FID: 0x%x (%d.%dx [%dMHz]) " + "VID: 0x%x (%d.%03dV)\n", fid, fid_codes[fid] / 10, + fid_codes[fid] % 10, speed_mhz, vid, + mobile_vid_table[vid]/1000, + mobile_vid_table[vid]%1000); + + if (state->core_frequency != speed_mhz) { + state->core_frequency = speed_mhz; + pr_debug(" Corrected ACPI frequency to %d\n", + speed_mhz); + } + + if (latency < pc.bits.sgtc) + latency = pc.bits.sgtc; + + if (speed < minimum_speed) + minimum_speed = speed; + if (speed > maximum_speed) + maximum_speed = speed; + } + + powernow_table[i].frequency = CPUFREQ_TABLE_END; + powernow_table[i].index = 0; + + /* notify BIOS that we exist */ + acpi_processor_notify_smm(THIS_MODULE); + + return 0; + +err2: + acpi_processor_unregister_performance(acpi_processor_perf, 0); +err1: + free_cpumask_var(acpi_processor_perf->shared_cpu_map); +err05: + kfree(acpi_processor_perf); +err0: + printk(KERN_WARNING PFX "ACPI perflib can not be used on " + "this platform\n"); + acpi_processor_perf = NULL; + return retval; +} +#else +static int powernow_acpi_init(void) +{ + printk(KERN_INFO PFX "no support for ACPI processor found." + " Please recompile your kernel with ACPI processor\n"); + return -EINVAL; +} +#endif + +static void print_pst_entry(struct pst_s *pst, unsigned int j) +{ + pr_debug("PST:%d (@%p)\n", j, pst); + pr_debug(" cpuid: 0x%x fsb: %d maxFID: 0x%x startvid: 0x%x\n", + pst->cpuid, pst->fsbspeed, pst->maxfid, pst->startvid); +} + +static int powernow_decode_bios(int maxfid, int startvid) +{ + struct psb_s *psb; + struct pst_s *pst; + unsigned int i, j; + unsigned char *p; + unsigned int etuple; + unsigned int ret; + + etuple = cpuid_eax(0x80000001); + + for (i = 0xC0000; i < 0xffff0 ; i += 16) { + + p = phys_to_virt(i); + + if (memcmp(p, "AMDK7PNOW!", 10) == 0) { + pr_debug("Found PSB header at %p\n", p); + psb = (struct psb_s *) p; + pr_debug("Table version: 0x%x\n", psb->tableversion); + if (psb->tableversion != 0x12) { + printk(KERN_INFO PFX "Sorry, only v1.2 tables" + " supported right now\n"); + return -ENODEV; + } + + pr_debug("Flags: 0x%x\n", psb->flags); + if ((psb->flags & 1) == 0) + pr_debug("Mobile voltage regulator\n"); + else + pr_debug("Desktop voltage regulator\n"); + + latency = psb->settlingtime; + if (latency < 100) { + printk(KERN_INFO PFX "BIOS set settling time " + "to %d microseconds. " + "Should be at least 100. " + "Correcting.\n", latency); + latency = 100; + } + pr_debug("Settling Time: %d microseconds.\n", + psb->settlingtime); + pr_debug("Has %d PST tables. 
(Only dumping ones " + "relevant to this CPU).\n", + psb->numpst); + + p += sizeof(struct psb_s); + + pst = (struct pst_s *) p; + + for (j = 0; j < psb->numpst; j++) { + pst = (struct pst_s *) p; + number_scales = pst->numpstates; + + if ((etuple == pst->cpuid) && + check_fsb(pst->fsbspeed) && + (maxfid == pst->maxfid) && + (startvid == pst->startvid)) { + print_pst_entry(pst, j); + p = (char *)pst + sizeof(struct pst_s); + ret = get_ranges(p); + return ret; + } else { + unsigned int k; + p = (char *)pst + sizeof(struct pst_s); + for (k = 0; k < number_scales; k++) + p += 2; + } + } + printk(KERN_INFO PFX "No PST tables match this cpuid " + "(0x%x)\n", etuple); + printk(KERN_INFO PFX "This is indicative of a broken " + "BIOS.\n"); + + return -EINVAL; + } + p++; + } + + return -ENODEV; +} + + +static int powernow_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate; + + if (cpufreq_frequency_table_target(policy, powernow_table, target_freq, + relation, &newstate)) + return -EINVAL; + + change_speed(newstate); + + return 0; +} + + +static int powernow_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, powernow_table); +} + +/* + * We use the fact that the bus frequency is roughly + * a multiple of 100000/3 kHz, and we compute sgtc according + * to this multiple. + * That way we match more closely how AMD intended this to work, + * and we get the same kind of behaviour already tested under + * the "well-known" other OS. + */ +static int __cpuinit fixup_sgtc(void) +{ + unsigned int sgtc; + unsigned int m; + + m = fsb / 3333; + if ((m % 10) >= 5) + m += 5; + + m /= 10; + + sgtc = 100 * m * latency; + sgtc = sgtc / 3; + if (sgtc > 0xfffff) { + printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc); + sgtc = 0xfffff; + } + return sgtc; +} + +static unsigned int powernow_get(unsigned int cpu) +{ + union msr_fidvidstatus fidvidstatus; + unsigned int cfid; + + if (cpu) + return 0; + rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + cfid = fidvidstatus.bits.CFID; + + return fsb * fid_codes[cfid] / 10; +} + + +static int __cpuinit acer_cpufreq_pst(const struct dmi_system_id *d) +{ + printk(KERN_WARNING PFX + "%s laptop with broken PST tables in BIOS detected.\n", + d->ident); + printk(KERN_WARNING PFX + "You need to downgrade to 3A21 (09/09/2002), or try a newer " + "BIOS than 3A71 (01/20/2003)\n"); + printk(KERN_WARNING PFX + "cpufreq scaling has been disabled as a result of this.\n"); + return 0; +} + +/* + * Some Athlon laptops have hopelessly broken PST tables. + * A BIOS update is all that can save them. + * Mention this, and disable cpufreq. + */ +static struct dmi_system_id __cpuinitdata powernow_dmi_table[] = { + { + .callback = acer_cpufreq_pst, + .ident = "Acer Aspire", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde Software"), + DMI_MATCH(DMI_BIOS_VERSION, "3A71"), + }, + }, + { } +}; + +static int __cpuinit powernow_cpu_init(struct cpufreq_policy *policy) +{ + union msr_fidvidstatus fidvidstatus; + int result; + + if (policy->cpu != 0) + return -ENODEV; + + rdmsrl(MSR_K7_FID_VID_STATUS, fidvidstatus.val); + + recalibrate_cpu_khz(); + + fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID]; + if (!fsb) { + printk(KERN_WARNING PFX "cannot determine bus frequency\n"); + return -EINVAL; + } + pr_debug("FSB: %3dMHz\n", fsb/1000); + + if (dmi_check_system(powernow_dmi_table) || acpi_force) { + printk(KERN_INFO PFX "PSB/PST known to be broken. 
" + "Trying ACPI instead\n"); + result = powernow_acpi_init(); + } else { + result = powernow_decode_bios(fidvidstatus.bits.MFID, + fidvidstatus.bits.SVID); + if (result) { + printk(KERN_INFO PFX "Trying ACPI perflib\n"); + maximum_speed = 0; + minimum_speed = -1; + latency = 0; + result = powernow_acpi_init(); + if (result) { + printk(KERN_INFO PFX + "ACPI and legacy methods failed\n"); + } + } else { + /* SGTC use the bus clock as timer */ + latency = fixup_sgtc(); + printk(KERN_INFO PFX "SGTC: %d\n", latency); + } + } + + if (result) + return result; + + printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n", + minimum_speed/1000, maximum_speed/1000); + + policy->cpuinfo.transition_latency = + cpufreq_scale(2000000UL, fsb, latency); + + policy->cur = powernow_get(0); + + cpufreq_frequency_table_get_attr(powernow_table, policy->cpu); + + return cpufreq_frequency_table_cpuinfo(policy, powernow_table); +} + +static int powernow_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + +#ifdef CONFIG_X86_POWERNOW_K7_ACPI + if (acpi_processor_perf) { + acpi_processor_unregister_performance(acpi_processor_perf, 0); + free_cpumask_var(acpi_processor_perf->shared_cpu_map); + kfree(acpi_processor_perf); + } +#endif + + kfree(powernow_table); + return 0; +} + +static struct freq_attr *powernow_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver powernow_driver = { + .verify = powernow_verify, + .target = powernow_target, + .get = powernow_get, +#ifdef CONFIG_X86_POWERNOW_K7_ACPI + .bios_limit = acpi_processor_get_bios_limit, +#endif + .init = powernow_cpu_init, + .exit = powernow_cpu_exit, + .name = "powernow-k7", + .owner = THIS_MODULE, + .attr = powernow_table_attr, +}; + +static int __init powernow_init(void) +{ + if (check_powernow() == 0) + return -ENODEV; + return cpufreq_register_driver(&powernow_driver); +} + + +static void __exit powernow_exit(void) +{ + cpufreq_unregister_driver(&powernow_driver); +} + +module_param(acpi_force, int, 0444); +MODULE_PARM_DESC(acpi_force, "Force ACPI to be used."); + +MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); +MODULE_DESCRIPTION("Powernow driver for AMD K7 processors."); +MODULE_LICENSE("GPL"); + +late_initcall(powernow_init); +module_exit(powernow_exit); + diff --git a/drivers/cpufreq/powernow-k7.h b/drivers/cpufreq/powernow-k7.h new file mode 100644 index 00000000000..35fb4eaf6e1 --- /dev/null +++ b/drivers/cpufreq/powernow-k7.h @@ -0,0 +1,43 @@ +/* + * (C) 2003 Dave Jones. + * + * Licensed under the terms of the GNU GPL License version 2. 
+ * + * AMD-specific information + * + */ + +union msr_fidvidctl { + struct { + unsigned FID:5, // 4:0 + reserved1:3, // 7:5 + VID:5, // 12:8 + reserved2:3, // 15:13 + FIDC:1, // 16 + VIDC:1, // 17 + reserved3:2, // 19:18 + FIDCHGRATIO:1, // 20 + reserved4:11, // 31-21 + SGTC:20, // 32:51 + reserved5:12; // 63:52 + } bits; + unsigned long long val; +}; + +union msr_fidvidstatus { + struct { + unsigned CFID:5, // 4:0 + reserved1:3, // 7:5 + SFID:5, // 12:8 + reserved2:3, // 15:13 + MFID:5, // 20:16 + reserved3:11, // 31:21 + CVID:5, // 36:32 + reserved4:3, // 39:37 + SVID:5, // 44:40 + reserved5:3, // 47:45 + MVID:5, // 52:48 + reserved6:11; // 63:53 + } bits; + unsigned long long val; +}; diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c new file mode 100644 index 00000000000..83479b6fb9a --- /dev/null +++ b/drivers/cpufreq/powernow-k8.c @@ -0,0 +1,1607 @@ +/* + * (c) 2003-2010 Advanced Micro Devices, Inc. + * Your use of this code is subject to the terms and conditions of the + * GNU general public license version 2. See "COPYING" or + * http://www.gnu.org/licenses/gpl.html + * + * Support : mark.langsdorf@amd.com + * + * Based on the powernow-k7.c module written by Dave Jones. + * (C) 2003 Dave Jones on behalf of SuSE Labs + * (C) 2004 Dominik Brodowski <linux@brodo.de> + * (C) 2004 Pavel Machek <pavel@ucw.cz> + * Licensed under the terms of the GNU GPL License version 2. + * Based upon datasheets & sample CPUs kindly provided by AMD. + * + * Valuable input gratefully received from Dave Jones, Pavel Machek, + * Dominik Brodowski, Jacob Shin, and others. + * Originally developed by Paul Devriendt. + * Processor information obtained from Chapter 9 (Power and Thermal Management) + * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD + * Opteron Processors" available for download from www.amd.com + * + * Tables for specific CPUs can be inferred from + * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf + */ + +#include <linux/kernel.h> +#include <linux/smp.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/cpumask.h> +#include <linux/sched.h> /* for current / set_cpus_allowed() */ +#include <linux/io.h> +#include <linux/delay.h> + +#include <asm/msr.h> + +#include <linux/acpi.h> +#include <linux/mutex.h> +#include <acpi/processor.h> + +#define PFX "powernow-k8: " +#define VERSION "version 2.20.00" +#include "powernow-k8.h" +#include "mperf.h" + +/* serialize freq changes */ +static DEFINE_MUTEX(fidvid_mutex); + +static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data); + +static int cpu_family = CPU_OPTERON; + +/* core performance boost */ +static bool cpb_capable, cpb_enabled; +static struct msr __percpu *msrs; + +static struct cpufreq_driver cpufreq_amd64_driver; + +#ifndef CONFIG_SMP +static inline const struct cpumask *cpu_core_mask(int cpu) +{ + return cpumask_of(0); +} +#endif + +/* Return a frequency in MHz, given an input fid */ +static u32 find_freq_from_fid(u32 fid) +{ + return 800 + (fid * 100); +} + +/* Return a frequency in KHz, given an input fid */ +static u32 find_khz_freq_from_fid(u32 fid) +{ + return 1000 * find_freq_from_fid(fid); +} + +static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, + u32 pstate) +{ + return data[pstate].frequency; +} + +/* Return the vco fid for an input fid + * + * Each "low" fid has corresponding "high" fid, and you can get to "low" fids + * 
only from corresponding high fids. This returns "high" fid corresponding to + * "low" one. + */ +static u32 convert_fid_to_vco_fid(u32 fid) +{ + if (fid < HI_FID_TABLE_BOTTOM) + return 8 + (2 * fid); + else + return fid; +} + +/* + * Return 1 if the pending bit is set. Unless we just instructed the processor + * to transition to a new state, seeing this bit set is really bad news. + */ +static int pending_bit_stuck(void) +{ + u32 lo, hi; + + if (cpu_family == CPU_HW_PSTATE) + return 0; + + rdmsr(MSR_FIDVID_STATUS, lo, hi); + return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0; +} + +/* + * Update the global current fid / vid values from the status msr. + * Returns 1 on error. + */ +static int query_current_values_with_pending_wait(struct powernow_k8_data *data) +{ + u32 lo, hi; + u32 i = 0; + + if (cpu_family == CPU_HW_PSTATE) { + rdmsr(MSR_PSTATE_STATUS, lo, hi); + i = lo & HW_PSTATE_MASK; + data->currpstate = i; + + /* + * a workaround for family 11h erratum 311 might cause + * an "out-of-range Pstate if the core is in Pstate-0 + */ + if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps)) + data->currpstate = HW_PSTATE_0; + + return 0; + } + do { + if (i++ > 10000) { + pr_debug("detected change pending stuck\n"); + return 1; + } + rdmsr(MSR_FIDVID_STATUS, lo, hi); + } while (lo & MSR_S_LO_CHANGE_PENDING); + + data->currvid = hi & MSR_S_HI_CURRENT_VID; + data->currfid = lo & MSR_S_LO_CURRENT_FID; + + return 0; +} + +/* the isochronous relief time */ +static void count_off_irt(struct powernow_k8_data *data) +{ + udelay((1 << data->irt) * 10); + return; +} + +/* the voltage stabilization time */ +static void count_off_vst(struct powernow_k8_data *data) +{ + udelay(data->vstable * VST_UNITS_20US); + return; +} + +/* need to init the control msr to a safe value (for each cpu) */ +static void fidvid_msr_init(void) +{ + u32 lo, hi; + u8 fid, vid; + + rdmsr(MSR_FIDVID_STATUS, lo, hi); + vid = hi & MSR_S_HI_CURRENT_VID; + fid = lo & MSR_S_LO_CURRENT_FID; + lo = fid | (vid << MSR_C_LO_VID_SHIFT); + hi = MSR_C_HI_STP_GNT_BENIGN; + pr_debug("cpu%d, init lo 0x%x, hi 0x%x\n", smp_processor_id(), lo, hi); + wrmsr(MSR_FIDVID_CTL, lo, hi); +} + +/* write the new fid value along with the other control fields to the msr */ +static int write_new_fid(struct powernow_k8_data *data, u32 fid) +{ + u32 lo; + u32 savevid = data->currvid; + u32 i = 0; + + if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) { + printk(KERN_ERR PFX "internal error - overflow on fid write\n"); + return 1; + } + + lo = fid; + lo |= (data->currvid << MSR_C_LO_VID_SHIFT); + lo |= MSR_C_LO_INIT_FID_VID; + + pr_debug("writing fid 0x%x, lo 0x%x, hi 0x%x\n", + fid, lo, data->plllock * PLL_LOCK_CONVERSION); + + do { + wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION); + if (i++ > 100) { + printk(KERN_ERR PFX + "Hardware error - pending bit very stuck - " + "no further pstate changes possible\n"); + return 1; + } + } while (query_current_values_with_pending_wait(data)); + + count_off_irt(data); + + if (savevid != data->currvid) { + printk(KERN_ERR PFX + "vid change on fid trans, old 0x%x, new 0x%x\n", + savevid, data->currvid); + return 1; + } + + if (fid != data->currfid) { + printk(KERN_ERR PFX + "fid trans failed, fid 0x%x, curr 0x%x\n", fid, + data->currfid); + return 1; + } + + return 0; +} + +/* Write a new vid to the hardware */ +static int write_new_vid(struct powernow_k8_data *data, u32 vid) +{ + u32 lo; + u32 savefid = data->currfid; + int i = 0; + + if ((data->currfid & INVALID_FID_MASK) || (vid & 
INVALID_VID_MASK)) { + printk(KERN_ERR PFX "internal error - overflow on vid write\n"); + return 1; + } + + lo = data->currfid; + lo |= (vid << MSR_C_LO_VID_SHIFT); + lo |= MSR_C_LO_INIT_FID_VID; + + pr_debug("writing vid 0x%x, lo 0x%x, hi 0x%x\n", + vid, lo, STOP_GRANT_5NS); + + do { + wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS); + if (i++ > 100) { + printk(KERN_ERR PFX "internal error - pending bit " + "very stuck - no further pstate " + "changes possible\n"); + return 1; + } + } while (query_current_values_with_pending_wait(data)); + + if (savefid != data->currfid) { + printk(KERN_ERR PFX "fid changed on vid trans, old " + "0x%x new 0x%x\n", + savefid, data->currfid); + return 1; + } + + if (vid != data->currvid) { + printk(KERN_ERR PFX "vid trans failed, vid 0x%x, " + "curr 0x%x\n", + vid, data->currvid); + return 1; + } + + return 0; +} + +/* + * Reduce the vid by the max of step or reqvid. + * Decreasing vid codes represent increasing voltages: + * vid of 0 is 1.550V, vid of 0x1e is 0.800V, vid of VID_OFF is off. + */ +static int decrease_vid_code_by_step(struct powernow_k8_data *data, + u32 reqvid, u32 step) +{ + if ((data->currvid - reqvid) > step) + reqvid = data->currvid - step; + + if (write_new_vid(data, reqvid)) + return 1; + + count_off_vst(data); + + return 0; +} + +/* Change hardware pstate by single MSR write */ +static int transition_pstate(struct powernow_k8_data *data, u32 pstate) +{ + wrmsr(MSR_PSTATE_CTRL, pstate, 0); + data->currpstate = pstate; + return 0; +} + +/* Change Opteron/Athlon64 fid and vid, by the 3 phases. */ +static int transition_fid_vid(struct powernow_k8_data *data, + u32 reqfid, u32 reqvid) +{ + if (core_voltage_pre_transition(data, reqvid, reqfid)) + return 1; + + if (core_frequency_transition(data, reqfid)) + return 1; + + if (core_voltage_post_transition(data, reqvid)) + return 1; + + if (query_current_values_with_pending_wait(data)) + return 1; + + if ((reqfid != data->currfid) || (reqvid != data->currvid)) { + printk(KERN_ERR PFX "failed (cpu%d): req 0x%x 0x%x, " + "curr 0x%x 0x%x\n", + smp_processor_id(), + reqfid, reqvid, data->currfid, data->currvid); + return 1; + } + + pr_debug("transitioned (cpu%d): new fid 0x%x, vid 0x%x\n", + smp_processor_id(), data->currfid, data->currvid); + + return 0; +} + +/* Phase 1 - core voltage transition ... 
setup voltage */ +static int core_voltage_pre_transition(struct powernow_k8_data *data, + u32 reqvid, u32 reqfid) +{ + u32 rvosteps = data->rvo; + u32 savefid = data->currfid; + u32 maxvid, lo, rvomult = 1; + + pr_debug("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, " + "reqvid 0x%x, rvo 0x%x\n", + smp_processor_id(), + data->currfid, data->currvid, reqvid, data->rvo); + + if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP)) + rvomult = 2; + rvosteps *= rvomult; + rdmsr(MSR_FIDVID_STATUS, lo, maxvid); + maxvid = 0x1f & (maxvid >> 16); + pr_debug("ph1 maxvid=0x%x\n", maxvid); + if (reqvid < maxvid) /* lower numbers are higher voltages */ + reqvid = maxvid; + + while (data->currvid > reqvid) { + pr_debug("ph1: curr 0x%x, req vid 0x%x\n", + data->currvid, reqvid); + if (decrease_vid_code_by_step(data, reqvid, data->vidmvs)) + return 1; + } + + while ((rvosteps > 0) && + ((rvomult * data->rvo + data->currvid) > reqvid)) { + if (data->currvid == maxvid) { + rvosteps = 0; + } else { + pr_debug("ph1: changing vid for rvo, req 0x%x\n", + data->currvid - 1); + if (decrease_vid_code_by_step(data, data->currvid-1, 1)) + return 1; + rvosteps--; + } + } + + if (query_current_values_with_pending_wait(data)) + return 1; + + if (savefid != data->currfid) { + printk(KERN_ERR PFX "ph1 err, currfid changed 0x%x\n", + data->currfid); + return 1; + } + + pr_debug("ph1 complete, currfid 0x%x, currvid 0x%x\n", + data->currfid, data->currvid); + + return 0; +} + +/* Phase 2 - core frequency transition */ +static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid) +{ + u32 vcoreqfid, vcocurrfid, vcofiddiff; + u32 fid_interval, savevid = data->currvid; + + if (data->currfid == reqfid) { + printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n", + data->currfid); + return 0; + } + + pr_debug("ph2 (cpu%d): starting, currfid 0x%x, currvid 0x%x, " + "reqfid 0x%x\n", + smp_processor_id(), + data->currfid, data->currvid, reqfid); + + vcoreqfid = convert_fid_to_vco_fid(reqfid); + vcocurrfid = convert_fid_to_vco_fid(data->currfid); + vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid + : vcoreqfid - vcocurrfid; + + if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP)) + vcofiddiff = 0; + + while (vcofiddiff > 2) { + (data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2); + + if (reqfid > data->currfid) { + if (data->currfid > LO_FID_TABLE_TOP) { + if (write_new_fid(data, + data->currfid + fid_interval)) + return 1; + } else { + if (write_new_fid + (data, + 2 + convert_fid_to_vco_fid(data->currfid))) + return 1; + } + } else { + if (write_new_fid(data, data->currfid - fid_interval)) + return 1; + } + + vcocurrfid = convert_fid_to_vco_fid(data->currfid); + vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid + : vcoreqfid - vcocurrfid; + } + + if (write_new_fid(data, reqfid)) + return 1; + + if (query_current_values_with_pending_wait(data)) + return 1; + + if (data->currfid != reqfid) { + printk(KERN_ERR PFX + "ph2: mismatch, failed fid transition, " + "curr 0x%x, req 0x%x\n", + data->currfid, reqfid); + return 1; + } + + if (savevid != data->currvid) { + printk(KERN_ERR PFX "ph2: vid changed, save 0x%x, curr 0x%x\n", + savevid, data->currvid); + return 1; + } + + pr_debug("ph2 complete, currfid 0x%x, currvid 0x%x\n", + data->currfid, data->currvid); + + return 0; +} + +/* Phase 3 - core voltage transition flow ... jump to the final vid. 
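The fid must not change during this phase; any fid change below is treated as an error. 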
*/ +static int core_voltage_post_transition(struct powernow_k8_data *data, + u32 reqvid) +{ + u32 savefid = data->currfid; + u32 savereqvid = reqvid; + + pr_debug("ph3 (cpu%d): starting, currfid 0x%x, currvid 0x%x\n", + smp_processor_id(), + data->currfid, data->currvid); + + if (reqvid != data->currvid) { + if (write_new_vid(data, reqvid)) + return 1; + + if (savefid != data->currfid) { + printk(KERN_ERR PFX + "ph3: bad fid change, save 0x%x, curr 0x%x\n", + savefid, data->currfid); + return 1; + } + + if (data->currvid != reqvid) { + printk(KERN_ERR PFX + "ph3: failed vid transition\n, " + "req 0x%x, curr 0x%x", + reqvid, data->currvid); + return 1; + } + } + + if (query_current_values_with_pending_wait(data)) + return 1; + + if (savereqvid != data->currvid) { + pr_debug("ph3 failed, currvid 0x%x\n", data->currvid); + return 1; + } + + if (savefid != data->currfid) { + pr_debug("ph3 failed, currfid changed 0x%x\n", + data->currfid); + return 1; + } + + pr_debug("ph3 complete, currfid 0x%x, currvid 0x%x\n", + data->currfid, data->currvid); + + return 0; +} + +static void check_supported_cpu(void *_rc) +{ + u32 eax, ebx, ecx, edx; + int *rc = _rc; + + *rc = -ENODEV; + + if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD) + return; + + eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); + if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) && + ((eax & CPUID_XFAM) < CPUID_XFAM_10H)) + return; + + if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) { + if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) || + ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) { + printk(KERN_INFO PFX + "Processor cpuid %x not supported\n", eax); + return; + } + + eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES); + if (eax < CPUID_FREQ_VOLT_CAPABILITIES) { + printk(KERN_INFO PFX + "No frequency change capabilities detected\n"); + return; + } + + cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); + if ((edx & P_STATE_TRANSITION_CAPABLE) + != P_STATE_TRANSITION_CAPABLE) { + printk(KERN_INFO PFX + "Power state transitions not supported\n"); + return; + } + } else { /* must be a HW Pstate capable processor */ + cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx); + if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE) + cpu_family = CPU_HW_PSTATE; + else + return; + } + + *rc = 0; +} + +static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, + u8 maxvid) +{ + unsigned int j; + u8 lastfid = 0xff; + + for (j = 0; j < data->numps; j++) { + if (pst[j].vid > LEAST_VID) { + printk(KERN_ERR FW_BUG PFX "vid %d invalid : 0x%x\n", + j, pst[j].vid); + return -EINVAL; + } + if (pst[j].vid < data->rvo) { + /* vid + rvo >= 0 */ + printk(KERN_ERR FW_BUG PFX "0 vid exceeded with pstate" + " %d\n", j); + return -ENODEV; + } + if (pst[j].vid < maxvid + data->rvo) { + /* vid + rvo >= maxvid */ + printk(KERN_ERR FW_BUG PFX "maxvid exceeded with pstate" + " %d\n", j); + return -ENODEV; + } + if (pst[j].fid > MAX_FID) { + printk(KERN_ERR FW_BUG PFX "maxfid exceeded with pstate" + " %d\n", j); + return -ENODEV; + } + if (j && (pst[j].fid < HI_FID_TABLE_BOTTOM)) { + /* Only first fid is allowed to be in "low" range */ + printk(KERN_ERR FW_BUG PFX "two low fids - %d : " + "0x%x\n", j, pst[j].fid); + return -EINVAL; + } + if (pst[j].fid < lastfid) + lastfid = pst[j].fid; + } + if (lastfid & 1) { + printk(KERN_ERR FW_BUG PFX "lastfid invalid\n"); + return -EINVAL; + } + if (lastfid > LO_FID_TABLE_TOP) + printk(KERN_INFO FW_BUG PFX + "first fid not from lo freq table\n"); + + return 0; +} + +static void invalidate_entry(struct 
cpufreq_frequency_table *powernow_table, + unsigned int entry) +{ + powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID; +} + +static void print_basics(struct powernow_k8_data *data) +{ + int j; + for (j = 0; j < data->numps; j++) { + if (data->powernow_table[j].frequency != + CPUFREQ_ENTRY_INVALID) { + if (cpu_family == CPU_HW_PSTATE) { + printk(KERN_INFO PFX + " %d : pstate %d (%d MHz)\n", j, + data->powernow_table[j].index, + data->powernow_table[j].frequency/1000); + } else { + printk(KERN_INFO PFX + "fid 0x%x (%d MHz), vid 0x%x\n", + data->powernow_table[j].index & 0xff, + data->powernow_table[j].frequency/1000, + data->powernow_table[j].index >> 8); + } + } + } + if (data->batps) + printk(KERN_INFO PFX "Only %d pstates on battery\n", + data->batps); +} + +static u32 freq_from_fid_did(u32 fid, u32 did) +{ + u32 mhz = 0; + + if (boot_cpu_data.x86 == 0x10) + mhz = (100 * (fid + 0x10)) >> did; + else if (boot_cpu_data.x86 == 0x11) + mhz = (100 * (fid + 8)) >> did; + else + BUG(); + + return mhz * 1000; +} + +static int fill_powernow_table(struct powernow_k8_data *data, + struct pst_s *pst, u8 maxvid) +{ + struct cpufreq_frequency_table *powernow_table; + unsigned int j; + + if (data->batps) { + /* use ACPI support to get full speed on mains power */ + printk(KERN_WARNING PFX + "Only %d pstates usable (use ACPI driver for full " + "range\n", data->batps); + data->numps = data->batps; + } + + for (j = 1; j < data->numps; j++) { + if (pst[j-1].fid >= pst[j].fid) { + printk(KERN_ERR PFX "PST out of sequence\n"); + return -EINVAL; + } + } + + if (data->numps < 2) { + printk(KERN_ERR PFX "no p states to transition\n"); + return -ENODEV; + } + + if (check_pst_table(data, pst, maxvid)) + return -EINVAL; + + powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) + * (data->numps + 1)), GFP_KERNEL); + if (!powernow_table) { + printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); + return -ENOMEM; + } + + for (j = 0; j < data->numps; j++) { + int freq; + powernow_table[j].index = pst[j].fid; /* lower 8 bits */ + powernow_table[j].index |= (pst[j].vid << 8); /* upper 8 bits */ + freq = find_khz_freq_from_fid(pst[j].fid); + powernow_table[j].frequency = freq; + } + powernow_table[data->numps].frequency = CPUFREQ_TABLE_END; + powernow_table[data->numps].index = 0; + + if (query_current_values_with_pending_wait(data)) { + kfree(powernow_table); + return -EIO; + } + + pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid); + data->powernow_table = powernow_table; + if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) + print_basics(data); + + for (j = 0; j < data->numps; j++) + if ((pst[j].fid == data->currfid) && + (pst[j].vid == data->currvid)) + return 0; + + pr_debug("currfid/vid do not match PST, ignoring\n"); + return 0; +} + +/* Find and validate the PSB/PST table in BIOS. */ +static int find_psb_table(struct powernow_k8_data *data) +{ + struct psb_s *psb; + unsigned int i; + u32 mvs; + u8 maxvid; + u32 cpst = 0; + u32 thiscpuid; + + for (i = 0xc0000; i < 0xffff0; i += 0x10) { + /* Scan BIOS looking for the signature. */ + /* It can not be at ffff0 - it is too big. 
*/ + + psb = phys_to_virt(i); + if (memcmp(psb, PSB_ID_STRING, PSB_ID_STRING_LEN) != 0) + continue; + + pr_debug("found PSB header at 0x%p\n", psb); + + pr_debug("table vers: 0x%x\n", psb->tableversion); + if (psb->tableversion != PSB_VERSION_1_4) { + printk(KERN_ERR FW_BUG PFX "PSB table is not v1.4\n"); + return -ENODEV; + } + + pr_debug("flags: 0x%x\n", psb->flags1); + if (psb->flags1) { + printk(KERN_ERR FW_BUG PFX "unknown flags\n"); + return -ENODEV; + } + + data->vstable = psb->vstable; + pr_debug("voltage stabilization time: %d(*20us)\n", + data->vstable); + + pr_debug("flags2: 0x%x\n", psb->flags2); + data->rvo = psb->flags2 & 3; + data->irt = ((psb->flags2) >> 2) & 3; + mvs = ((psb->flags2) >> 4) & 3; + data->vidmvs = 1 << mvs; + data->batps = ((psb->flags2) >> 6) & 3; + + pr_debug("ramp voltage offset: %d\n", data->rvo); + pr_debug("isochronous relief time: %d\n", data->irt); + pr_debug("maximum voltage step: %d - 0x%x\n", mvs, data->vidmvs); + + pr_debug("numpst: 0x%x\n", psb->num_tables); + cpst = psb->num_tables; + if ((psb->cpuid == 0x00000fc0) || + (psb->cpuid == 0x00000fe0)) { + thiscpuid = cpuid_eax(CPUID_PROCESSOR_SIGNATURE); + if ((thiscpuid == 0x00000fc0) || + (thiscpuid == 0x00000fe0)) + cpst = 1; + } + if (cpst != 1) { + printk(KERN_ERR FW_BUG PFX "numpst must be 1\n"); + return -ENODEV; + } + + data->plllock = psb->plllocktime; + pr_debug("plllocktime: 0x%x (units 1us)\n", psb->plllocktime); + pr_debug("maxfid: 0x%x\n", psb->maxfid); + pr_debug("maxvid: 0x%x\n", psb->maxvid); + maxvid = psb->maxvid; + + data->numps = psb->numps; + pr_debug("numpstates: 0x%x\n", data->numps); + return fill_powernow_table(data, + (struct pst_s *)(psb+1), maxvid); + } + /* + * If you see this message, complain to BIOS manufacturer. If + * he tells you "we do not support Linux" or some similar + * nonsense, remember that Windows 2000 uses the same legacy + * mechanism that the old Linux PSB driver uses. Tell them it + * is broken with Windows 2000. 
+ * + * The reference to the AMD documentation is chapter 9 in the + * BIOS and Kernel Developer's Guide, which is available on + * www.amd.com + */ + printk(KERN_ERR FW_BUG PFX "No PSB or ACPI _PSS objects\n"); + printk(KERN_ERR PFX "Make sure that your BIOS is up to date" + " and Cool'N'Quiet support is enabled in BIOS setup\n"); + return -ENODEV; +} + +static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, + unsigned int index) +{ + u64 control; + + if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) + return; + + control = data->acpi_data.states[index].control; + data->irt = (control >> IRT_SHIFT) & IRT_MASK; + data->rvo = (control >> RVO_SHIFT) & RVO_MASK; + data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; + data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; + data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK); + data->vstable = (control >> VST_SHIFT) & VST_MASK; +} + +static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) +{ + struct cpufreq_frequency_table *powernow_table; + int ret_val = -ENODEV; + u64 control, status; + + if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { + pr_debug("register performance failed: bad ACPI data\n"); + return -EIO; + } + + /* verify the data contained in the ACPI structures */ + if (data->acpi_data.state_count <= 1) { + pr_debug("No ACPI P-States\n"); + goto err_out; + } + + control = data->acpi_data.control_register.space_id; + status = data->acpi_data.status_register.space_id; + + if ((control != ACPI_ADR_SPACE_FIXED_HARDWARE) || + (status != ACPI_ADR_SPACE_FIXED_HARDWARE)) { + pr_debug("Invalid control/status registers (%llx - %llx)\n", + control, status); + goto err_out; + } + + /* fill in data->powernow_table */ + powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) + * (data->acpi_data.state_count + 1)), GFP_KERNEL); + if (!powernow_table) { + pr_debug("powernow_table memory alloc failure\n"); + goto err_out; + } + + /* fill in data */ + data->numps = data->acpi_data.state_count; + powernow_k8_acpi_pst_values(data, 0); + + if (cpu_family == CPU_HW_PSTATE) + ret_val = fill_powernow_table_pstate(data, powernow_table); + else + ret_val = fill_powernow_table_fidvid(data, powernow_table); + if (ret_val) + goto err_out_mem; + + powernow_table[data->acpi_data.state_count].frequency = + CPUFREQ_TABLE_END; + powernow_table[data->acpi_data.state_count].index = 0; + data->powernow_table = powernow_table; + + if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu) + print_basics(data); + + /* notify BIOS that we exist */ + acpi_processor_notify_smm(THIS_MODULE); + + if (!zalloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) { + printk(KERN_ERR PFX + "unable to alloc powernow_k8_data cpumask\n"); + ret_val = -ENOMEM; + goto err_out_mem; + } + + return 0; + +err_out_mem: + kfree(powernow_table); + +err_out: + acpi_processor_unregister_performance(&data->acpi_data, data->cpu); + + /* data->acpi_data.state_count informs us at ->exit() + * whether ACPI was used */ + data->acpi_data.state_count = 0; + + return ret_val; +} + +static int fill_powernow_table_pstate(struct powernow_k8_data *data, + struct cpufreq_frequency_table *powernow_table) +{ + int i; + u32 hi = 0, lo = 0; + rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi); + data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; + + for (i = 0; i < data->acpi_data.state_count; i++) { + u32 index; + + index = data->acpi_data.states[i].control & HW_PSTATE_MASK; + if (index > data->max_hw_pstate) { 
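+ /* the ACPI table names a P-state that the limit MSR says does not exist */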
+ printk(KERN_ERR PFX "invalid pstate %d - " + "bad value %d.\n", i, index); + printk(KERN_ERR PFX "Please report to BIOS " + "manufacturer\n"); + invalidate_entry(powernow_table, i); + continue; + } + rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi); + if (!(hi & HW_PSTATE_VALID_MASK)) { + pr_debug("invalid pstate %d, ignoring\n", index); + invalidate_entry(powernow_table, i); + continue; + } + + powernow_table[i].index = index; + + /* Frequency may be rounded for these */ + if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) + || boot_cpu_data.x86 == 0x11) { + powernow_table[i].frequency = + freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); + } else + powernow_table[i].frequency = + data->acpi_data.states[i].core_frequency * 1000; + } + return 0; +} + +static int fill_powernow_table_fidvid(struct powernow_k8_data *data, + struct cpufreq_frequency_table *powernow_table) +{ + int i; + + for (i = 0; i < data->acpi_data.state_count; i++) { + u32 fid; + u32 vid; + u32 freq, index; + u64 status, control; + + if (data->exttype) { + status = data->acpi_data.states[i].status; + fid = status & EXT_FID_MASK; + vid = (status >> VID_SHIFT) & EXT_VID_MASK; + } else { + control = data->acpi_data.states[i].control; + fid = control & FID_MASK; + vid = (control >> VID_SHIFT) & VID_MASK; + } + + pr_debug(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); + + index = fid | (vid<<8); + powernow_table[i].index = index; + + freq = find_khz_freq_from_fid(fid); + powernow_table[i].frequency = freq; + + /* verify frequency is OK */ + if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) { + pr_debug("invalid freq %u kHz, ignoring\n", freq); + invalidate_entry(powernow_table, i); + continue; + } + + /* verify voltage is OK - + * BIOSs are using "off" to indicate invalid */ + if (vid == VID_OFF) { + pr_debug("invalid vid %u, ignoring\n", vid); + invalidate_entry(powernow_table, i); + continue; + } + + if (freq != (data->acpi_data.states[i].core_frequency * 1000)) { + printk(KERN_INFO PFX "invalid freq entries " + "%u kHz vs. %u kHz\n", freq, + (unsigned int) + (data->acpi_data.states[i].core_frequency + * 1000)); + invalidate_entry(powernow_table, i); + continue; + } + } + return 0; +} + +static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) +{ + if (data->acpi_data.state_count) + acpi_processor_unregister_performance(&data->acpi_data, + data->cpu); + free_cpumask_var(data->acpi_data.shared_cpu_map); +} + +static int get_transition_latency(struct powernow_k8_data *data) +{ + int max_latency = 0; + int i; + for (i = 0; i < data->acpi_data.state_count; i++) { + int cur_latency = data->acpi_data.states[i].transition_latency + + data->acpi_data.states[i].bus_master_latency; + if (cur_latency > max_latency) + max_latency = cur_latency; + } + if (max_latency == 0) { + /* + * Fam 11h and later may return 0 as transition latency. This + * is intended and means "very fast". While cpufreq core and + * governors currently can handle that gracefully, better set it + * to 1 to avoid problems in the future. 
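For families older than 11h a zero latency indicates a firmware bug, hence the FW_WARN below. 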
+ */ + if (boot_cpu_data.x86 < 0x11) + printk(KERN_ERR FW_WARN PFX "Invalid zero transition " + "latency\n"); + max_latency = 1; + } + /* value in usecs, needs to be in nanoseconds */ + return 1000 * max_latency; +} + +/* Take a frequency, and issue the fid/vid transition command */ +static int transition_frequency_fidvid(struct powernow_k8_data *data, + unsigned int index) +{ + u32 fid = 0; + u32 vid = 0; + int res, i; + struct cpufreq_freqs freqs; + + pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); + + /* fid/vid correctness check for k8 */ + /* fid are the lower 8 bits of the index we stored into + * the cpufreq frequency table in find_psb_table, vid + * are the upper 8 bits. + */ + fid = data->powernow_table[index].index & 0xFF; + vid = (data->powernow_table[index].index & 0xFF00) >> 8; + + pr_debug("table matched fid 0x%x, giving vid 0x%x\n", fid, vid); + + if (query_current_values_with_pending_wait(data)) + return 1; + + if ((data->currvid == vid) && (data->currfid == fid)) { + pr_debug("target matches current values (fid 0x%x, vid 0x%x)\n", + fid, vid); + return 0; + } + + pr_debug("cpu %d, changing to fid 0x%x, vid 0x%x\n", + smp_processor_id(), fid, vid); + freqs.old = find_khz_freq_from_fid(data->currfid); + freqs.new = find_khz_freq_from_fid(fid); + + for_each_cpu(i, data->available_cores) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + res = transition_fid_vid(data, fid, vid); + freqs.new = find_khz_freq_from_fid(data->currfid); + + for_each_cpu(i, data->available_cores) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + return res; +} + +/* Take a frequency, and issue the hardware pstate transition command */ +static int transition_frequency_pstate(struct powernow_k8_data *data, + unsigned int index) +{ + u32 pstate = 0; + int res, i; + struct cpufreq_freqs freqs; + + pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index); + + /* get MSR index for hardware pstate transition */ + pstate = index & HW_PSTATE_MASK; + if (pstate > data->max_hw_pstate) + return 0; + freqs.old = find_khz_freq_from_pstate(data->powernow_table, + data->currpstate); + freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); + + for_each_cpu(i, data->available_cores) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + res = transition_pstate(data, pstate); + freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); + + for_each_cpu(i, data->available_cores) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + return res; +} + +/* Driver entry point to switch to the target frequency */ +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) +{ + cpumask_var_t oldmask; + struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); + u32 checkfid; + u32 checkvid; + unsigned int newstate; + int ret = -EIO; + + if (!data) + return -EINVAL; + + checkfid = data->currfid; + checkvid = data->currvid; + + /* only run on specific CPU from here on. 
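The fid/vid MSRs can only be read and written on the CPU being changed. 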
*/ + /* This is poor form: use a workqueue or smp_call_function_single */ + if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) + return -ENOMEM; + + cpumask_copy(oldmask, tsk_cpus_allowed(current)); + set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); + + if (smp_processor_id() != pol->cpu) { + printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); + goto err_out; + } + + if (pending_bit_stuck()) { + printk(KERN_ERR PFX "failing targ, change pending bit set\n"); + goto err_out; + } + + pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", + pol->cpu, targfreq, pol->min, pol->max, relation); + + if (query_current_values_with_pending_wait(data)) + goto err_out; + + if (cpu_family != CPU_HW_PSTATE) { + pr_debug("targ: curr fid 0x%x, vid 0x%x\n", + data->currfid, data->currvid); + + if ((checkvid != data->currvid) || + (checkfid != data->currfid)) { + printk(KERN_INFO PFX + "error - out of sync, fid 0x%x 0x%x, " + "vid 0x%x 0x%x\n", + checkfid, data->currfid, + checkvid, data->currvid); + } + } + + if (cpufreq_frequency_table_target(pol, data->powernow_table, + targfreq, relation, &newstate)) + goto err_out; + + mutex_lock(&fidvid_mutex); + + powernow_k8_acpi_pst_values(data, newstate); + + if (cpu_family == CPU_HW_PSTATE) + ret = transition_frequency_pstate(data, newstate); + else + ret = transition_frequency_fidvid(data, newstate); + if (ret) { + printk(KERN_ERR PFX "transition frequency failed\n"); + ret = 1; + mutex_unlock(&fidvid_mutex); + goto err_out; + } + mutex_unlock(&fidvid_mutex); + + if (cpu_family == CPU_HW_PSTATE) + pol->cur = find_khz_freq_from_pstate(data->powernow_table, + newstate); + else + pol->cur = find_khz_freq_from_fid(data->currfid); + ret = 0; + +err_out: + set_cpus_allowed_ptr(current, oldmask); + free_cpumask_var(oldmask); + return ret; +} + +/* Driver entry point to verify the policy and range of frequencies */ +static int powernowk8_verify(struct cpufreq_policy *pol) +{ + struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); + + if (!data) + return -EINVAL; + + return cpufreq_frequency_table_verify(pol, data->powernow_table); +} + +struct init_on_cpu { + struct powernow_k8_data *data; + int rc; +}; + +static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu) +{ + struct init_on_cpu *init_on_cpu = _init_on_cpu; + + if (pending_bit_stuck()) { + printk(KERN_ERR PFX "failing init, change pending bit set\n"); + init_on_cpu->rc = -ENODEV; + return; + } + + if (query_current_values_with_pending_wait(init_on_cpu->data)) { + init_on_cpu->rc = -ENODEV; + return; + } + + if (cpu_family == CPU_OPTERON) + fidvid_msr_init(); + + init_on_cpu->rc = 0; +} + +/* per CPU init entry point to the driver */ +static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) +{ + static const char ACPI_PSS_BIOS_BUG_MSG[] = + KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n" + FW_BUG PFX "Try again with latest BIOS.\n"; + struct powernow_k8_data *data; + struct init_on_cpu init_on_cpu; + int rc; + struct cpuinfo_x86 *c = &cpu_data(pol->cpu); + + if (!cpu_online(pol->cpu)) + return -ENODEV; + + smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1); + if (rc) + return -ENODEV; + + data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); + if (!data) { + printk(KERN_ERR PFX "unable to alloc powernow_k8_data\n"); + return -ENOMEM; + } + + data->cpu = pol->cpu; + data->currpstate = HW_PSTATE_INVALID; + + if (powernow_k8_cpu_init_acpi(data)) { + /* + * Use the PSB BIOS structure. 
This is only available on + * an UP version, and is deprecated by AMD. + */ + if (num_online_cpus() != 1) { + printk_once(ACPI_PSS_BIOS_BUG_MSG); + goto err_out; + } + if (pol->cpu != 0) { + printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for " + "CPU other than CPU0. Complain to your BIOS " + "vendor.\n"); + goto err_out; + } + rc = find_psb_table(data); + if (rc) + goto err_out; + + /* Take a crude guess here. + * That guess was in microseconds, so multiply with 1000 */ + pol->cpuinfo.transition_latency = ( + ((data->rvo + 8) * data->vstable * VST_UNITS_20US) + + ((1 << data->irt) * 30)) * 1000; + } else /* ACPI _PSS objects available */ + pol->cpuinfo.transition_latency = get_transition_latency(data); + + /* only run on specific CPU from here on */ + init_on_cpu.data = data; + smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu, + &init_on_cpu, 1); + rc = init_on_cpu.rc; + if (rc != 0) + goto err_out_exit_acpi; + + if (cpu_family == CPU_HW_PSTATE) + cpumask_copy(pol->cpus, cpumask_of(pol->cpu)); + else + cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu)); + data->available_cores = pol->cpus; + + if (cpu_family == CPU_HW_PSTATE) + pol->cur = find_khz_freq_from_pstate(data->powernow_table, + data->currpstate); + else + pol->cur = find_khz_freq_from_fid(data->currfid); + pr_debug("policy current frequency %d kHz\n", pol->cur); + + /* min/max the cpu is capable of */ + if (cpufreq_frequency_table_cpuinfo(pol, data->powernow_table)) { + printk(KERN_ERR FW_BUG PFX "invalid powernow_table\n"); + powernow_k8_cpu_exit_acpi(data); + kfree(data->powernow_table); + kfree(data); + return -EINVAL; + } + + /* Check for APERF/MPERF support in hardware */ + if (cpu_has(c, X86_FEATURE_APERFMPERF)) + cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf; + + cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu); + + if (cpu_family == CPU_HW_PSTATE) + pr_debug("cpu_init done, current pstate 0x%x\n", + data->currpstate); + else + pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n", + data->currfid, data->currvid); + + per_cpu(powernow_data, pol->cpu) = data; + + return 0; + +err_out_exit_acpi: + powernow_k8_cpu_exit_acpi(data); + +err_out: + kfree(data); + return -ENODEV; +} + +static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol) +{ + struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); + + if (!data) + return -EINVAL; + + powernow_k8_cpu_exit_acpi(data); + + cpufreq_frequency_table_put_attr(pol->cpu); + + kfree(data->powernow_table); + kfree(data); + per_cpu(powernow_data, pol->cpu) = NULL; + + return 0; +} + +static void query_values_on_cpu(void *_err) +{ + int *err = _err; + struct powernow_k8_data *data = __this_cpu_read(powernow_data); + + *err = query_current_values_with_pending_wait(data); +} + +static unsigned int powernowk8_get(unsigned int cpu) +{ + struct powernow_k8_data *data = per_cpu(powernow_data, cpu); + unsigned int khz = 0; + int err; + + if (!data) + return 0; + + smp_call_function_single(cpu, query_values_on_cpu, &err, true); + if (err) + goto out; + + if (cpu_family == CPU_HW_PSTATE) + khz = find_khz_freq_from_pstate(data->powernow_table, + data->currpstate); + else + khz = find_khz_freq_from_fid(data->currfid); + + +out: + return khz; +} + +static void _cpb_toggle_msrs(bool t) +{ + int cpu; + + get_online_cpus(); + + rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); + + for_each_cpu(cpu, cpu_online_mask) { + struct msr *reg = per_cpu_ptr(msrs, cpu); + if (t) + reg->l &= ~BIT(25); + else + reg->l |= BIT(25); + } + 
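+	/*
+	 * Bit 25 of MSR_K7_HWCR is the boost-disable bit: clear it to enable
+	 * core performance boost (t == true), set it to disable. The
+	 * modified copies are written back to all online cores below.
+	 */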
wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); + + put_online_cpus(); +} + +/* + * Switch on/off core performance boosting. + * + * 0=disable + * 1=enable. + */ +static void cpb_toggle(bool t) +{ + if (!cpb_capable) + return; + + if (t && !cpb_enabled) { + cpb_enabled = true; + _cpb_toggle_msrs(t); + printk(KERN_INFO PFX "Core Boosting enabled.\n"); + } else if (!t && cpb_enabled) { + cpb_enabled = false; + _cpb_toggle_msrs(t); + printk(KERN_INFO PFX "Core Boosting disabled.\n"); + } +} + +static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, + size_t count) +{ + int ret = -EINVAL; + unsigned long val = 0; + + ret = strict_strtoul(buf, 10, &val); + if (!ret && (val == 0 || val == 1) && cpb_capable) + cpb_toggle(val); + else + return -EINVAL; + + return count; +} + +static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) +{ + return sprintf(buf, "%u\n", cpb_enabled); +} + +#define define_one_rw(_name) \ +static struct freq_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +define_one_rw(cpb); + +static struct freq_attr *powernow_k8_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + &cpb, + NULL, +}; + +static struct cpufreq_driver cpufreq_amd64_driver = { + .verify = powernowk8_verify, + .target = powernowk8_target, + .bios_limit = acpi_processor_get_bios_limit, + .init = powernowk8_cpu_init, + .exit = __devexit_p(powernowk8_cpu_exit), + .get = powernowk8_get, + .name = "powernow-k8", + .owner = THIS_MODULE, + .attr = powernow_k8_attr, +}; + +/* + * Clear the boost-disable flag on the CPU_DOWN path so that this cpu + * cannot block the remaining ones from boosting. On the CPU_UP path we + * simply keep the boost-disable flag in sync with the current global + * state. + */ +static int cpb_notify(struct notifier_block *nb, unsigned long action, + void *hcpu) +{ + unsigned cpu = (long)hcpu; + u32 lo, hi; + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + + if (!cpb_enabled) { + rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); + lo |= BIT(25); + wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); + } + break; + + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); + lo &= ~BIT(25); + wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi); + break; + + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block cpb_nb = { + .notifier_call = cpb_notify, +}; + +/* driver entry point for init */ +static int __cpuinit powernowk8_init(void) +{ + unsigned int i, supported_cpus = 0, cpu; + int rv; + + for_each_online_cpu(i) { + int rc; + smp_call_function_single(i, check_supported_cpu, &rc, 1); + if (rc == 0) + supported_cpus++; + } + + if (supported_cpus != num_online_cpus()) + return -ENODEV; + + printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", + num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); + + if (boot_cpu_has(X86_FEATURE_CPB)) { + + cpb_capable = true; + + msrs = msrs_alloc(); + if (!msrs) { + printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); + return -ENOMEM; + } + + register_cpu_notifier(&cpb_nb); + + rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); + + for_each_cpu(cpu, cpu_online_mask) { + struct msr *reg = per_cpu_ptr(msrs, cpu); + cpb_enabled |= !(!!(reg->l & BIT(25))); + } + + printk(KERN_INFO PFX "Core Performance Boosting: %s.\n", + (cpb_enabled ? 
"on" : "off")); + } + + rv = cpufreq_register_driver(&cpufreq_amd64_driver); + if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { + unregister_cpu_notifier(&cpb_nb); + msrs_free(msrs); + msrs = NULL; + } + return rv; +} + +/* driver entry point for term */ +static void __exit powernowk8_exit(void) +{ + pr_debug("exit\n"); + + if (boot_cpu_has(X86_FEATURE_CPB)) { + msrs_free(msrs); + msrs = NULL; + + unregister_cpu_notifier(&cpb_nb); + } + + cpufreq_unregister_driver(&cpufreq_amd64_driver); +} + +MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and " + "Mark Langsdorf <mark.langsdorf@amd.com>"); +MODULE_DESCRIPTION("AMD Athlon 64 and Opteron processor frequency driver."); +MODULE_LICENSE("GPL"); + +late_initcall(powernowk8_init); +module_exit(powernowk8_exit); diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h new file mode 100644 index 00000000000..3744d26cdc2 --- /dev/null +++ b/drivers/cpufreq/powernow-k8.h @@ -0,0 +1,222 @@ +/* + * (c) 2003-2006 Advanced Micro Devices, Inc. + * Your use of this code is subject to the terms and conditions of the + * GNU general public license version 2. See "COPYING" or + * http://www.gnu.org/licenses/gpl.html + */ + +enum pstate { + HW_PSTATE_INVALID = 0xff, + HW_PSTATE_0 = 0, + HW_PSTATE_1 = 1, + HW_PSTATE_2 = 2, + HW_PSTATE_3 = 3, + HW_PSTATE_4 = 4, + HW_PSTATE_5 = 5, + HW_PSTATE_6 = 6, + HW_PSTATE_7 = 7, +}; + +struct powernow_k8_data { + unsigned int cpu; + + u32 numps; /* number of p-states */ + u32 batps; /* number of p-states supported on battery */ + u32 max_hw_pstate; /* maximum legal hardware pstate */ + + /* these values are constant when the PSB is used to determine + * vid/fid pairings, but are modified during the ->target() call + * when ACPI is used */ + u32 rvo; /* ramp voltage offset */ + u32 irt; /* isochronous relief time */ + u32 vidmvs; /* usable value calculated from mvs */ + u32 vstable; /* voltage stabilization time, units 20 us */ + u32 plllock; /* pll lock time, units 1 us */ + u32 exttype; /* extended interface = 1 */ + + /* keep track of the current fid / vid or pstate */ + u32 currvid; + u32 currfid; + enum pstate currpstate; + + /* the powernow_table includes all frequency and vid/fid pairings: + * fid are the lower 8 bits of the index, vid are the upper 8 bits. + * frequency is in kHz */ + struct cpufreq_frequency_table *powernow_table; + + /* the acpi table needs to be kept. it's only available if ACPI was + * used to determine valid frequency/vid/fid states */ + struct acpi_processor_performance acpi_data; + + /* we need to keep track of associated cores, but let cpufreq + * handle hotplug events - so just point at cpufreq pol->cpus + * structure */ + struct cpumask *available_cores; +}; + +/* processor's cpuid instruction support */ +#define CPUID_PROCESSOR_SIGNATURE 1 /* function 1 */ +#define CPUID_XFAM 0x0ff00000 /* extended family */ +#define CPUID_XFAM_K8 0 +#define CPUID_XMOD 0x000f0000 /* extended model */ +#define CPUID_XMOD_REV_MASK 0x000c0000 +#define CPUID_XFAM_10H 0x00100000 /* family 0x10 */ +#define CPUID_USE_XFAM_XMOD 0x00000f00 +#define CPUID_GET_MAX_CAPABILITIES 0x80000000 +#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007 +#define P_STATE_TRANSITION_CAPABLE 6 + +/* Model Specific Registers for p-state transitions. MSRs are 64-bit. For */ +/* writes (wrmsr - opcode 0f 30), the register number is placed in ecx, and */ +/* the value to write is placed in edx:eax. For reads (rdmsr - opcode 0f 32), */ +/* the register number is placed in ecx, and the data is returned in edx:eax. 
*/ + +#define MSR_FIDVID_CTL 0xc0010041 +#define MSR_FIDVID_STATUS 0xc0010042 + +/* Field definitions within the FID VID Low Control MSR : */ +#define MSR_C_LO_INIT_FID_VID 0x00010000 +#define MSR_C_LO_NEW_VID 0x00003f00 +#define MSR_C_LO_NEW_FID 0x0000003f +#define MSR_C_LO_VID_SHIFT 8 + +/* Field definitions within the FID VID High Control MSR : */ +#define MSR_C_HI_STP_GNT_TO 0x000fffff + +/* Field definitions within the FID VID Low Status MSR : */ +#define MSR_S_LO_CHANGE_PENDING 0x80000000 /* cleared when completed */ +#define MSR_S_LO_MAX_RAMP_VID 0x3f000000 +#define MSR_S_LO_MAX_FID 0x003f0000 +#define MSR_S_LO_START_FID 0x00003f00 +#define MSR_S_LO_CURRENT_FID 0x0000003f + +/* Field definitions within the FID VID High Status MSR : */ +#define MSR_S_HI_MIN_WORKING_VID 0x3f000000 +#define MSR_S_HI_MAX_WORKING_VID 0x003f0000 +#define MSR_S_HI_START_VID 0x00003f00 +#define MSR_S_HI_CURRENT_VID 0x0000003f +#define MSR_C_HI_STP_GNT_BENIGN 0x00000001 + + +/* Hardware Pstate _PSS and MSR definitions */ +#define USE_HW_PSTATE 0x00000080 +#define HW_PSTATE_MASK 0x00000007 +#define HW_PSTATE_VALID_MASK 0x80000000 +#define HW_PSTATE_MAX_MASK 0x000000f0 +#define HW_PSTATE_MAX_SHIFT 4 +#define MSR_PSTATE_DEF_BASE 0xc0010064 /* base of Pstate MSRs */ +#define MSR_PSTATE_STATUS 0xc0010063 /* Pstate Status MSR */ +#define MSR_PSTATE_CTRL 0xc0010062 /* Pstate control MSR */ +#define MSR_PSTATE_CUR_LIMIT 0xc0010061 /* pstate current limit MSR */ + +/* define the two driver architectures */ +#define CPU_OPTERON 0 +#define CPU_HW_PSTATE 1 + + +/* + * There are restrictions frequencies have to follow: + * - only 1 entry in the low fid table ( <=1.4GHz ) + * - lowest entry in the high fid table must be >= 2 * the entry in the + * low fid table + * - lowest entry in the high fid table must be a <= 200MHz + 2 * the entry + * in the low fid table + * - the parts can only step at <= 200 MHz intervals, odd fid values are + * supported in revision G and later revisions. + * - lowest frequency must be >= interprocessor hypertransport link speed + * (only applies to MP systems obviously) + */ + +/* fids (frequency identifiers) are arranged in 2 tables - lo and hi */ +#define LO_FID_TABLE_TOP 7 /* fid values marking the boundary */ +#define HI_FID_TABLE_BOTTOM 8 /* between the low and high tables */ + +#define LO_VCOFREQ_TABLE_TOP 1400 /* corresponding vco frequency values */ +#define HI_VCOFREQ_TABLE_BOTTOM 1600 + +#define MIN_FREQ_RESOLUTION 200 /* fids jump by 2 matching freq jumps by 200 */ + +#define MAX_FID 0x2a /* Spec only gives FID values as far as 5 GHz */ +#define LEAST_VID 0x3e /* Lowest (numerically highest) useful vid value */ + +#define MIN_FREQ 800 /* Min and max freqs, per spec */ +#define MAX_FREQ 5000 + +#define INVALID_FID_MASK 0xffffffc0 /* not a valid fid if these bits are set */ +#define INVALID_VID_MASK 0xffffffc0 /* not a valid vid if these bits are set */ + +#define VID_OFF 0x3f + +#define STOP_GRANT_5NS 1 /* min poss memory access latency for voltage change */ + +#define PLL_LOCK_CONVERSION (1000/5) /* ms to ns, then divide by clock period */ + +#define MAXIMUM_VID_STEPS 1 /* Current cpus only allow a single step of 25mV */ +#define VST_UNITS_20US 20 /* Voltage Stabilization Time is in units of 20us */ + +/* + * Most values of interest are encoded in a single field of the _PSS + * entries: the "control" value. 
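+ * For a non-extended entry the fields unpack as (a sketch in terms of
+ * the shift/mask constants defined below):
+ *
+ *	irt = (control >> IRT_SHIFT) & IRT_MASK;
+ *	vst = (control >> VST_SHIFT) & VST_MASK;
+ *	vid = (control >> VID_SHIFT) & VID_MASK;
+ *	fid = control & FID_MASK;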
+ */ + +#define IRT_SHIFT 30 +#define RVO_SHIFT 28 +#define EXT_TYPE_SHIFT 27 +#define PLL_L_SHIFT 20 +#define MVS_SHIFT 18 +#define VST_SHIFT 11 +#define VID_SHIFT 6 +#define IRT_MASK 3 +#define RVO_MASK 3 +#define EXT_TYPE_MASK 1 +#define PLL_L_MASK 0x7f +#define MVS_MASK 3 +#define VST_MASK 0x7f +#define VID_MASK 0x1f +#define FID_MASK 0x1f +#define EXT_VID_MASK 0x3f +#define EXT_FID_MASK 0x3f + + +/* + * Version 1.4 of the PSB table. This table is constructed by BIOS and is + * to tell the OS's power management driver which VIDs and FIDs are + * supported by this particular processor. + * If the data in the PSB / PST is wrong, then this driver will program the + * wrong values into hardware, which is very likely to lead to a crash. + */ + +#define PSB_ID_STRING "AMDK7PNOW!" +#define PSB_ID_STRING_LEN 10 + +#define PSB_VERSION_1_4 0x14 + +struct psb_s { + u8 signature[10]; + u8 tableversion; + u8 flags1; + u16 vstable; + u8 flags2; + u8 num_tables; + u32 cpuid; + u8 plllocktime; + u8 maxfid; + u8 maxvid; + u8 numps; +}; + +/* Pairs of fid/vid values are appended to the version 1.4 PSB table. */ +struct pst_s { + u8 fid; + u8 vid; +}; + +static int core_voltage_pre_transition(struct powernow_k8_data *data, + u32 reqvid, u32 regfid); +static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid); +static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid); + +static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index); + +static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); +static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table); diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c new file mode 100644 index 00000000000..1e205e6b172 --- /dev/null +++ b/drivers/cpufreq/sc520_freq.c @@ -0,0 +1,192 @@ +/* + * sc520_freq.c: cpufreq driver for the AMD Elan sc520 + * + * Copyright (C) 2005 Sean Young <sean@mess.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + * Based on elanfreq.c + * + * 2005-03-30: - initial revision + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> + +#include <linux/delay.h> +#include <linux/cpufreq.h> +#include <linux/timex.h> +#include <linux/io.h> + +#include <asm/msr.h> + +#define MMCR_BASE 0xfffef000 /* The default base address */ +#define OFFS_CPUCTL 0x2 /* CPU Control Register */ + +static __u8 __iomem *cpuctl; + +#define PFX "sc520_freq: " + +static struct cpufreq_frequency_table sc520_freq_table[] = { + {0x01, 100000}, + {0x02, 133000}, + {0, CPUFREQ_TABLE_END}, +}; + +static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu) +{ + u8 clockspeed_reg = *cpuctl; + + switch (clockspeed_reg & 0x03) { + default: + printk(KERN_ERR PFX "error: cpuctl register has unexpected " + "value %02x\n", clockspeed_reg); + case 0x01: + return 100000; + case 0x02: + return 133000; + } +} + +static void sc520_freq_set_cpu_state(unsigned int state) +{ + + struct cpufreq_freqs freqs; + u8 clockspeed_reg; + + freqs.old = sc520_freq_get_cpu_frequency(0); + freqs.new = sc520_freq_table[state].frequency; + freqs.cpu = 0; /* AMD Elan is UP */ + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + + pr_debug("attempting to set frequency to %i kHz\n", + sc520_freq_table[state].frequency); + + local_irq_disable(); + + clockspeed_reg = *cpuctl & ~0x03; + *cpuctl = clockspeed_reg | sc520_freq_table[state].index; + + local_irq_enable(); + + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); +}; + +static int sc520_freq_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &sc520_freq_table[0]); +} + +static int sc520_freq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0; + + if (cpufreq_frequency_table_target(policy, sc520_freq_table, + target_freq, relation, &newstate)) + return -EINVAL; + + sc520_freq_set_cpu_state(newstate); + + return 0; +} + + +/* + * Module init and exit code + */ + +static int sc520_freq_cpu_init(struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + int result; + + /* capability check */ + if (c->x86_vendor != X86_VENDOR_AMD || + c->x86 != 4 || c->x86_model != 9) + return -ENODEV; + + /* cpuinfo and default policy values */ + policy->cpuinfo.transition_latency = 1000000; /* 1ms */ + policy->cur = sc520_freq_get_cpu_frequency(0); + + result = cpufreq_frequency_table_cpuinfo(policy, sc520_freq_table); + if (result) + return result; + + cpufreq_frequency_table_get_attr(sc520_freq_table, policy->cpu); + + return 0; +} + + +static int sc520_freq_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + + +static struct freq_attr *sc520_freq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + + +static struct cpufreq_driver sc520_freq_driver = { + .get = sc520_freq_get_cpu_frequency, + .verify = sc520_freq_verify, + .target = sc520_freq_target, + .init = sc520_freq_cpu_init, + .exit = sc520_freq_cpu_exit, + .name = "sc520_freq", + .owner = THIS_MODULE, + .attr = sc520_freq_attr, +}; + + +static int __init sc520_freq_init(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + int err; + + /* Test if we have the right hardware */ + if (c->x86_vendor != X86_VENDOR_AMD || + c->x86 != 4 || c->x86_model != 9) { + pr_debug("no Elan SC520 processor found!\n"); + return -ENODEV; + } + cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1); + if (!cpuctl) { + printk(KERN_ERR 
"sc520_freq: error: failed to remap memory\n"); + return -ENOMEM; + } + + err = cpufreq_register_driver(&sc520_freq_driver); + if (err) + iounmap(cpuctl); + + return err; +} + + +static void __exit sc520_freq_exit(void) +{ + cpufreq_unregister_driver(&sc520_freq_driver); + iounmap(cpuctl); +} + + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Sean Young <sean@mess.org>"); +MODULE_DESCRIPTION("cpufreq driver for AMD's Elan sc520 CPU"); + +module_init(sc520_freq_init); +module_exit(sc520_freq_exit); + diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c new file mode 100644 index 00000000000..6ea3455def2 --- /dev/null +++ b/drivers/cpufreq/speedstep-centrino.c @@ -0,0 +1,633 @@ +/* + * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium + * M (part of the Centrino chipset). + * + * Since the original Pentium M, most new Intel CPUs support Enhanced + * SpeedStep. + * + * Despite the "SpeedStep" in the name, this is almost entirely unlike + * traditional SpeedStep. + * + * Modelled on speedstep.c + * + * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/sched.h> /* current */ +#include <linux/delay.h> +#include <linux/compiler.h> +#include <linux/gfp.h> + +#include <asm/msr.h> +#include <asm/processor.h> +#include <asm/cpufeature.h> + +#define PFX "speedstep-centrino: " +#define MAINTAINER "cpufreq@vger.kernel.org" + +#define INTEL_MSR_RANGE (0xffff) + +struct cpu_id +{ + __u8 x86; /* CPU family */ + __u8 x86_model; /* model */ + __u8 x86_mask; /* stepping */ +}; + +enum { + CPU_BANIAS, + CPU_DOTHAN_A1, + CPU_DOTHAN_A2, + CPU_DOTHAN_B0, + CPU_MP4HT_D0, + CPU_MP4HT_E0, +}; + +static const struct cpu_id cpu_ids[] = { + [CPU_BANIAS] = { 6, 9, 5 }, + [CPU_DOTHAN_A1] = { 6, 13, 1 }, + [CPU_DOTHAN_A2] = { 6, 13, 2 }, + [CPU_DOTHAN_B0] = { 6, 13, 6 }, + [CPU_MP4HT_D0] = {15, 3, 4 }, + [CPU_MP4HT_E0] = {15, 4, 1 }, +}; +#define N_IDS ARRAY_SIZE(cpu_ids) + +struct cpu_model +{ + const struct cpu_id *cpu_id; + const char *model_name; + unsigned max_freq; /* max clock in kHz */ + + struct cpufreq_frequency_table *op_points; /* clock/voltage pairs */ +}; +static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, + const struct cpu_id *x); + +/* Operating points for current CPU */ +static DEFINE_PER_CPU(struct cpu_model *, centrino_model); +static DEFINE_PER_CPU(const struct cpu_id *, centrino_cpu); + +static struct cpufreq_driver centrino_driver; + +#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE + +/* Computes the correct form for IA32_PERF_CTL MSR for a particular + frequency/voltage operating point; frequency in MHz, volts in mV. + This is stored as "index" in the structure. */ +#define OP(mhz, mv) \ + { \ + .frequency = (mhz) * 1000, \ + .index = (((mhz)/100) << 8) | ((mv - 700) / 16) \ + } + +/* + * These voltage tables were derived from the Intel Pentium M + * datasheet, document 25261202.pdf, Table 5. I have verified they + * are consistent with my IBM ThinkPad X31, which has a 1.3GHz Pentium + * M. 
+ */ + +/* Ultra Low Voltage Intel Pentium M processor 900MHz (Banias) */ +static struct cpufreq_frequency_table banias_900[] = +{ + OP(600, 844), + OP(800, 988), + OP(900, 1004), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Ultra Low Voltage Intel Pentium M processor 1000MHz (Banias) */ +static struct cpufreq_frequency_table banias_1000[] = +{ + OP(600, 844), + OP(800, 972), + OP(900, 988), + OP(1000, 1004), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Low Voltage Intel Pentium M processor 1.10GHz (Banias) */ +static struct cpufreq_frequency_table banias_1100[] = +{ + OP( 600, 956), + OP( 800, 1020), + OP( 900, 1100), + OP(1000, 1164), + OP(1100, 1180), + { .frequency = CPUFREQ_TABLE_END } +}; + + +/* Low Voltage Intel Pentium M processor 1.20GHz (Banias) */ +static struct cpufreq_frequency_table banias_1200[] = +{ + OP( 600, 956), + OP( 800, 1004), + OP( 900, 1020), + OP(1000, 1100), + OP(1100, 1164), + OP(1200, 1180), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.30GHz (Banias) */ +static struct cpufreq_frequency_table banias_1300[] = +{ + OP( 600, 956), + OP( 800, 1260), + OP(1000, 1292), + OP(1200, 1356), + OP(1300, 1388), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.40GHz (Banias) */ +static struct cpufreq_frequency_table banias_1400[] = +{ + OP( 600, 956), + OP( 800, 1180), + OP(1000, 1308), + OP(1200, 1436), + OP(1400, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.50GHz (Banias) */ +static struct cpufreq_frequency_table banias_1500[] = +{ + OP( 600, 956), + OP( 800, 1116), + OP(1000, 1228), + OP(1200, 1356), + OP(1400, 1452), + OP(1500, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.60GHz (Banias) */ +static struct cpufreq_frequency_table banias_1600[] = +{ + OP( 600, 956), + OP( 800, 1036), + OP(1000, 1164), + OP(1200, 1276), + OP(1400, 1420), + OP(1600, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; + +/* Intel Pentium M processor 1.70GHz (Banias) */ +static struct cpufreq_frequency_table banias_1700[] = +{ + OP( 600, 956), + OP( 800, 1004), + OP(1000, 1116), + OP(1200, 1228), + OP(1400, 1308), + OP(1700, 1484), + { .frequency = CPUFREQ_TABLE_END } +}; +#undef OP + +#define _BANIAS(cpuid, max, name) \ +{ .cpu_id = cpuid, \ + .model_name = "Intel(R) Pentium(R) M processor " name "MHz", \ + .max_freq = (max)*1000, \ + .op_points = banias_##max, \ +} +#define BANIAS(max) _BANIAS(&cpu_ids[CPU_BANIAS], max, #max) + +/* CPU models, their operating frequency range, and freq/voltage + operating points */ +static struct cpu_model models[] = +{ + _BANIAS(&cpu_ids[CPU_BANIAS], 900, " 900"), + BANIAS(1000), + BANIAS(1100), + BANIAS(1200), + BANIAS(1300), + BANIAS(1400), + BANIAS(1500), + BANIAS(1600), + BANIAS(1700), + + /* NULL model_name is a wildcard */ + { &cpu_ids[CPU_DOTHAN_A1], NULL, 0, NULL }, + { &cpu_ids[CPU_DOTHAN_A2], NULL, 0, NULL }, + { &cpu_ids[CPU_DOTHAN_B0], NULL, 0, NULL }, + { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL }, + { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL }, + + { NULL, } +}; +#undef _BANIAS +#undef BANIAS + +static int centrino_cpu_init_table(struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); + struct cpu_model *model; + + for(model = models; model->cpu_id != NULL; model++) + if (centrino_verify_cpu_id(cpu, model->cpu_id) && + (model->model_name == NULL || + strcmp(cpu->x86_model_id, model->model_name) == 0)) + break; + + if (model->cpu_id == NULL) { + /* No match at all */ + pr_debug("no support for 
CPU model \"%s\": " + "send /proc/cpuinfo to " MAINTAINER "\n", + cpu->x86_model_id); + return -ENOENT; + } + + if (model->op_points == NULL) { + /* Matched a non-match */ + pr_debug("no table support for CPU model \"%s\"\n", + cpu->x86_model_id); + pr_debug("try using the acpi-cpufreq driver\n"); + return -ENOENT; + } + + per_cpu(centrino_model, policy->cpu) = model; + + pr_debug("found \"%s\": max frequency: %dkHz\n", + model->model_name, model->max_freq); + + return 0; +} + +#else +static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) +{ + return -ENODEV; +} +#endif /* CONFIG_X86_SPEEDSTEP_CENTRINO_TABLE */ + +static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c, + const struct cpu_id *x) +{ + if ((c->x86 == x->x86) && + (c->x86_model == x->x86_model) && + (c->x86_mask == x->x86_mask)) + return 1; + return 0; +} + +/* To be called only after centrino_model is initialized */ +static unsigned extract_clock(unsigned msr, unsigned int cpu, int failsafe) +{ + int i; + + /* + * Extract clock in kHz from PERF_CTL value + * for centrino, as some DSDTs are buggy. + * Ideally, this can be done using the acpi_data structure. + */ + if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || + (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || + (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { + msr = (msr >> 8) & 0xff; + return msr * 100000; + } + + if ((!per_cpu(centrino_model, cpu)) || + (!per_cpu(centrino_model, cpu)->op_points)) + return 0; + + msr &= 0xffff; + for (i = 0; + per_cpu(centrino_model, cpu)->op_points[i].frequency + != CPUFREQ_TABLE_END; + i++) { + if (msr == per_cpu(centrino_model, cpu)->op_points[i].index) + return per_cpu(centrino_model, cpu)-> + op_points[i].frequency; + } + if (failsafe) + return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; + else + return 0; +} + +/* Return the current CPU frequency in kHz */ +static unsigned int get_cur_freq(unsigned int cpu) +{ + unsigned l, h; + unsigned clock_freq; + + rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h); + clock_freq = extract_clock(l, cpu, 0); + + if (unlikely(clock_freq == 0)) { + /* + * On some CPUs, we can see transient MSR values (which are + * not present in _PSS), while CPU is doing some automatic + * P-state transition (like TM2). Get the last freq set + * in PERF_CTL. + */ + rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h); + clock_freq = extract_clock(l, cpu, 1); + } + return clock_freq; +} + + +static int centrino_cpu_init(struct cpufreq_policy *policy) +{ + struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); + unsigned freq; + unsigned l, h; + int ret; + int i; + + /* Only Intel makes Enhanced Speedstep-capable CPUs */ + if (cpu->x86_vendor != X86_VENDOR_INTEL || + !cpu_has(cpu, X86_FEATURE_EST)) + return -ENODEV; + + if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) + centrino_driver.flags |= CPUFREQ_CONST_LOOPS; + + if (policy->cpu != 0) + return -ENODEV; + + for (i = 0; i < N_IDS; i++) + if (centrino_verify_cpu_id(cpu, &cpu_ids[i])) + break; + + if (i != N_IDS) + per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; + + if (!per_cpu(centrino_cpu, policy->cpu)) { + pr_debug("found unsupported CPU with " + "Enhanced SpeedStep: send /proc/cpuinfo to " + MAINTAINER "\n"); + return -ENODEV; + } + + if (centrino_cpu_init_table(policy)) { + return -ENODEV; + } + + /* Check to see if Enhanced SpeedStep is enabled, and try to + enable it if not. 
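+	   (The enable bit lives in MSR_IA32_MISC_ENABLE; the write below may
+	   be ignored on BIOS-locked parts, hence the read-back check.)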
*/
+	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+	if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+		l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
+		pr_debug("trying to enable Enhanced SpeedStep (%x)\n", l);
+		wrmsr(MSR_IA32_MISC_ENABLE, l, h);
+
+		/* check to see if it stuck */
+		rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+		if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+			printk(KERN_INFO PFX
+				"couldn't enable Enhanced SpeedStep\n");
+			return -ENODEV;
+		}
+	}
+
+	freq = get_cur_freq(policy->cpu);
+	policy->cpuinfo.transition_latency = 10000;
+						/* 10uS transition latency */
+	policy->cur = freq;
+
+	pr_debug("centrino_cpu_init: cur=%dkHz\n", policy->cur);
+
+	ret = cpufreq_frequency_table_cpuinfo(policy,
+		per_cpu(centrino_model, policy->cpu)->op_points);
+	if (ret)
+		return ret;
+
+	cpufreq_frequency_table_get_attr(
+		per_cpu(centrino_model, policy->cpu)->op_points, policy->cpu);
+
+	return 0;
+}
+
+static int centrino_cpu_exit(struct cpufreq_policy *policy)
+{
+	unsigned int cpu = policy->cpu;
+
+	if (!per_cpu(centrino_model, cpu))
+		return -ENODEV;
+
+	cpufreq_frequency_table_put_attr(cpu);
+
+	per_cpu(centrino_model, cpu) = NULL;
+
+	return 0;
+}
+
+/**
+ * centrino_verify - verifies a new CPUFreq policy
+ * @policy: new policy
+ *
+ * Limit must be within this model's frequency range, with at least
+ * one border included.
+ */
+static int centrino_verify(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy,
+			per_cpu(centrino_model, policy->cpu)->op_points);
+}
+
+/**
+ * centrino_target - set a new CPUFreq policy
+ * @policy: new policy
+ * @target_freq: the target frequency
+ * @relation: how that frequency relates to achieved frequency
+ *	(CPUFREQ_RELATION_L or CPUFREQ_RELATION_H)
+ *
+ * Sets a new CPUFreq policy.
+ */
+static int centrino_target(struct cpufreq_policy *policy,
+			   unsigned int target_freq,
+			   unsigned int relation)
+{
+	unsigned int newstate = 0;
+	unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu;
+	struct cpufreq_freqs freqs;
+	int retval = 0;
+	unsigned int j, k, first_cpu, tmp;
+	cpumask_var_t covered_cpus;
+
+	if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
+		return -ENOMEM;
+
+	if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
+		retval = -ENODEV;
+		goto out;
+	}
+
+	if (unlikely(cpufreq_frequency_table_target(policy,
+			per_cpu(centrino_model, cpu)->op_points,
+			target_freq,
+			relation,
+			&newstate))) {
+		retval = -EINVAL;
+		goto out;
+	}
+
+	first_cpu = 1;
+	for_each_cpu(j, policy->cpus) {
+		int good_cpu;
+
+		/* cpufreq holds the hotplug lock, so we are safe here */
+		if (!cpu_online(j))
+			continue;
+
+		/*
+		 * Support for SMP systems.
+		 * Make sure we are running on the CPU that wants to change
+		 * the frequency.
+		 */
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+			good_cpu = cpumask_any_and(policy->cpus,
+						   cpu_online_mask);
+		else
+			good_cpu = j;
+
+		if (good_cpu >= nr_cpu_ids) {
+			pr_debug("couldn't limit to CPUs in this domain\n");
+			retval = -EAGAIN;
+			if (first_cpu) {
+				/* We haven't started the transition yet.
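+				 * No PRECHANGE notification has been sent
+				 * and no MSR has been written yet, so it is
+				 * safe to bail out directly.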
*/ + goto out; + } + break; + } + + msr = per_cpu(centrino_model, cpu)->op_points[newstate].index; + + if (first_cpu) { + rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h); + if (msr == (oldmsr & 0xffff)) { + pr_debug("no change needed - msr was and needs " + "to be %x\n", oldmsr); + retval = 0; + goto out; + } + + freqs.old = extract_clock(oldmsr, cpu, 0); + freqs.new = extract_clock(msr, cpu, 0); + + pr_debug("target=%dkHz old=%d new=%d msr=%04x\n", + target_freq, freqs.old, freqs.new, msr); + + for_each_cpu(k, policy->cpus) { + if (!cpu_online(k)) + continue; + freqs.cpu = k; + cpufreq_notify_transition(&freqs, + CPUFREQ_PRECHANGE); + } + + first_cpu = 0; + /* all but 16 LSB are reserved, treat them with care */ + oldmsr &= ~0xffff; + msr &= 0xffff; + oldmsr |= msr; + } + + wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h); + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) + break; + + cpumask_set_cpu(j, covered_cpus); + } + + for_each_cpu(k, policy->cpus) { + if (!cpu_online(k)) + continue; + freqs.cpu = k; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + if (unlikely(retval)) { + /* + * We have failed halfway through the frequency change. + * We have sent callbacks to policy->cpus and + * MSRs have already been written on coverd_cpus. + * Best effort undo.. + */ + + for_each_cpu(j, covered_cpus) + wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h); + + tmp = freqs.new; + freqs.new = freqs.old; + freqs.old = tmp; + for_each_cpu(j, policy->cpus) { + if (!cpu_online(j)) + continue; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + } + retval = 0; + +out: + free_cpumask_var(covered_cpus); + return retval; +} + +static struct freq_attr* centrino_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver centrino_driver = { + .name = "centrino", /* should be speedstep-centrino, + but there's a 16 char limit */ + .init = centrino_cpu_init, + .exit = centrino_cpu_exit, + .verify = centrino_verify, + .target = centrino_target, + .get = get_cur_freq, + .attr = centrino_attr, + .owner = THIS_MODULE, +}; + + +/** + * centrino_init - initializes the Enhanced SpeedStep CPUFreq driver + * + * Initializes the Enhanced SpeedStep support. Returns -ENODEV on + * unsupported devices, -ENOENT if there's no voltage table for this + * particular CPU model, -EINVAL on problems during initiatization, + * and zero on success. + * + * This is quite picky. Not only does the CPU have to advertise the + * "est" flag in the cpuid capability flags, we look for a specific + * CPU model and stepping, and we need to have the exact model name in + * our voltage tables. That is, be paranoid about not releasing + * someone's valuable magic smoke. 
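+ * Matching is done against the cpu_ids[] family/model/stepping table
+ * and, for the table-driven Banias parts, against the exact model
+ * string in models[].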
+ */ +static int __init centrino_init(void) +{ + struct cpuinfo_x86 *cpu = &cpu_data(0); + + if (!cpu_has(cpu, X86_FEATURE_EST)) + return -ENODEV; + + return cpufreq_register_driver(¢rino_driver); +} + +static void __exit centrino_exit(void) +{ + cpufreq_unregister_driver(¢rino_driver); +} + +MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>"); +MODULE_DESCRIPTION ("Enhanced SpeedStep driver for Intel Pentium M processors."); +MODULE_LICENSE ("GPL"); + +late_initcall(centrino_init); +module_exit(centrino_exit); diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c new file mode 100644 index 00000000000..a748ce782fe --- /dev/null +++ b/drivers/cpufreq/speedstep-ich.c @@ -0,0 +1,448 @@ +/* + * (C) 2001 Dave Jones, Arjan van de ven. + * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> + * + * Licensed under the terms of the GNU GPL License version 2. + * Based upon reverse engineered information, and on Intel documentation + * for chipsets ICH2-M and ICH3-M. + * + * Many thanks to Ducrot Bruno for finding and fixing the last + * "missing link" for ICH2-M/ICH3-M support, and to Thomas Winkler + * for extensive testing. + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + + +/********************************************************************* + * SPEEDSTEP - DEFINITIONS * + *********************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/pci.h> +#include <linux/sched.h> + +#include "speedstep-lib.h" + + +/* speedstep_chipset: + * It is necessary to know which chipset is used. As accesses to + * this device occur at various places in this module, we need a + * static struct pci_dev * pointing to that device. + */ +static struct pci_dev *speedstep_chipset_dev; + + +/* speedstep_processor + */ +static enum speedstep_processor speedstep_processor; + +static u32 pmbase; + +/* + * There are only two frequency states for each processor. Values + * are in kHz for the time being. + */ +static struct cpufreq_frequency_table speedstep_freqs[] = { + {SPEEDSTEP_HIGH, 0}, + {SPEEDSTEP_LOW, 0}, + {0, CPUFREQ_TABLE_END}, +}; + + +/** + * speedstep_find_register - read the PMBASE address + * + * Returns: -ENODEV if no register could be found + */ +static int speedstep_find_register(void) +{ + if (!speedstep_chipset_dev) + return -ENODEV; + + /* get PMBASE */ + pci_read_config_dword(speedstep_chipset_dev, 0x40, &pmbase); + if (!(pmbase & 0x01)) { + printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); + return -ENODEV; + } + + pmbase &= 0xFFFFFFFE; + if (!pmbase) { + printk(KERN_ERR "speedstep-ich: could not find speedstep register\n"); + return -ENODEV; + } + + pr_debug("pmbase is 0x%x\n", pmbase); + return 0; +} + +/** + * speedstep_set_state - set the SpeedStep state + * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) + * + * Tries to change the SpeedStep state. Can be called from + * smp_call_function_single. 
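+ * The sequence below reads the state byte at pmbase + 0x50, disables
+ * bus-master arbitration via bit 0 of pmbase + 0x20, writes the new
+ * state, restores arbitration, and reads the byte back to verify the
+ * transition.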
+ */ +static void speedstep_set_state(unsigned int state) +{ + u8 pm2_blk; + u8 value; + unsigned long flags; + + if (state > 0x1) + return; + + /* Disable IRQs */ + local_irq_save(flags); + + /* read state */ + value = inb(pmbase + 0x50); + + pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + + /* write new state */ + value &= 0xFE; + value |= state; + + pr_debug("writing 0x%x to pmbase 0x%x + 0x50\n", value, pmbase); + + /* Disable bus master arbitration */ + pm2_blk = inb(pmbase + 0x20); + pm2_blk |= 0x01; + outb(pm2_blk, (pmbase + 0x20)); + + /* Actual transition */ + outb(value, (pmbase + 0x50)); + + /* Restore bus master arbitration */ + pm2_blk &= 0xfe; + outb(pm2_blk, (pmbase + 0x20)); + + /* check if transition was successful */ + value = inb(pmbase + 0x50); + + /* Enable IRQs */ + local_irq_restore(flags); + + pr_debug("read at pmbase 0x%x + 0x50 returned 0x%x\n", pmbase, value); + + if (state == (value & 0x1)) + pr_debug("change to %u MHz succeeded\n", + speedstep_get_frequency(speedstep_processor) / 1000); + else + printk(KERN_ERR "cpufreq: change failed - I/O error\n"); + + return; +} + +/* Wrapper for smp_call_function_single. */ +static void _speedstep_set_state(void *_state) +{ + speedstep_set_state(*(unsigned int *)_state); +} + +/** + * speedstep_activate - activate SpeedStep control in the chipset + * + * Tries to activate the SpeedStep status and control registers. + * Returns -EINVAL on an unsupported chipset, and zero on success. + */ +static int speedstep_activate(void) +{ + u16 value = 0; + + if (!speedstep_chipset_dev) + return -EINVAL; + + pci_read_config_word(speedstep_chipset_dev, 0x00A0, &value); + if (!(value & 0x08)) { + value |= 0x08; + pr_debug("activating SpeedStep (TM) registers\n"); + pci_write_config_word(speedstep_chipset_dev, 0x00A0, value); + } + + return 0; +} + + +/** + * speedstep_detect_chipset - detect the Southbridge which contains SpeedStep logic + * + * Detects ICH2-M, ICH3-M and ICH4-M so far. The pci_dev points to + * the LPC bridge / PM module which contains all power-management + * functions. Returns the SPEEDSTEP_CHIPSET_-number for the detected + * chipset, or zero on failure. + */ +static unsigned int speedstep_detect_chipset(void) +{ + speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801DB_12, + PCI_ANY_ID, PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) + return 4; /* 4-M */ + + speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801CA_12, + PCI_ANY_ID, PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) + return 3; /* 3-M */ + + + speedstep_chipset_dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82801BA_10, + PCI_ANY_ID, PCI_ANY_ID, + NULL); + if (speedstep_chipset_dev) { + /* speedstep.c causes lockups on Dell Inspirons 8000 and + * 8100 which use a pretty old revision of the 82815 + * host brige. Abort on these systems. 
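+		 * (The revision check below treats 82815 host bridges older
+		 * than revision 5 as unsupported and reports no chipset.)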
+ */ + static struct pci_dev *hostbridge; + + hostbridge = pci_get_subsys(PCI_VENDOR_ID_INTEL, + PCI_DEVICE_ID_INTEL_82815_MC, + PCI_ANY_ID, PCI_ANY_ID, + NULL); + + if (!hostbridge) + return 2; /* 2-M */ + + if (hostbridge->revision < 5) { + pr_debug("hostbridge does not support speedstep\n"); + speedstep_chipset_dev = NULL; + pci_dev_put(hostbridge); + return 0; + } + + pci_dev_put(hostbridge); + return 2; /* 2-M */ + } + + return 0; +} + +static void get_freq_data(void *_speed) +{ + unsigned int *speed = _speed; + + *speed = speedstep_get_frequency(speedstep_processor); +} + +static unsigned int speedstep_get(unsigned int cpu) +{ + unsigned int speed; + + /* You're supposed to ensure CPU is online. */ + if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0) + BUG(); + + pr_debug("detected %u kHz as current frequency\n", speed); + return speed; +} + +/** + * speedstep_target - set a new CPUFreq policy + * @policy: new policy + * @target_freq: the target frequency + * @relation: how that frequency relates to achieved frequency + * (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * + * Sets a new CPUFreq policy. + */ +static int speedstep_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + unsigned int newstate = 0, policy_cpu; + struct cpufreq_freqs freqs; + int i; + + if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], + target_freq, relation, &newstate)) + return -EINVAL; + + policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); + freqs.old = speedstep_get(policy_cpu); + freqs.new = speedstep_freqs[newstate].frequency; + freqs.cpu = policy->cpu; + + pr_debug("transiting from %u to %u kHz\n", freqs.old, freqs.new); + + /* no transition necessary */ + if (freqs.old == freqs.new) + return 0; + + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + } + + smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate, + true); + + for_each_cpu(i, policy->cpus) { + freqs.cpu = i; + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + } + + return 0; +} + + +/** + * speedstep_verify - verifies a new CPUFreq policy + * @policy: new policy + * + * Limit must be within speedstep_low_freq and speedstep_high_freq, with + * at least one border included. 
+ */ +static int speedstep_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); +} + +struct get_freqs { + struct cpufreq_policy *policy; + int ret; +}; + +static void get_freqs_on_cpu(void *_get_freqs) +{ + struct get_freqs *get_freqs = _get_freqs; + + get_freqs->ret = + speedstep_get_freqs(speedstep_processor, + &speedstep_freqs[SPEEDSTEP_LOW].frequency, + &speedstep_freqs[SPEEDSTEP_HIGH].frequency, + &get_freqs->policy->cpuinfo.transition_latency, + &speedstep_set_state); +} + +static int speedstep_cpu_init(struct cpufreq_policy *policy) +{ + int result; + unsigned int policy_cpu, speed; + struct get_freqs gf; + + /* only run on CPU to be set, or on its sibling */ +#ifdef CONFIG_SMP + cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); +#endif + policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); + + /* detect low and high frequency and transition latency */ + gf.policy = policy; + smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1); + if (gf.ret) + return gf.ret; + + /* get current speed setting */ + speed = speedstep_get(policy_cpu); + if (!speed) + return -EIO; + + pr_debug("currently at %s speed setting - %i MHz\n", + (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) + ? "low" : "high", + (speed / 1000)); + + /* cpuinfo and default policy values */ + policy->cur = speed; + + result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); + if (result) + return result; + + cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); + + return 0; +} + + +static int speedstep_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static struct freq_attr *speedstep_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + + +static struct cpufreq_driver speedstep_driver = { + .name = "speedstep-ich", + .verify = speedstep_verify, + .target = speedstep_target, + .init = speedstep_cpu_init, + .exit = speedstep_cpu_exit, + .get = speedstep_get, + .owner = THIS_MODULE, + .attr = speedstep_attr, +}; + + +/** + * speedstep_init - initializes the SpeedStep CPUFreq driver + * + * Initializes the SpeedStep support. Returns -ENODEV on unsupported + * devices, -EINVAL on problems during initiatization, and zero on + * success. + */ +static int __init speedstep_init(void) +{ + /* detect processor */ + speedstep_processor = speedstep_detect_processor(); + if (!speedstep_processor) { + pr_debug("Intel(R) SpeedStep(TM) capable processor " + "not found\n"); + return -ENODEV; + } + + /* detect chipset */ + if (!speedstep_detect_chipset()) { + pr_debug("Intel(R) SpeedStep(TM) for this chipset not " + "(yet) available.\n"); + return -ENODEV; + } + + /* activate speedstep support */ + if (speedstep_activate()) { + pci_dev_put(speedstep_chipset_dev); + return -EINVAL; + } + + if (speedstep_find_register()) + return -ENODEV; + + return cpufreq_register_driver(&speedstep_driver); +} + + +/** + * speedstep_exit - unregisters SpeedStep support + * + * Unregisters SpeedStep support. 
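+ * It also drops the reference on the chipset PCI device taken in
+ * speedstep_detect_chipset().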
+ */ +static void __exit speedstep_exit(void) +{ + pci_dev_put(speedstep_chipset_dev); + cpufreq_unregister_driver(&speedstep_driver); +} + + +MODULE_AUTHOR("Dave Jones <davej@redhat.com>, " + "Dominik Brodowski <linux@brodo.de>"); +MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets " + "with ICH-M southbridges."); +MODULE_LICENSE("GPL"); + +module_init(speedstep_init); +module_exit(speedstep_exit); diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c new file mode 100644 index 00000000000..8af2d2fd9d5 --- /dev/null +++ b/drivers/cpufreq/speedstep-lib.c @@ -0,0 +1,478 @@ +/* + * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> + * + * Licensed under the terms of the GNU GPL License version 2. + * + * Library for common functions for Intel SpeedStep v.1 and v.2 support + * + * BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous* + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/cpufreq.h> + +#include <asm/msr.h> +#include <asm/tsc.h> +#include "speedstep-lib.h" + +#define PFX "speedstep-lib: " + +#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK +static int relaxed_check; +#else +#define relaxed_check 0 +#endif + +/********************************************************************* + * GET PROCESSOR CORE SPEED IN KHZ * + *********************************************************************/ + +static unsigned int pentium3_get_frequency(enum speedstep_processor processor) +{ + /* See table 14 of p3_ds.pdf and table 22 of 29834003.pdf */ + struct { + unsigned int ratio; /* Frequency Multiplier (x10) */ + u8 bitmap; /* power on configuration bits + [27, 25:22] (in MSR 0x2a) */ + } msr_decode_mult[] = { + { 30, 0x01 }, + { 35, 0x05 }, + { 40, 0x02 }, + { 45, 0x06 }, + { 50, 0x00 }, + { 55, 0x04 }, + { 60, 0x0b }, + { 65, 0x0f }, + { 70, 0x09 }, + { 75, 0x0d }, + { 80, 0x0a }, + { 85, 0x26 }, + { 90, 0x20 }, + { 100, 0x2b }, + { 0, 0xff } /* error or unknown value */ + }; + + /* PIII(-M) FSB settings: see table b1-b of 24547206.pdf */ + struct { + unsigned int value; /* Front Side Bus speed in MHz */ + u8 bitmap; /* power on configuration bits [18: 19] + (in MSR 0x2a) */ + } msr_decode_fsb[] = { + { 66, 0x0 }, + { 100, 0x2 }, + { 133, 0x1 }, + { 0, 0xff} + }; + + u32 msr_lo, msr_tmp; + int i = 0, j = 0; + + /* read MSR 0x2a - we only need the low 32 bits */ + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); + pr_debug("P3 - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + msr_tmp = msr_lo; + + /* decode the FSB */ + msr_tmp &= 0x00c0000; + msr_tmp >>= 18; + while (msr_tmp != msr_decode_fsb[i].bitmap) { + if (msr_decode_fsb[i].bitmap == 0xff) + return 0; + i++; + } + + /* decode the multiplier */ + if (processor == SPEEDSTEP_CPU_PIII_C_EARLY) { + pr_debug("workaround for early PIIIs\n"); + msr_lo &= 0x03c00000; + } else + msr_lo &= 0x0bc00000; + msr_lo >>= 22; + while (msr_lo != msr_decode_mult[j].bitmap) { + if (msr_decode_mult[j].bitmap == 0xff) + return 0; + j++; + } + + pr_debug("speed is %u\n", + (msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100)); + + return msr_decode_mult[j].ratio * msr_decode_fsb[i].value * 100; +} + + +static unsigned int pentiumM_get_frequency(void) +{ + u32 msr_lo, msr_tmp; + + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); + pr_debug("PM - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp); + + /* see table B-2 of 24547212.pdf */ + if (msr_lo & 0x00040000) { + printk(KERN_DEBUG PFX "PM - invalid 
FSB: 0x%x 0x%x\n", + msr_lo, msr_tmp); + return 0; + } + + msr_tmp = (msr_lo >> 22) & 0x1f; + pr_debug("bits 22-26 are 0x%x, speed is %u\n", + msr_tmp, (msr_tmp * 100 * 1000)); + + return msr_tmp * 100 * 1000; +} + +static unsigned int pentium_core_get_frequency(void) +{ + u32 fsb = 0; + u32 msr_lo, msr_tmp; + int ret; + + rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp); + /* see table B-2 of 25366920.pdf */ + switch (msr_lo & 0x07) { + case 5: + fsb = 100000; + break; + case 1: + fsb = 133333; + break; + case 3: + fsb = 166667; + break; + case 2: + fsb = 200000; + break; + case 0: + fsb = 266667; + break; + case 4: + fsb = 333333; + break; + default: + printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value"); + } + + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp); + pr_debug("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", + msr_lo, msr_tmp); + + msr_tmp = (msr_lo >> 22) & 0x1f; + pr_debug("bits 22-26 are 0x%x, speed is %u\n", + msr_tmp, (msr_tmp * fsb)); + + ret = (msr_tmp * fsb); + return ret; +} + + +static unsigned int pentium4_get_frequency(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + u32 msr_lo, msr_hi, mult; + unsigned int fsb = 0; + unsigned int ret; + u8 fsb_code; + + /* Pentium 4 Model 0 and 1 do not have the Core Clock Frequency + * to System Bus Frequency Ratio Field in the Processor Frequency + * Configuration Register of the MSR. Therefore the current + * frequency cannot be calculated and has to be measured. + */ + if (c->x86_model < 2) + return cpu_khz; + + rdmsr(0x2c, msr_lo, msr_hi); + + pr_debug("P4 - MSR_EBC_FREQUENCY_ID: 0x%x 0x%x\n", msr_lo, msr_hi); + + /* decode the FSB: see IA-32 Intel (C) Architecture Software + * Developer's Manual, Volume 3: System Prgramming Guide, + * revision #12 in Table B-1: MSRs in the Pentium 4 and + * Intel Xeon Processors, on page B-4 and B-5. + */ + fsb_code = (msr_lo >> 16) & 0x7; + switch (fsb_code) { + case 0: + fsb = 100 * 1000; + break; + case 1: + fsb = 13333 * 10; + break; + case 2: + fsb = 200 * 1000; + break; + } + + if (!fsb) + printk(KERN_DEBUG PFX "couldn't detect FSB speed. " + "Please send an e-mail to <linux@brodo.de>\n"); + + /* Multiplier. */ + mult = msr_lo >> 24; + + pr_debug("P4 - FSB %u kHz; Multiplier %u; Speed %u kHz\n", + fsb, mult, (fsb * mult)); + + ret = (fsb * mult); + return ret; +} + + +/* Warning: may get called from smp_call_function_single. 
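+ * It therefore runs in IPI context and must not sleep or take
+ * sleeping locks.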
*/ +unsigned int speedstep_get_frequency(enum speedstep_processor processor) +{ + switch (processor) { + case SPEEDSTEP_CPU_PCORE: + return pentium_core_get_frequency(); + case SPEEDSTEP_CPU_PM: + return pentiumM_get_frequency(); + case SPEEDSTEP_CPU_P4D: + case SPEEDSTEP_CPU_P4M: + return pentium4_get_frequency(); + case SPEEDSTEP_CPU_PIII_T: + case SPEEDSTEP_CPU_PIII_C: + case SPEEDSTEP_CPU_PIII_C_EARLY: + return pentium3_get_frequency(processor); + default: + return 0; + }; + return 0; +} +EXPORT_SYMBOL_GPL(speedstep_get_frequency); + + +/********************************************************************* + * DETECT SPEEDSTEP-CAPABLE PROCESSOR * + *********************************************************************/ + +unsigned int speedstep_detect_processor(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 ebx, msr_lo, msr_hi; + + pr_debug("x86: %x, model: %x\n", c->x86, c->x86_model); + + if ((c->x86_vendor != X86_VENDOR_INTEL) || + ((c->x86 != 6) && (c->x86 != 0xF))) + return 0; + + if (c->x86 == 0xF) { + /* Intel Mobile Pentium 4-M + * or Intel Mobile Pentium 4 with 533 MHz FSB */ + if (c->x86_model != 2) + return 0; + + ebx = cpuid_ebx(0x00000001); + ebx &= 0x000000FF; + + pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask); + + switch (c->x86_mask) { + case 4: + /* + * B-stepping [M-P4-M] + * sample has ebx = 0x0f, production has 0x0e. + */ + if ((ebx == 0x0e) || (ebx == 0x0f)) + return SPEEDSTEP_CPU_P4M; + break; + case 7: + /* + * C-stepping [M-P4-M] + * needs to have ebx=0x0e, else it's a celeron: + * cf. 25130917.pdf / page 7, footnote 5 even + * though 25072120.pdf / page 7 doesn't say + * samples are only of B-stepping... + */ + if (ebx == 0x0e) + return SPEEDSTEP_CPU_P4M; + break; + case 9: + /* + * D-stepping [M-P4-M or M-P4/533] + * + * this is totally strange: CPUID 0x0F29 is + * used by M-P4-M, M-P4/533 and(!) Celeron CPUs. + * The latter need to be sorted out as they don't + * support speedstep. + * Celerons with CPUID 0x0F29 may have either + * ebx=0x8 or 0xf -- 25130917.pdf doesn't say anything + * specific. + * M-P4-Ms may have either ebx=0xe or 0xf [see above] + * M-P4/533 have either ebx=0xe or 0xf. [25317607.pdf] + * also, M-P4M HTs have ebx=0x8, too + * For now, they are distinguished by the model_id + * string + */ + if ((ebx == 0x0e) || + (strstr(c->x86_model_id, + "Mobile Intel(R) Pentium(R) 4") != NULL)) + return SPEEDSTEP_CPU_P4M; + break; + default: + break; + } + return 0; + } + + switch (c->x86_model) { + case 0x0B: /* Intel PIII [Tualatin] */ + /* cpuid_ebx(1) is 0x04 for desktop PIII, + * 0x06 for mobile PIII-M */ + ebx = cpuid_ebx(0x00000001); + pr_debug("ebx is %x\n", ebx); + + ebx &= 0x000000FF; + + if (ebx != 0x06) + return 0; + + /* So far all PIII-M processors support SpeedStep. See + * Intel's 24540640.pdf of June 2003 + */ + return SPEEDSTEP_CPU_PIII_T; + + case 0x08: /* Intel PIII [Coppermine] */ + + /* all mobile PIII Coppermines have FSB 100 MHz + * ==> sort out a few desktop PIIIs. */ + rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_hi); + pr_debug("Coppermine: MSR_IA32_EBL_CR_POWERON is 0x%x, 0x%x\n", + msr_lo, msr_hi); + msr_lo &= 0x00c0000; + if (msr_lo != 0x0080000) + return 0; + + /* + * If the processor is a mobile version, + * platform ID has bit 50 set + * it has SpeedStep technology if either + * bit 56 or 57 is set + */ + rdmsr(MSR_IA32_PLATFORM_ID, msr_lo, msr_hi); + pr_debug("Coppermine: MSR_IA32_PLATFORM ID is 0x%x, 0x%x\n", + msr_lo, msr_hi); + if ((msr_hi & (1<<18)) && + (relaxed_check ? 
1 : (msr_hi & (3<<24)))) { + if (c->x86_mask == 0x01) { + pr_debug("early PIII version\n"); + return SPEEDSTEP_CPU_PIII_C_EARLY; + } else + return SPEEDSTEP_CPU_PIII_C; + } + + default: + return 0; + } +} +EXPORT_SYMBOL_GPL(speedstep_detect_processor); + + +/********************************************************************* + * DETECT SPEEDSTEP SPEEDS * + *********************************************************************/ + +unsigned int speedstep_get_freqs(enum speedstep_processor processor, + unsigned int *low_speed, + unsigned int *high_speed, + unsigned int *transition_latency, + void (*set_state) (unsigned int state)) +{ + unsigned int prev_speed; + unsigned int ret = 0; + unsigned long flags; + struct timeval tv1, tv2; + + if ((!processor) || (!low_speed) || (!high_speed) || (!set_state)) + return -EINVAL; + + pr_debug("trying to determine both speeds\n"); + + /* get current speed */ + prev_speed = speedstep_get_frequency(processor); + if (!prev_speed) + return -EIO; + + pr_debug("previous speed is %u\n", prev_speed); + + local_irq_save(flags); + + /* switch to low state */ + set_state(SPEEDSTEP_LOW); + *low_speed = speedstep_get_frequency(processor); + if (!*low_speed) { + ret = -EIO; + goto out; + } + + pr_debug("low speed is %u\n", *low_speed); + + /* start latency measurement */ + if (transition_latency) + do_gettimeofday(&tv1); + + /* switch to high state */ + set_state(SPEEDSTEP_HIGH); + + /* end latency measurement */ + if (transition_latency) + do_gettimeofday(&tv2); + + *high_speed = speedstep_get_frequency(processor); + if (!*high_speed) { + ret = -EIO; + goto out; + } + + pr_debug("high speed is %u\n", *high_speed); + + if (*low_speed == *high_speed) { + ret = -ENODEV; + goto out; + } + + /* switch to previous state, if necessary */ + if (*high_speed != prev_speed) + set_state(SPEEDSTEP_LOW); + + if (transition_latency) { + *transition_latency = (tv2.tv_sec - tv1.tv_sec) * USEC_PER_SEC + + tv2.tv_usec - tv1.tv_usec; + pr_debug("transition latency is %u uSec\n", *transition_latency); + + /* convert uSec to nSec and add 20% for safety reasons */ + *transition_latency *= 1200; + + /* check if the latency measurement is too high or too low + * and set it to a safe value (500uSec) in that case + */ + if (*transition_latency > 10000000 || + *transition_latency < 50000) { + printk(KERN_WARNING PFX "frequency transition " + "measured seems out of range (%u " + "nSec), falling back to a safe one of" + "%u nSec.\n", + *transition_latency, 500000); + *transition_latency = 500000; + } + } + +out: + local_irq_restore(flags); + return ret; +} +EXPORT_SYMBOL_GPL(speedstep_get_freqs); + +#ifdef CONFIG_X86_SPEEDSTEP_RELAXED_CAP_CHECK +module_param(relaxed_check, int, 0444); +MODULE_PARM_DESC(relaxed_check, + "Don't do all checks for speedstep capability."); +#endif + +MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>"); +MODULE_DESCRIPTION("Library for Intel SpeedStep 1 or 2 cpufreq drivers."); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/speedstep-lib.h b/drivers/cpufreq/speedstep-lib.h new file mode 100644 index 00000000000..70d9cea1219 --- /dev/null +++ b/drivers/cpufreq/speedstep-lib.h @@ -0,0 +1,49 @@ +/* + * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> + * + * Licensed under the terms of the GNU GPL License version 2. + * + * Library for common functions for Intel SpeedStep v.1 and v.2 support + * + * BIG FAT DISCLAIMER: Work in progress code. 
Possibly *dangerous* */ + + + +/* processors */ +enum speedstep_processor { + SPEEDSTEP_CPU_PIII_C_EARLY = 0x00000001, /* Coppermine core */ + SPEEDSTEP_CPU_PIII_C = 0x00000002, /* Coppermine core */ + SPEEDSTEP_CPU_PIII_T = 0x00000003, /* Tualatin core */ + SPEEDSTEP_CPU_P4M = 0x00000004, /* P4-M */ +/* the following processors are not speedstep-capable and are not auto-detected + * in speedstep_detect_processor(). However, their speed can be detected using + * the speedstep_get_frequency() call. */ + SPEEDSTEP_CPU_PM = 0xFFFFFF03, /* Pentium M */ + SPEEDSTEP_CPU_P4D = 0xFFFFFF04, /* desktop P4 */ + SPEEDSTEP_CPU_PCORE = 0xFFFFFF05, /* Core */ +}; + +/* speedstep states -- only two of them */ + +#define SPEEDSTEP_HIGH 0x00000000 +#define SPEEDSTEP_LOW 0x00000001 + + +/* detect a speedstep-capable processor */ +extern enum speedstep_processor speedstep_detect_processor(void); + +/* detect the current speed (in kHz) of the processor */ +extern unsigned int speedstep_get_frequency(enum speedstep_processor processor); + + +/* detect the low and high speeds of the processor. The callback + * set_state()'s first argument is either SPEEDSTEP_HIGH or + * SPEEDSTEP_LOW; the second argument is zero so that no + * cpufreq_notify_transition calls are initiated. + */ +extern unsigned int speedstep_get_freqs(enum speedstep_processor processor, + unsigned int *low_speed, + unsigned int *high_speed, + unsigned int *transition_latency, + void (*set_state) (unsigned int state)); diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c new file mode 100644 index 00000000000..c76ead3490b --- /dev/null +++ b/drivers/cpufreq/speedstep-smi.c @@ -0,0 +1,464 @@ +/* + * Intel SpeedStep SMI driver. + * + * (C) 2003 Hiroshi Miura <miura@da-cha.org> + * + * Licensed under the terms of the GNU GPL License version 2. + * + */ + + +/********************************************************************* + * SPEEDSTEP - DEFINITIONS * + *********************************************************************/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/cpufreq.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <asm/ist.h> + +#include "speedstep-lib.h" + +/* speedstep system management interface port/command. + * + * These parameters are normally obtained from the IST-SMI BIOS call. + * If the user supplies them, the supplied values are used instead. + * + */ +static int smi_port; +static int smi_cmd; +static unsigned int smi_sig; + +/* info about the processor */ +static enum speedstep_processor speedstep_processor; + +/* + * There are only two frequency states for each processor. Values + * are in kHz for the time being. + */ +static struct cpufreq_frequency_table speedstep_freqs[] = { + {SPEEDSTEP_HIGH, 0}, + {SPEEDSTEP_LOW, 0}, + {0, CPUFREQ_TABLE_END}, +}; + +#define GET_SPEEDSTEP_OWNER 0 +#define GET_SPEEDSTEP_STATE 1 +#define SET_SPEEDSTEP_STATE 2 +#define GET_SPEEDSTEP_FREQS 4 + +/* how often shall the SMI call be tried if it failed, e.g. because + * of DMA activity going on?
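+ * (speedstep_set_state() below backs off by an extra 50 ms per retry, + * so with SMI_TRIES == 5 a persistently failing call costs roughly + * 50 + 100 + 150 + 200 + 250 = 750 ms of delay before we give up)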
*/ +#define SMI_TRIES 5 + +/** + * speedstep_smi_ownership + */ +static int speedstep_smi_ownership(void) +{ + u32 command, result, magic, dummy; + u32 function = GET_SPEEDSTEP_OWNER; + unsigned char magic_data[] = "Copyright (c) 1999 Intel Corporation"; + + command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); + magic = virt_to_phys(magic_data); + + pr_debug("trying to obtain ownership with command %x at port %x\n", + command, smi_port); + + __asm__ __volatile__( + "push %%ebp\n" + "out %%al, (%%dx)\n" + "pop %%ebp\n" + : "=D" (result), + "=a" (dummy), "=b" (dummy), "=c" (dummy), "=d" (dummy), + "=S" (dummy) + : "a" (command), "b" (function), "c" (0), "d" (smi_port), + "D" (0), "S" (magic) + : "memory" + ); + + pr_debug("result is %x\n", result); + + return result; +} + +/** + * speedstep_smi_get_freqs - get SpeedStep preferred & current freq. + * @low: the low frequency value is placed here + * @high: the high frequency value is placed here + * + * Only available on later SpeedStep-enabled systems, returns false results or + * even hangs [cf. bugme.osdl.org # 1422] on earlier systems. Empirical testing + * shows that the latter occurs if !(ist_info.event & 0xFFFF). + */ +static int speedstep_smi_get_freqs(unsigned int *low, unsigned int *high) +{ + u32 command, result = 0, edi, high_mhz, low_mhz, dummy; + u32 state = 0; + u32 function = GET_SPEEDSTEP_FREQS; + + if (!(ist_info.event & 0xFFFF)) { + pr_debug("bug #1422 -- can't read freqs from BIOS\n"); + return -ENODEV; + } + + command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); + + pr_debug("trying to determine frequencies with command %x at port %x\n", + command, smi_port); + + __asm__ __volatile__( + "push %%ebp\n" + "out %%al, (%%dx)\n" + "pop %%ebp" + : "=a" (result), + "=b" (high_mhz), + "=c" (low_mhz), + "=d" (state), "=D" (edi), "=S" (dummy) + : "a" (command), + "b" (function), + "c" (state), + "d" (smi_port), "S" (0), "D" (0) + ); + + pr_debug("result %x, low_freq %u, high_freq %u\n", + result, low_mhz, high_mhz); + + /* abort if results are obviously incorrect... 
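+ * (high_mhz and low_mhz are still in MHz at this point; even the slowest + * SpeedStep-capable parts sum to well above 600 MHz, so a smaller total + * almost certainly means the BIOS call returned garbage)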
*/ + if ((high_mhz + low_mhz) < 600) + return -EINVAL; + + *high = high_mhz * 1000; + *low = low_mhz * 1000; + + return result; +} + +/** + * speedstep_get_state - get the current SpeedStep state + * + * Returns the current state, SPEEDSTEP_LOW or SPEEDSTEP_HIGH. + */ +static int speedstep_get_state(void) +{ + u32 function = GET_SPEEDSTEP_STATE; + u32 result, state, edi, command, dummy; + + command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); + + pr_debug("trying to determine current setting with command %x " + "at port %x\n", command, smi_port); + + __asm__ __volatile__( + "push %%ebp\n" + "out %%al, (%%dx)\n" + "pop %%ebp\n" + : "=a" (result), + "=b" (state), "=D" (edi), + "=c" (dummy), "=d" (dummy), "=S" (dummy) + : "a" (command), "b" (function), "c" (0), + "d" (smi_port), "S" (0), "D" (0) + ); + + pr_debug("state is %x, result is %x\n", state, result); + + return state & 1; +} + + +/** + * speedstep_set_state - set the SpeedStep state + * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH) + * + */ +static void speedstep_set_state(unsigned int state) +{ + unsigned int result = 0, command, new_state, dummy; + unsigned long flags; + unsigned int function = SET_SPEEDSTEP_STATE; + unsigned int retry = 0; + + if (state > 0x1) + return; + + /* Disable IRQs */ + local_irq_save(flags); + + command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff); + + pr_debug("trying to set frequency to state %u " + "with command %x at port %x\n", + state, command, smi_port); + + do { + if (retry) { + pr_debug("retry %u, previous result %u, waiting...\n", + retry, result); + mdelay(retry * 50); + } + retry++; + __asm__ __volatile__( + "push %%ebp\n" + "out %%al, (%%dx)\n" + "pop %%ebp" + : "=b" (new_state), "=D" (result), + "=c" (dummy), "=a" (dummy), + "=d" (dummy), "=S" (dummy) + : "a" (command), "b" (function), "c" (state), + "d" (smi_port), "S" (0), "D" (0) + ); + } while ((new_state != state) && (retry <= SMI_TRIES)); + + /* enable IRQs */ + local_irq_restore(flags); + + if (new_state == state) + pr_debug("change to %u MHz succeeded after %u tries " + "with result %u\n", + (speedstep_freqs[new_state].frequency / 1000), + retry, result); + else + printk(KERN_ERR "cpufreq: change to state %u " + "failed with new_state %u and result %u\n", + state, new_state, result); + + return; +} + + +/** + * speedstep_target - set a new CPUFreq policy + * @policy: new policy + * @target_freq: new freq + * @relation: how to choose the frequency (CPUFREQ_RELATION_L or CPUFREQ_RELATION_H) + * + * Sets a new CPUFreq policy/freq. + */ +static int speedstep_target(struct cpufreq_policy *policy, + unsigned int target_freq, unsigned int relation) +{ + unsigned int newstate = 0; + struct cpufreq_freqs freqs; + + if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], + target_freq, relation, &newstate)) + return -EINVAL; + + freqs.old = speedstep_freqs[speedstep_get_state()].frequency; + freqs.new = speedstep_freqs[newstate].frequency; + freqs.cpu = 0; /* this is a UP-only driver */ + + if (freqs.old == freqs.new) + return 0; + + cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); + speedstep_set_state(newstate); + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + + +/** + * speedstep_verify - verifies a new CPUFreq policy + * @policy: new policy + * + * Limit must be within speedstep_low_freq and speedstep_high_freq, with + * at least one border included.
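+ * In other words, the requested [policy->min, policy->max] range must + * contain at least one of the two entries of speedstep_freqs[]; + * cpufreq_frequency_table_verify() adjusts the policy as needed.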
+ */ +static int speedstep_verify(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]); +} + + +static int speedstep_cpu_init(struct cpufreq_policy *policy) +{ + int result; + unsigned int speed, state; + unsigned int *low, *high; + + /* capability check */ + if (policy->cpu != 0) + return -ENODEV; + + result = speedstep_smi_ownership(); + if (result) { + pr_debug("failed to acquire ownership of the SMI interface.\n"); + return -EINVAL; + } + + /* detect low and high frequency */ + low = &speedstep_freqs[SPEEDSTEP_LOW].frequency; + high = &speedstep_freqs[SPEEDSTEP_HIGH].frequency; + + result = speedstep_smi_get_freqs(low, high); + if (result) { + /* fall back to speedstep_lib.c detection mechanism: + * try both states out */ + pr_debug("could not detect low and high frequencies " + "by SMI call.\n"); + result = speedstep_get_freqs(speedstep_processor, + low, high, + NULL, + &speedstep_set_state); + + if (result) { + pr_debug("could not detect two different speeds" + " -- aborting.\n"); + return result; + } else + pr_debug("workaround worked.\n"); + } + + /* get current speed setting */ + state = speedstep_get_state(); + speed = speedstep_freqs[state].frequency; + + pr_debug("currently at %s speed setting - %i MHz\n", + (speed == speedstep_freqs[SPEEDSTEP_LOW].frequency) + ? "low" : "high", + (speed / 1000)); + + /* cpuinfo and default policy values */ + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; + policy->cur = speed; + + result = cpufreq_frequency_table_cpuinfo(policy, speedstep_freqs); + if (result) + return result; + + cpufreq_frequency_table_get_attr(speedstep_freqs, policy->cpu); + + return 0; +} + +static int speedstep_cpu_exit(struct cpufreq_policy *policy) +{ + cpufreq_frequency_table_put_attr(policy->cpu); + return 0; +} + +static unsigned int speedstep_get(unsigned int cpu) +{ + if (cpu) + return -ENODEV; + return speedstep_get_frequency(speedstep_processor); +} + + +static int speedstep_resume(struct cpufreq_policy *policy) +{ + int result = speedstep_smi_ownership(); + + if (result) + pr_debug("failed to re-acquire ownership of the SMI interface.\n"); + + return result; +} + +static struct freq_attr *speedstep_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static struct cpufreq_driver speedstep_driver = { + .name = "speedstep-smi", + .verify = speedstep_verify, + .target = speedstep_target, + .init = speedstep_cpu_init, + .exit = speedstep_cpu_exit, + .get = speedstep_get, + .resume = speedstep_resume, + .owner = THIS_MODULE, + .attr = speedstep_attr, +}; + +/** + * speedstep_init - initializes the SpeedStep CPUFreq driver + * + * Initializes the SpeedStep support. Returns -ENODEV on unsupported + * BIOS, -EINVAL on problems during initialization, and zero on + * success.
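+ * + * If the BIOS does not announce the IST interface, the port and command + * can be supplied by hand, e.g. "modprobe speedstep-smi smi_port=0xb2 + * smi_cmd=0x82 smi_sig=1" (0xb2 and 0x82 are Intel's default settings, + * and smi_sig=1 fakes the IST signature; see the parameter descriptions + * below).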
+ */ +static int __init speedstep_init(void) +{ + speedstep_processor = speedstep_detect_processor(); + + switch (speedstep_processor) { + case SPEEDSTEP_CPU_PIII_T: + case SPEEDSTEP_CPU_PIII_C: + case SPEEDSTEP_CPU_PIII_C_EARLY: + break; + default: + speedstep_processor = 0; + } + + if (!speedstep_processor) { + pr_debug("No supported Intel CPU detected.\n"); + return -ENODEV; + } + + pr_debug("signature:0x%.8x, command:0x%.8x, " + "event:0x%.8x, perf_level:0x%.8x.\n", + ist_info.signature, ist_info.command, + ist_info.event, ist_info.perf_level); + + /* Error out if there is neither an IST-SMI BIOS nor a module parameter; + sig = 'ISGE' aka 'Intel Speedstep Gate E' */ + if ((ist_info.signature != 0x47534943) && ( + (smi_port == 0) || (smi_cmd == 0))) + return -ENODEV; + + if (smi_sig == 1) + smi_sig = 0x47534943; + else + smi_sig = ist_info.signature; + + /* setup smi_port from module parameter or BIOS */ + if ((smi_port > 0xff) || (smi_port < 0)) + return -EINVAL; + else if (smi_port == 0) + smi_port = ist_info.command & 0xff; + + if ((smi_cmd > 0xff) || (smi_cmd < 0)) + return -EINVAL; + else if (smi_cmd == 0) + smi_cmd = (ist_info.command >> 16) & 0xff; + + return cpufreq_register_driver(&speedstep_driver); +} + + +/** + * speedstep_exit - unregisters SpeedStep support + * + * Unregisters SpeedStep support. + */ +static void __exit speedstep_exit(void) +{ + cpufreq_unregister_driver(&speedstep_driver); +} + +module_param(smi_port, int, 0444); +module_param(smi_cmd, int, 0444); +module_param(smi_sig, uint, 0444); + +MODULE_PARM_DESC(smi_port, "Override the BIOS-given IST port with this value " + "-- Intel's default setting is 0xb2"); +MODULE_PARM_DESC(smi_cmd, "Override the BIOS-given IST command with this value " + "-- Intel's default setting is 0x82"); +MODULE_PARM_DESC(smi_sig, "Set to 1 to fake the IST signature when using the " + "SMI interface."); + +MODULE_AUTHOR("Hiroshi Miura"); +MODULE_DESCRIPTION("Speedstep driver for IST applet SMI interface."); +MODULE_LICENSE("GPL"); + +module_init(speedstep_init); +module_exit(speedstep_exit); diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index c1f0045ceb8..af8e7b1aa29 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c @@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci, struct ppc4xx_edac_pdata *pdata = NULL; const struct device_node *np = op->dev.of_node; - if (op->dev.of_match == NULL) + if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL) return -EINVAL; /* Initial driver pointers and private data */ diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c index 2192456dfd6..f032e446fc1 100644 --- a/drivers/firmware/iscsi_ibft_find.c +++ b/drivers/firmware/iscsi_ibft_find.c @@ -42,7 +42,20 @@ struct acpi_table_ibft *ibft_addr; EXPORT_SYMBOL_GPL(ibft_addr); -#define IBFT_SIGN "iBFT" +static const struct { + char *sign; +} ibft_signs[] = { +#ifdef CONFIG_ACPI + /* + * One spec says "IBFT", the other says "iBFT". We have to check + * for both.
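+ * ACPI_SIG_IBFT covers the upper-case "IBFT" spelling when ACPI is + * enabled; the lower-case "iBFT" spelling and Broadcom's "BIFT" are + * matched explicitly below.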
+ */ + { ACPI_SIG_IBFT }, +#endif + { "iBFT" }, + { "BIFT" }, /* Broadcom iSCSI Offload */ +}; + #define IBFT_SIGN_LEN 4 #define IBFT_START 0x80000 /* 512kB */ #define IBFT_END 0x100000 /* 1MB */ @@ -62,6 +75,7 @@ static int __init find_ibft_in_mem(void) unsigned long pos; unsigned int len = 0; void *virt; + int i; for (pos = IBFT_START; pos < IBFT_END; pos += 16) { /* The table can't be inside the VGA BIOS reserved space, @@ -69,18 +83,23 @@ static int __init find_ibft_in_mem(void) if (pos == VGA_MEM) pos += VGA_SIZE; virt = isa_bus_to_virt(pos); - if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { - unsigned long *addr = - (unsigned long *)isa_bus_to_virt(pos + 4); - len = *addr; - /* if the length of the table extends past 1M, - * the table cannot be valid. */ - if (pos + len <= (IBFT_END-1)) { - ibft_addr = (struct acpi_table_ibft *)virt; - break; + + for (i = 0; i < ARRAY_SIZE(ibft_signs); i++) { + if (memcmp(virt, ibft_signs[i].sign, IBFT_SIGN_LEN) == + 0) { + unsigned long *addr = + (unsigned long *)isa_bus_to_virt(pos + 4); + len = *addr; + /* if the length of the table extends past 1M, + * the table cannot be valid. */ + if (pos + len <= (IBFT_END-1)) { + ibft_addr = (struct acpi_table_ibft *)virt; + goto done; + } } } } +done: return len; } /* @@ -89,18 +108,12 @@ static int __init find_ibft_in_mem(void) */ unsigned long __init find_ibft_region(unsigned long *sizep) { - + int i; ibft_addr = NULL; #ifdef CONFIG_ACPI - /* - * One spec says "IBFT", the other says "iBFT". We have to check - * for both. - */ - if (!ibft_addr) - acpi_table_parse(ACPI_SIG_IBFT, acpi_find_ibft); - if (!ibft_addr) - acpi_table_parse(IBFT_SIGN, acpi_find_ibft); + for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) + acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft); #endif /* CONFIG_ACPI */ /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 11d7a72c22d..140b9525b48 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) } EXPORT_SYMBOL(drm_fb_helper_initial_config); -bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) +/** + * drm_fb_helper_hotplug_event - respond to a hotplug notification by + * probing all the outputs attached to the fb. + * @fb_helper: the drm_fb_helper + * + * LOCKING: + * Called at runtime, must take mode config lock. + * + * Scan the connectors attached to the fb_helper and try to put together a + * setup after notification of a change in output configuration. + * + * RETURNS: + * 0 on success and a non-zero error code otherwise.
+ */ +int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) { + struct drm_device *dev = fb_helper->dev; int count = 0; u32 max_width, max_height, bpp_sel; bool bound = false, crtcs_bound = false; struct drm_crtc *crtc; if (!fb_helper->fb) - return false; + return 0; - list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) { + mutex_lock(&dev->mode_config.mutex); + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (crtc->fb) crtcs_bound = true; if (crtc->fb == fb_helper->fb) @@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) if (!bound && crtcs_bound) { fb_helper->delayed_hotplug = true; - return false; + mutex_unlock(&dev->mode_config.mutex); + return 0; } DRM_DEBUG_KMS("\n"); @@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, max_height); drm_setup_crtcs(fb_helper); + mutex_unlock(&dev->mode_config.mutex); return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c34a8dd31d0..32d1b3e829c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); unsigned int i915_powersave = 1; module_param_named(powersave, i915_powersave, int, 0600); -unsigned int i915_semaphores = 1; +unsigned int i915_semaphores = 0; module_param_named(semaphores, i915_semaphores, int, 0600); unsigned int i915_enable_rc6 = 0; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 373c2a005ec..2166ee071dd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -5154,6 +5154,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, I915_WRITE(DSPCNTR(plane), dspcntr); POSTING_READ(DSPCNTR(plane)); + if (!HAS_PCH_SPLIT(dev)) + intel_enable_plane(dev_priv, plane, pipe); ret = intel_pipe_set_base(crtc, x, y, old_fb); diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 4bce801bc58..c77111eca6a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages, nvbe->nr_pages = 0; while (num_pages--) { - if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) { + /* this code path isn't called and is incorrect anyways */ + if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/ nvbe->pages[nvbe->nr_pages] = dma_addrs[nvbe->nr_pages]; nvbe->ttm_alloced[nvbe->nr_pages] = true; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index c20eac3379e..9073e3bfb08 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1780,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev) mc_shared_chmap = RREG32(MC_SHARED_CHMAP); - mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); + if (rdev->flags & RADEON_IS_IGP) + mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG); + else + mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); switch (rdev->config.evergreen.max_tile_pipes) { case 1: diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 94533849927..fc40e0cc345 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -200,6 +200,7 @@ #define BURSTLENGTH_SHIFT 9 #define BURSTLENGTH_MASK 
0x00000200 #define CHANSIZE_OVERRIDE (1 << 11) +#define FUS_MC_ARB_RAMCFG 0x2768 #define MC_VM_AGP_TOP 0x2028 #define MC_VM_AGP_BOT 0x202C #define MC_VM_AGP_BASE 0x2030 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 7aade20f63a..3d8a7634bbe 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE); cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG); - cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE); + cgts_tcc_disable = 0xff000000; gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE); gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG); cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE); @@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev) smx_dc_ctl0 = RREG32(SMX_DC_CTL0); smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff); - smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); + smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets); WREG32(SMX_DC_CTL0, smx_dc_ctl0); WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE); @@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev) WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO); - WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | - POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | - SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); + WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) | + POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) | + SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1))); - WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) | - SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) | - SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size))); + WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) | + SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) | + SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size))); WREG32(VGT_NUM_INSTANCES, 1); WREG32(CP_PERFMON_CNTL, 0); - WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) | + WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) | FETCH_FIFO_HIWATER(0x4) | DONE_FIFO_HIWATER(0xe0) | ALU_UPDATE_FIFO_HIWATER(0x8))); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index dd881d035f0..90dfb2b8cf0 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; - u8 *record = (u8 *)(mode_info->atom_context->bios + - data_offset + - le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + u8 *record; + + if ((frev == 1) && (crev < 2)) + /* absolute */ + record = (u8 *)(mode_info->atom_context->bios + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); + else + /* relative */ + record = (u8 *)(mode_info->atom_context->bios + + data_offset + + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != 
ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 8a955bbdb60..a533f52fd16 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); for (i = 0; i < pages; i++, p++) { - /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32 - * is requested. */ - if (dma_addr[i] != DMA_ERROR_CODE) { + /* we reverted the patch using dma_addr in TTM for now but this + * code stops building on alpha so just comment it out for now */ + if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */ rdev->gart.ttm_alloced[p] = true; rdev->gart.pages_addr[p] = dma_addr[i]; } else { diff --git a/drivers/gpu/drm/radeon/reg_srcs/cayman b/drivers/gpu/drm/radeon/reg_srcs/cayman index 6334f8ac120..0aa8e85a945 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/cayman +++ b/drivers/gpu/drm/radeon/reg_srcs/cayman @@ -33,6 +33,7 @@ cayman 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 +0x00009508 TA_CNTL_AUX 0x00009830 DB_DEBUG 0x00009834 DB_DEBUG2 0x00009838 DB_DEBUG3 diff --git a/drivers/gpu/drm/radeon/reg_srcs/evergreen b/drivers/gpu/drm/radeon/reg_srcs/evergreen index 7e1637176e0..0e28cae7ea4 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/evergreen +++ b/drivers/gpu/drm/radeon/reg_srcs/evergreen @@ -46,6 +46,7 @@ evergreen 0x9400 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS 0x00009100 SPI_CONFIG_CNTL 0x0000913C SPI_CONFIG_CNTL_1 +0x00009508 TA_CNTL_AUX 0x00009700 VC_CNTL 0x00009714 VC_ENHANCE 0x00009830 DB_DEBUG diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index e01cacba685..498b284e5ef 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client) int i; struct vga_switcheroo_client *active = NULL; - if (new_client->active == true) - return 0; - for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { if (vgasr_priv.clients[i].active == true) { active = &vgasr_priv.clients[i]; @@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, goto out; } + if (client->active == true) + goto out; + /* okay we want a switch - test if devices are willing to switch */ can_switch = true; for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index 75b984c519a..107397a606b 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = { .timeout = HZ, }; +static const struct of_device_id mpc_i2c_of_match[]; static int __devinit fsl_i2c_probe(struct platform_device *op) { + const struct of_device_id *match; struct mpc_i2c *i2c; const u32 *prop; u32 clock = MPC_I2C_CLOCK_LEGACY; int result = 0; int plen; - if (!op->dev.of_match) + match = of_match_device(mpc_i2c_of_match, &op->dev); + if (!match) return -EINVAL; i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); @@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op) clock = *prop; } - if (op->dev.of_match->data) { - struct mpc_i2c_data *data = op->dev.of_match->data; + if (match->data) { + struct mpc_i2c_data *data = match->data; data->setup(op->dev.of_node, i2c, clock, data->prescaler); } else { /* Backwards compatibility */ diff --git 
a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index a97e3fec814..04be9f82e14 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data) jiffies, expires); timer->expires = jiffies + expires; - timer->data = (unsigned long)&alg_data; + timer->data = (unsigned long)alg_data; add_timer(timer); } diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 5ed9d25d021..99dde874fbb 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -148,6 +148,7 @@ struct rdma_id_private { u32 qp_num; u8 srq; u8 tos; + u8 reuseaddr; }; struct cma_multicast { @@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr) return cma_zero_addr(addr) || cma_loopback_addr(addr); } +static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst) +{ + if (src->sa_family != dst->sa_family) + return -1; + + switch (src->sa_family) { + case AF_INET: + return ((struct sockaddr_in *) src)->sin_addr.s_addr != + ((struct sockaddr_in *) dst)->sin_addr.s_addr; + default: + return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr, + &((struct sockaddr_in6 *) dst)->sin6_addr); + } +} + static inline __be16 cma_port(struct sockaddr *addr) { if (addr->sa_family == AF_INET) @@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv) mutex_unlock(&lock); } -int rdma_listen(struct rdma_cm_id *id, int backlog) -{ - struct rdma_id_private *id_priv; - int ret; - - id_priv = container_of(id, struct rdma_id_private, id); - if (id_priv->state == CMA_IDLE) { - ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; - ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); - if (ret) - return ret; - } - - if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) - return -EINVAL; - - id_priv->backlog = backlog; - if (id->device) { - switch (rdma_node_get_transport(id->device->node_type)) { - case RDMA_TRANSPORT_IB: - ret = cma_ib_listen(id_priv); - if (ret) - goto err; - break; - case RDMA_TRANSPORT_IWARP: - ret = cma_iw_listen(id_priv, backlog); - if (ret) - goto err; - break; - default: - ret = -ENOSYS; - goto err; - } - } else - cma_listen_on_all(id_priv); - - return 0; -err: - id_priv->backlog = 0; - cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); - return ret; -} -EXPORT_SYMBOL(rdma_listen); - void rdma_set_service_type(struct rdma_cm_id *id, int tos) { struct rdma_id_private *id_priv; @@ -2090,6 +2062,25 @@ err: } EXPORT_SYMBOL(rdma_resolve_addr); +int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) +{ + struct rdma_id_private *id_priv; + unsigned long flags; + int ret; + + id_priv = container_of(id, struct rdma_id_private, id); + spin_lock_irqsave(&id_priv->lock, flags); + if (id_priv->state == CMA_IDLE) { + id_priv->reuseaddr = reuse; + ret = 0; + } else { + ret = -EINVAL; + } + spin_unlock_irqrestore(&id_priv->lock, flags); + return ret; +} +EXPORT_SYMBOL(rdma_set_reuseaddr); + static void cma_bind_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) { @@ -2165,41 +2156,71 @@ retry: return -EADDRNOTAVAIL; } -static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) +/* + * Check that the requested port is available. This is called when trying to + * bind to a specific port, or when trying to listen on a bound port. 
In + * the latter case, the provided id_priv may already be on the bind_list, but + * we still need to check that it's okay to start listening. + */ +static int cma_check_port(struct rdma_bind_list *bind_list, + struct rdma_id_private *id_priv, uint8_t reuseaddr) { struct rdma_id_private *cur_id; - struct sockaddr_in *sin, *cur_sin; - struct rdma_bind_list *bind_list; + struct sockaddr *addr, *cur_addr; struct hlist_node *node; + + addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr; + if (cma_any_addr(addr) && !reuseaddr) + return -EADDRNOTAVAIL; + + hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { + if (id_priv == cur_id) + continue; + + if ((cur_id->state == CMA_LISTEN) || + !reuseaddr || !cur_id->reuseaddr) { + cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; + if (cma_any_addr(cur_addr)) + return -EADDRNOTAVAIL; + + if (!cma_addr_cmp(addr, cur_addr)) + return -EADDRINUSE; + } + } + return 0; +} + +static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv) +{ + struct rdma_bind_list *bind_list; unsigned short snum; + int ret; - sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; - snum = ntohs(sin->sin_port); + snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr)); if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; bind_list = idr_find(ps, snum); - if (!bind_list) - return cma_alloc_port(ps, id_priv, snum); - - /* - * We don't support binding to any address if anyone is bound to - * a specific address on the same port. - */ - if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)) - return -EADDRNOTAVAIL; - - hlist_for_each_entry(cur_id, node, &bind_list->owners, node) { - if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr)) - return -EADDRNOTAVAIL; - - cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr; - if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr) - return -EADDRINUSE; + if (!bind_list) { + ret = cma_alloc_port(ps, id_priv, snum); + } else { + ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); + if (!ret) + cma_bind_port(bind_list, id_priv); } + return ret; +} - cma_bind_port(bind_list, id_priv); - return 0; +static int cma_bind_listen(struct rdma_id_private *id_priv) +{ + struct rdma_bind_list *bind_list = id_priv->bind_list; + int ret = 0; + + mutex_lock(&lock); + if (bind_list->owners.first->next) + ret = cma_check_port(bind_list, id_priv, 0); + mutex_unlock(&lock); + return ret; } static int cma_get_port(struct rdma_id_private *id_priv) @@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr, return 0; } +int rdma_listen(struct rdma_cm_id *id, int backlog) +{ + struct rdma_id_private *id_priv; + int ret; + + id_priv = container_of(id, struct rdma_id_private, id); + if (id_priv->state == CMA_IDLE) { + ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; + ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); + if (ret) + return ret; + } + + if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) + return -EINVAL; + + if (id_priv->reuseaddr) { + ret = cma_bind_listen(id_priv); + if (ret) + goto err; + } + + id_priv->backlog = backlog; + if (id->device) { + switch (rdma_node_get_transport(id->device->node_type)) { + case RDMA_TRANSPORT_IB: + ret = cma_ib_listen(id_priv); + if (ret) + goto err; + break; + case RDMA_TRANSPORT_IWARP: + ret = cma_iw_listen(id_priv, backlog); + if (ret) + goto err; + break; + default: + ret = -ENOSYS; + goto err; + } + 
} else + cma_listen_on_all(id_priv); + + return 0; +err: + id_priv->backlog = 0; + cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); + return ret; +} +EXPORT_SYMBOL(rdma_listen); + int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) { struct rdma_id_private *id_priv; diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 2a1e9ae134b..a9c042345c6 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv, */ clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags); BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT); - if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) { + if (iw_event->status == 0) { cm_id_priv->id.local_addr = iw_event->local_addr; cm_id_priv->id.remote_addr = iw_event->remote_addr; cm_id_priv->state = IW_CM_STATE_ESTABLISHED; diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index ec1e9da1488..b3fa798525b 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname, } rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); break; + case RDMA_OPTION_ID_REUSEADDR: + if (optlen != sizeof(int)) { + ret = -EINVAL; + break; + } + ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); + break; default: ret = -ENOSYS; } diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 9d8dcfab2b3..d7ee70fc917 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) } PDBG("%s ep %p status %d error %d\n", __func__, ep, rpl->status, status2errno(rpl->status)); - ep->com.wr_wait.ret = status2errno(rpl->status); - ep->com.wr_wait.done = 1; - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); return 0; } @@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) struct c4iw_listen_ep *ep = lookup_stid(t, stid); PDBG("%s ep %p\n", __func__, ep); - ep->com.wr_wait.ret = status2errno(rpl->status); - ep->com.wr_wait.done = 1; - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); return 0; } @@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) struct c4iw_qp_attributes attrs; int disconnect = 1; int release = 0; - int closing = 0; + int abort = 0; struct tid_info *t = dev->rdev.lldi.tids; unsigned int tid = GET_TID(hdr); @@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) * in rdma connection migration (see c4iw_accept_cr()). 
*/ __state_set(&ep->com, CLOSING); - ep->com.wr_wait.done = 1; - ep->com.wr_wait.ret = -ECONNRESET; PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case MPA_REP_SENT: __state_set(&ep->com, CLOSING); - ep->com.wr_wait.done = 1; - ep->com.wr_wait.ret = -ECONNRESET; PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); break; case FPDU_MODE: start_ep_timer(ep); __state_set(&ep->com, CLOSING); - closing = 1; + attrs.next_state = C4IW_QP_STATE_CLOSING; + abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, + C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); peer_close_upcall(ep); + disconnect = 1; break; case ABORTING: disconnect = 0; @@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) BUG_ON(1); } mutex_unlock(&ep->com.mutex); - if (closing) { - attrs.next_state = C4IW_QP_STATE_CLOSING; - c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, - C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); - } if (disconnect) c4iw_ep_disconnect(ep, 0, GFP_KERNEL); if (release) @@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) /* * Wake up any threads in rdma_init() or rdma_fini(). */ - ep->com.wr_wait.done = 1; - ep->com.wr_wait.ret = -ECONNRESET; - wake_up(&ep->com.wr_wait.wait); + c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); mutex_lock(&ep->com.mutex); switch (ep->com.state) { @@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb) ep = lookup_tid(t, tid); BUG_ON(!ep); - if (ep->com.qp) { + if (ep && ep->com.qp) { printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, ep->com.qp->wq.sq.qid); attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); } else - printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid); + printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); return 0; } @@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff); wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1]; PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret); - if (wr_waitp) { - if (ret) - wr_waitp->ret = -ret; - else - wr_waitp->ret = 0; - wr_waitp->done = 1; - wake_up(&wr_waitp->wait); - } + if (wr_waitp) + c4iw_wake_up(wr_waitp, ret ? 
-ret : 0); kfree_skb(skb); break; case 2: diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index e29172c2afc..40a13cc633a 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(DRV_VERSION); -static LIST_HEAD(dev_list); +static LIST_HEAD(uld_ctx_list); static DEFINE_MUTEX(dev_mutex); static struct dentry *c4iw_debugfs_root; @@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev) c4iw_destroy_resource(&rdev->resource); } -static void c4iw_remove(struct c4iw_dev *dev) +struct uld_ctx { + struct list_head entry; + struct cxgb4_lld_info lldi; + struct c4iw_dev *dev; +}; + +static void c4iw_remove(struct uld_ctx *ctx) { - PDBG("%s c4iw_dev %p\n", __func__, dev); - list_del(&dev->entry); - if (dev->registered) - c4iw_unregister_device(dev); - c4iw_rdev_close(&dev->rdev); - idr_destroy(&dev->cqidr); - idr_destroy(&dev->qpidr); - idr_destroy(&dev->mmidr); - iounmap(dev->rdev.oc_mw_kva); - ib_dealloc_device(&dev->ibdev); + PDBG("%s c4iw_dev %p\n", __func__, ctx->dev); + c4iw_unregister_device(ctx->dev); + c4iw_rdev_close(&ctx->dev->rdev); + idr_destroy(&ctx->dev->cqidr); + idr_destroy(&ctx->dev->qpidr); + idr_destroy(&ctx->dev->mmidr); + iounmap(ctx->dev->rdev.oc_mw_kva); + ib_dealloc_device(&ctx->dev->ibdev); + ctx->dev = NULL; } static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) @@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); if (!devp) { printk(KERN_ERR MOD "Cannot allocate ib device\n"); - return NULL; + return ERR_PTR(-ENOMEM); } devp->rdev.lldi = *infop; @@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa, devp->rdev.lldi.vr->ocq.size); - printk(KERN_INFO MOD "ocq memory: " + PDBG(KERN_INFO MOD "ocq memory: " "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n", devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size, devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva); - mutex_lock(&dev_mutex); - ret = c4iw_rdev_open(&devp->rdev); if (ret) { mutex_unlock(&dev_mutex); printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret); ib_dealloc_device(&devp->ibdev); - return NULL; + return ERR_PTR(ret); } idr_init(&devp->cqidr); idr_init(&devp->qpidr); idr_init(&devp->mmidr); spin_lock_init(&devp->lock); - list_add_tail(&devp->entry, &dev_list); - mutex_unlock(&dev_mutex); if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( @@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) { - struct c4iw_dev *dev; + struct uld_ctx *ctx; static int vers_printed; int i; @@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop) printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", DRV_VERSION); - dev = c4iw_alloc(infop); - if (!dev) + ctx = kzalloc(sizeof *ctx, GFP_KERNEL); + if (!ctx) { + ctx = ERR_PTR(-ENOMEM); goto out; + } + ctx->lldi = *infop; PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n", - __func__, pci_name(dev->rdev.lldi.pdev), - dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq, - dev->rdev.lldi.ntxq, dev->rdev.lldi.nports); + __func__, pci_name(ctx->lldi.pdev), + ctx->lldi.nchan, ctx->lldi.nrxq, + ctx->lldi.ntxq, ctx->lldi.nports); + + 
mutex_lock(&dev_mutex); + list_add_tail(&ctx->entry, &uld_ctx_list); + mutex_unlock(&dev_mutex); - for (i = 0; i < dev->rdev.lldi.nrxq; i++) - PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]); + for (i = 0; i < ctx->lldi.nrxq; i++) + PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]); out: - return dev; + return ctx; } static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *gl) { - struct c4iw_dev *dev = handle; + struct uld_ctx *ctx = handle; + struct c4iw_dev *dev = ctx->dev; struct sk_buff *skb; const struct cpl_act_establish *rpl; unsigned int opcode; @@ -503,47 +512,49 @@ nomem: static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state) { - struct c4iw_dev *dev = handle; + struct uld_ctx *ctx = handle; PDBG("%s new_state %u\n", __func__, new_state); switch (new_state) { case CXGB4_STATE_UP: - printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev)); - if (!dev->registered) { - int ret; - ret = c4iw_register_device(dev); - if (ret) + printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev)); + if (!ctx->dev) { + int ret = 0; + + ctx->dev = c4iw_alloc(&ctx->lldi); + if (!IS_ERR(ctx->dev)) + ret = c4iw_register_device(ctx->dev); + if (IS_ERR(ctx->dev) || ret) printk(KERN_ERR MOD "%s: RDMA registration failed: %d\n", - pci_name(dev->rdev.lldi.pdev), ret); + pci_name(ctx->lldi.pdev), ret); } break; case CXGB4_STATE_DOWN: printk(KERN_INFO MOD "%s: Down\n", - pci_name(dev->rdev.lldi.pdev)); - if (dev->registered) - c4iw_unregister_device(dev); + pci_name(ctx->lldi.pdev)); + if (ctx->dev) + c4iw_remove(ctx); break; case CXGB4_STATE_START_RECOVERY: printk(KERN_INFO MOD "%s: Fatal Error\n", - pci_name(dev->rdev.lldi.pdev)); - dev->rdev.flags |= T4_FATAL_ERROR; - if (dev->registered) { + pci_name(ctx->lldi.pdev)); + if (ctx->dev) { struct ib_event event; + ctx->dev->rdev.flags |= T4_FATAL_ERROR; memset(&event, 0, sizeof event); event.event = IB_EVENT_DEVICE_FATAL; - event.device = &dev->ibdev; + event.device = &ctx->dev->ibdev; ib_dispatch_event(&event); - c4iw_unregister_device(dev); + c4iw_remove(ctx); } break; case CXGB4_STATE_DETACH: printk(KERN_INFO MOD "%s: Detach\n", - pci_name(dev->rdev.lldi.pdev)); - mutex_lock(&dev_mutex); - c4iw_remove(dev); - mutex_unlock(&dev_mutex); + pci_name(ctx->lldi.pdev)); + if (ctx->dev) + c4iw_remove(ctx); break; } return 0; @@ -576,11 +587,13 @@ static int __init c4iw_init_module(void) static void __exit c4iw_exit_module(void) { - struct c4iw_dev *dev, *tmp; + struct uld_ctx *ctx, *tmp; mutex_lock(&dev_mutex); - list_for_each_entry_safe(dev, tmp, &dev_list, entry) { - c4iw_remove(dev); + list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) { + if (ctx->dev) + c4iw_remove(ctx); + kfree(ctx); } mutex_unlock(&dev_mutex); cxgb4_unregister_uld(CXGB4_ULD_RDMA); diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 9f6166f5926..35d2a5dd9bb 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) #define C4IW_WR_TO (10*HZ) +enum { + REPLY_READY = 0, +}; + struct c4iw_wr_wait { wait_queue_head_t wait; - int done; + unsigned long status; int ret; }; static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) { wr_waitp->ret = 0; - wr_waitp->done = 0; + wr_waitp->status = 0; init_waitqueue_head(&wr_waitp->wait); } +static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) +{ + wr_waitp->ret = ret; + set_bit(REPLY_READY, 
&wr_waitp->status); + wake_up(&wr_waitp->wait); +} + static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp, u32 hwtid, u32 qpid, const char *func) { unsigned to = C4IW_WR_TO; - do { + int ret; - wait_event_timeout(wr_waitp->wait, wr_waitp->done, to); - if (!wr_waitp->done) { + do { + ret = wait_event_timeout(wr_waitp->wait, + test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); + if (!ret) { printk(KERN_ERR MOD "%s - Device %s not responding - " "tid %u qpid %u\n", func, pci_name(rdev->lldi.pdev), hwtid, qpid); + if (c4iw_fatal_error(rdev)) { + wr_waitp->ret = -EIO; + break; + } to = to << 2; } - } while (!wr_waitp->done); + } while (!ret); if (wr_waitp->ret) - printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n", - pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); + PDBG("%s: FW reply %d tid %u qpid %u\n", + pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); return wr_waitp->ret; } - struct c4iw_dev { struct ib_device ibdev; struct c4iw_rdev rdev; @@ -175,9 +191,7 @@ struct c4iw_dev { struct idr qpidr; struct idr mmidr; spinlock_t lock; - struct list_head entry; struct dentry *debugfs_root; - u8 registered; }; static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index f66dd8bf512..5b9e4220ca0 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev) if (ret) goto bail2; } - dev->registered = 1; return 0; bail2: ib_unregister_device(&dev->ibdev); @@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev) c4iw_class_attributes[i]); ib_unregister_device(&dev->ibdev); kfree(dev->ibdev.iwcm); - dev->registered = 0; return; } diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 70a5a3c646d..3b773b05a89 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ V_FW_RI_RES_WR_PCIECHN(0) | /* set by uP at ri_init time */ - t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 | + (t4_sq_onchip(&wq->sq) ? 
F_FW_RI_RES_WR_ONCHIP : 0) | V_FW_RI_RES_WR_IQID(scq->cqid)); res->u.sqrq.dcaen_to_eqsize = cpu_to_be32( V_FW_RI_RES_WR_DCAEN(0) | @@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, if (ret) { if (internal) c4iw_get_ep(&qhp->ep->com); - disconnect = abort = 1; goto err; } break; diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 58c0e417bc3..be24ac72611 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, struct ipath_devdata *dd; unsigned long long addr; u32 bar0 = 0, bar1 = 0; - u8 rev; dd = ipath_alloc_devdata(pdev); if (IS_ERR(dd)) { @@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev, goto bail_regions; } - ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); - if (ret) { - ipath_dev_err(dd, "Failed to read PCI revision ID unit " - "%u: err %d\n", dd->ipath_unit, -ret); - goto bail_regions; /* shouldn't ever happen */ - } - dd->ipath_pcirev = rev; + dd->ipath_pcirev = pdev->revision; #if defined(__powerpc__) /* There isn't a generic way to specify writethrough mappings */ diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 33c7eedaba6..e74cdf9ef47 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) u16 last_ae; u8 original_hw_tcp_state; u8 original_ibqp_state; - enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK; + int disconn_status = 0; int issue_disconn = 0; int issue_close = 0; int issue_flush = 0; @@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { issue_disconn = 1; if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) - disconn_status = IW_CM_EVENT_STATUS_RESET; + disconn_status = -ECONNRESET; } if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) || @@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp) cm_id->provider_data = nesqp; /* Send up the close complete event */ cm_event.event = IW_CM_EVENT_CLOSE; - cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) nes_add_ref(&nesqp->ibqp); cm_event.event = IW_CM_EVENT_ESTABLISHED; - cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; + cm_event.status = 0; cm_event.provider_data = (void *)nesqp; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event) /* notify OF layer we successfully created the requested connection */ cm_event.event = IW_CM_EVENT_CONNECT_REPLY; - cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED; + cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr.sin_family = AF_INET; cm_event.local_addr.sin_port = cm_id->local_addr.sin_port; @@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event) nesqp->cm_id = NULL; /* cm_id->provider_data = NULL; */ cm_event.event = IW_CM_EVENT_DISCONNECT; - cm_event.status = IW_CM_EVENT_STATUS_RESET; + cm_event.status = -ECONNRESET; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr 
= cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event) ret = cm_id->event_handler(cm_id, &cm_event); atomic_inc(&cm_closes); cm_event.event = IW_CM_EVENT_CLOSE; - cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.status = 0; cm_event.provider_data = cm_id->provider_data; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; @@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event) cm_node, cm_id, jiffies); cm_event.event = IW_CM_EVENT_CONNECT_REQUEST; - cm_event.status = IW_CM_EVENT_STATUS_OK; + cm_event.status = 0; cm_event.provider_data = (void *)cm_node; cm_event.local_addr.sin_family = AF_INET; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 26d8018c0a7..95ca93ceeda 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp) (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { cm_id = nesqp->cm_id; cm_event.event = IW_CM_EVENT_CONNECT_REPLY; - cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT; + cm_event.status = -ETIMEDOUT; cm_event.local_addr = cm_id->local_addr; cm_event.remote_addr = cm_id->remote_addr; cm_event.private_data = NULL; diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 6bab3eaea70..9f53e68a096 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd) ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10)); tstart = get_jiffies_64(); while (chan_done && - !time_after64(tstart, tstart + msecs_to_jiffies(500))) { + !time_after64(get_jiffies_64(), + tstart + msecs_to_jiffies(500))) { msleep(20); for (chan = 0; chan < SERDES_CHANS; ++chan) { rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx), diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 48b6674cbc4..891cc2ff5f0 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd) */ devid = parent->device; if (devid >= 0x25e2 && devid <= 0x25fa) { - u8 rev; - /* 5000 P/V/X/Z */ - pci_read_config_byte(parent, PCI_REVISION_ID, &rev); - if (rev <= 0xb2) + if (parent->revision <= 0xb2) bits = 1U << 10; else bits = 7U << 10; diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c index 1839194ea98..10bcd4ae540 100644 --- a/drivers/input/keyboard/atakbd.c +++ b/drivers/input/keyboard/atakbd.c @@ -223,8 +223,9 @@ static int __init atakbd_init(void) return -ENODEV; // need to init core driver if not already done so - if (atari_keyb_init()) - return -ENODEV; + error = atari_keyb_init(); + if (error) + return error; atakbd_dev = input_allocate_device(); if (!atakbd_dev) diff --git a/drivers/input/mouse/atarimouse.c b/drivers/input/mouse/atarimouse.c index adf45b3040e..5c4a692bf73 100644 --- a/drivers/input/mouse/atarimouse.c +++ b/drivers/input/mouse/atarimouse.c @@ -77,15 +77,15 @@ static void atamouse_interrupt(char *buf) #endif /* only relative events get here */ - dx = buf[1]; - dy = -buf[2]; + dx = buf[1]; + dy = buf[2]; input_report_rel(atamouse_dev, REL_X, dx); input_report_rel(atamouse_dev, REL_Y, dy); - input_report_key(atamouse_dev, BTN_LEFT, buttons & 0x1); + input_report_key(atamouse_dev, BTN_LEFT, 
buttons & 0x4); input_report_key(atamouse_dev, BTN_MIDDLE, buttons & 0x2); - input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x4); + input_report_key(atamouse_dev, BTN_RIGHT, buttons & 0x1); input_sync(atamouse_dev); @@ -108,7 +108,7 @@ static int atamouse_open(struct input_dev *dev) static void atamouse_close(struct input_dev *dev) { ikbd_mouse_disable(); - atari_mouse_interrupt_hook = NULL; + atari_input_mouse_interrupt_hook = NULL; } static int __init atamouse_init(void) @@ -118,8 +118,9 @@ static int __init atamouse_init(void) if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ST_MFP)) return -ENODEV; - if (!atari_keyb_init()) - return -ENODEV; + error = atari_keyb_init(); + if (error) + return error; atamouse_dev = input_allocate_device(); if (!atamouse_dev) diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index c24946f5125..1de1c19dad3 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c @@ -281,17 +281,24 @@ struct ser_req { u8 command; u8 ref_off; u16 scratch; - __be16 sample; struct spi_message msg; struct spi_transfer xfer[6]; + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + __be16 sample ____cacheline_aligned; }; struct ads7845_ser_req { u8 command[3]; - u8 pwrdown[3]; - u8 sample[3]; struct spi_message msg; struct spi_transfer xfer[2]; + /* + * DMA (thus cache coherency maintenance) requires the + * transfer buffers to live in their own cache lines. + */ + u8 sample[3] ____cacheline_aligned; }; static int ads7846_read12_ser(struct device *dev, unsigned command) diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c index e7089a1f6cb..b37e6186d0f 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c @@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = { {LM3530_NAME, 0}, {} }; +MODULE_DEVICE_TABLE(i2c, lm3530_id); static struct i2c_driver lm3530_i2c_driver = { .probe = lm3530_probe, diff --git a/drivers/lguest/Kconfig b/drivers/lguest/Kconfig index 0aaa0597a62..34ae49dc557 100644 --- a/drivers/lguest/Kconfig +++ b/drivers/lguest/Kconfig @@ -5,8 +5,10 @@ config LGUEST ---help--- This is a very simple module which allows you to run multiple instances of the same Linux kernel, using the - "lguest" command found in the Documentation/lguest directory. + "lguest" command found in the Documentation/virtual/lguest + directory. + Note that "lguest" is pronounced to rhyme with "fell quest", - not "rustyvisor". See Documentation/lguest/lguest.txt. + not "rustyvisor". See Documentation/virtual/lguest/lguest.txt. If unsure, say N. If curious, say M. If masochistic, say Y. diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile index 7d463c26124..8ac947c7e7c 100644 --- a/drivers/lguest/Makefile +++ b/drivers/lguest/Makefile @@ -18,7 +18,7 @@ Mastery: PREFIX=M Beer: @for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}" Preparation Preparation! 
Guest Drivers Launcher Host Switcher Mastery: - @sh ../../Documentation/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` + @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'` Puppy: @clear @printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n" diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c index c820e2f5352..3f442003623 100644 --- a/drivers/media/video/cx88/cx88-input.c +++ b/drivers/media/video/cx88/cx88-input.c @@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core) for (todo = 32; todo > 0; todo -= bits) { ev.pulse = samples & 0x80000000 ? false : true; bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples)); - ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate); + ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate; ir_raw_event_store_with_filter(ir->dev, &ev); samples <<= bits; } diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 3973f9a9475..ddb4c091ded 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, } EXPORT_SYMBOL(soc_camera_apply_sensor_flags); +#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ + ((x) >> 24) & 0xff + +static int soc_camera_try_fmt(struct soc_camera_device *icd, + struct v4l2_format *f) +{ + struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); + struct v4l2_pix_format *pix = &f->fmt.pix; + int ret; + + dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n", + pixfmtstr(pix->pixelformat), pix->width, pix->height); + + pix->bytesperline = 0; + pix->sizeimage = 0; + + ret = ici->ops->try_fmt(icd, f); + if (ret < 0) + return ret; + + if (!pix->sizeimage) { + if (!pix->bytesperline) { + const struct soc_camera_format_xlate *xlate; + + xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); + if (!xlate) + return -EINVAL; + + ret = soc_mbus_bytes_per_line(pix->width, + xlate->host_fmt); + if (ret > 0) + pix->bytesperline = ret; + } + if (pix->bytesperline) + pix->sizeimage = pix->bytesperline * pix->height; + } + + return 0; +} + static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct soc_camera_device *icd = file->private_data; - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); WARN_ON(priv != file->private_data); @@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv, return -EINVAL; /* limit format to hardware capabilities */ - return ici->ops->try_fmt(icd, f); + return soc_camera_try_fmt(icd, f); } static int soc_camera_enum_input(struct file *file, void *priv, @@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd) icd->user_formats = NULL; } -#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \ - ((x) >> 24) & 0xff - /* Called with .vb_lock held, or from the first open(2), see comment there */ static int soc_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) @@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd, pixfmtstr(pix->pixelformat), pix->width, pix->height); /* We always call try_fmt() before set_fmt() or set_crop() */ - ret = ici->ops->try_fmt(icd, f); + ret = soc_camera_try_fmt(icd, f); if (ret < 0) return ret; diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index 
5aeaf876ba9..4aae501f02d 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c @@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, sd->v4l2_dev = v4l2_dev; if (sd->internal_ops && sd->internal_ops->registered) { err = sd->internal_ops->registered(sd); - if (err) + if (err) { + module_put(sd->owner); return err; + } } /* This just returns 0 if either of the two args is NULL */ @@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev, if (err) { if (sd->internal_ops && sd->internal_ops->unregistered) sd->internal_ops->unregistered(sd); + module_put(sd->owner); return err; } diff --git a/drivers/media/video/v4l2-subdev.c b/drivers/media/video/v4l2-subdev.c index 0b806449067..812729ebf09 100644 --- a/drivers/media/video/v4l2-subdev.c +++ b/drivers/media/video/v4l2-subdev.c @@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg) switch (cmd) { case VIDIOC_QUERYCTRL: - return v4l2_subdev_queryctrl(sd, arg); + return v4l2_queryctrl(sd->ctrl_handler, arg); case VIDIOC_QUERYMENU: - return v4l2_subdev_querymenu(sd, arg); + return v4l2_querymenu(sd->ctrl_handler, arg); case VIDIOC_G_CTRL: - return v4l2_subdev_g_ctrl(sd, arg); + return v4l2_g_ctrl(sd->ctrl_handler, arg); case VIDIOC_S_CTRL: - return v4l2_subdev_s_ctrl(sd, arg); + return v4l2_s_ctrl(sd->ctrl_handler, arg); case VIDIOC_G_EXT_CTRLS: - return v4l2_subdev_g_ext_ctrls(sd, arg); + return v4l2_g_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_S_EXT_CTRLS: - return v4l2_subdev_s_ext_ctrls(sd, arg); + return v4l2_s_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_TRY_EXT_CTRLS: - return v4l2_subdev_try_ext_ctrls(sd, arg); + return v4l2_try_ext_ctrls(sd->ctrl_handler, arg); case VIDIOC_DQEVENT: if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index 643ad52e3ca..4796bbf0ae4 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c @@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void) gd->major = I2O_MAJOR; gd->queue = queue; gd->fops = &i2o_block_fops; - gd->events = DISK_EVENT_MEDIA_CHANGE; gd->private_data = dev; dev->gd = gd; diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index d4a851c6b5b..0b4d5b23bec 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) int iter, i; unsigned long flags; - data->chip->irq_ack(irq_data); + data->chip->irq_ack(data); for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { u32 status; diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 2e165117457..3ab9ffa00aa 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -717,14 +717,14 @@ static int usbhs_enable(struct device *dev) gpio_request(pdata->ehci_data->reset_gpio_port[0], "USB1 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[0], 1); + (pdata->ehci_data->reset_gpio_port[0], 0); } if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) { gpio_request(pdata->ehci_data->reset_gpio_port[1], "USB2 PHY reset"); gpio_direction_output - (pdata->ehci_data->reset_gpio_port[1], 1); + (pdata->ehci_data->reset_gpio_port[1], 0); } /* Hold the PHY in RESET for enough time till DIR is high */ @@ -904,11 +904,11 @@ static int usbhs_enable(struct device *dev) if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) gpio_set_value - 
(pdata->ehci_data->reset_gpio_port[0], 0); + (pdata->ehci_data->reset_gpio_port[0], 1); if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) gpio_set_value - (pdata->ehci_data->reset_gpio_port[1], 0); + (pdata->ehci_data->reset_gpio_port[1], 1); } end_count: diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 16422de0823..2c0d4d16491 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c @@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript, if (err) goto out; } - if (tscript->flags & TWL4030_SLEEP_SCRIPT) + if (tscript->flags & TWL4030_SLEEP_SCRIPT) { if (order) pr_warning("TWL4030: Bad order of scripts (sleep "\ "script before wakeup) Leads to boot "\ "failure on some boards\n"); err = twl4030_config_sleep_sequence(address); + } out: return err; } diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 2b200c1cfbb..461e6a17fb9 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) spin_unlock_irqrestore(&host->clk_lock, flags); return; } - mmc_claim_host(host); + mutex_lock(&host->clk_gate_mutex); spin_lock_irqsave(&host->clk_lock, flags); if (!host->clk_requests) { spin_unlock_irqrestore(&host->clk_lock, flags); @@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host) pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); } spin_unlock_irqrestore(&host->clk_lock, flags); - mmc_release_host(host); + mutex_unlock(&host->clk_gate_mutex); } /* @@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) { unsigned long flags; - mmc_claim_host(host); + mutex_lock(&host->clk_gate_mutex); spin_lock_irqsave(&host->clk_lock, flags); if (host->clk_gated) { spin_unlock_irqrestore(&host->clk_lock, flags); @@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host) } host->clk_requests++; spin_unlock_irqrestore(&host->clk_lock, flags); - mmc_release_host(host); + mutex_unlock(&host->clk_gate_mutex); } /** @@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host) host->clk_gated = false; INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); spin_lock_init(&host->clk_lock); + mutex_init(&host->clk_gate_mutex); } /** diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c index f9b611fc773..60e4186a434 100644 --- a/drivers/mmc/host/sdhci-of-core.c +++ b/drivers/mmc/host/sdhci-of-core.c @@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np) #endif } +static const struct of_device_id sdhci_of_match[]; static int __devinit sdhci_of_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct sdhci_of_data *sdhci_of_data; struct sdhci_host *host; @@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev) int size; int ret; - if (!ofdev->dev.of_match) + match = of_match_device(sdhci_of_match, &ofdev->dev); + if (!match) return -EINVAL; - sdhci_of_data = ofdev->dev.of_match->data; + sdhci_of_data = match->data; if (!of_device_is_available(np)) return -ENODEV; diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 44b1f46458c..5069111c81c 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -260,6 +260,13 @@ config MTD_BCM963XX Support for parsing CFE image tag and creating MTD partitions on Broadcom BCM63xx boards. 
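The ads7846 hunk earlier in this section applies a general DMA rule that is easy to miss: a buffer handed to the DMA engine must not share a cache line with fields the CPU is updating at the same time, or the cache maintenance done around the transfer can corrupt one or the other. A minimal sketch of the idiom follows; the struct and field names are hypothetical, only the trailing ____cacheline_aligned placement is taken from the patch:

	/* CPU-only bookkeeping stays at the front of the struct */
	struct foo_req {
		u8	command;
		u16	scratch;
		struct spi_message	msg;
		/*
		 * DMA-visible buffers go last, starting on their own
		 * cache line, so invalidating or flushing around the
		 * transfer cannot clobber the CPU-owned fields above.
		 */
		__be16	sample ____cacheline_aligned;
	};

By kernel convention everything from the first ____cacheline_aligned member to the end of the struct is treated as DMA-safe, which is why both ads7846 request structs move their sample buffers to the tail.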
+config MTD_LANTIQ + tristate "Lantiq SoC NOR support" + depends on LANTIQ + select MTD_PARTITIONS + help + Support for NOR flash attached to the Lantiq SoC's External Bus Unit. + config MTD_DILNETPC tristate "CFI Flash device mapped on DIL/Net PC" depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile index 08533bd5cba..6adf4c9b905 100644 --- a/drivers/mtd/maps/Makefile +++ b/drivers/mtd/maps/Makefile @@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o +obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c new file mode 100644 index 00000000000..a90cabd7b84 --- /dev/null +++ b/drivers/mtd/maps/lantiq-flash.c @@ -0,0 +1,251 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE + * Copyright (C) 2010 John Crispin <blogic@openwrt.org> + */ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/map.h> +#include <linux/mtd/partitions.h> +#include <linux/mtd/cfi.h> +#include <linux/platform_device.h> +#include <linux/mtd/physmap.h> + +#include <lantiq_soc.h> +#include <lantiq_platform.h> + +/* + * The NOR flash is connected to the same external bus unit (EBU) as PCI. + * To make PCI work we need to enable the endianness swapping for the address + * written to the EBU. This endianness swapping works for PCI correctly but + * fails for attached NOR devices. To work around this we need to use a complex + * map. The workaround involves swapping all addresses whilst probing the chip. + * Once probing is complete we stop swapping the addresses but swizzle the + * unlock addresses to ensure that access to the NOR device works correctly. + */ + +enum { + LTQ_NOR_PROBING, + LTQ_NOR_NORMAL +}; + +struct ltq_mtd { + struct resource *res; + struct mtd_info *mtd; + struct map_info *map; +}; + +static char ltq_map_name[] = "ltq_nor"; + +static map_word +ltq_read16(struct map_info *map, unsigned long adr) +{ + unsigned long flags; + map_word temp; + + if (map->map_priv_1 == LTQ_NOR_PROBING) + adr ^= 2; + spin_lock_irqsave(&ebu_lock, flags); + temp.x[0] = *(u16 *)(map->virt + adr); + spin_unlock_irqrestore(&ebu_lock, flags); + return temp; +} + +static void +ltq_write16(struct map_info *map, map_word d, unsigned long adr) +{ + unsigned long flags; + + if (map->map_priv_1 == LTQ_NOR_PROBING) + adr ^= 2; + spin_lock_irqsave(&ebu_lock, flags); + *(u16 *)(map->virt + adr) = d.x[0]; + spin_unlock_irqrestore(&ebu_lock, flags); +} + +/* + * The following 2 functions copy data between iomem and a cached memory + * section. As memcpy() makes use of pre-fetching we cannot use it here. + * The normal alternative of using memcpy_{to,from}io also makes use of + * memcpy() on MIPS so it is not applicable either. We are therefore stuck + * with having to use our own loop. 
+ */ +static void +ltq_copy_from(struct map_info *map, void *to, + unsigned long from, ssize_t len) +{ + unsigned char *f = (unsigned char *)map->virt + from; + unsigned char *t = (unsigned char *)to; + unsigned long flags; + + spin_lock_irqsave(&ebu_lock, flags); + while (len--) + *t++ = *f++; + spin_unlock_irqrestore(&ebu_lock, flags); +} + +static void +ltq_copy_to(struct map_info *map, unsigned long to, + const void *from, ssize_t len) +{ + unsigned char *f = (unsigned char *)from; + unsigned char *t = (unsigned char *)map->virt + to; + unsigned long flags; + + spin_lock_irqsave(&ebu_lock, flags); + while (len--) + *t++ = *f++; + spin_unlock_irqrestore(&ebu_lock, flags); +} + +static const char *part_probe_types[] = { "cmdlinepart", NULL }; + +static int __init +ltq_mtd_probe(struct platform_device *pdev) +{ + struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); + struct ltq_mtd *ltq_mtd; + struct mtd_partition *parts; + struct resource *res; + int nr_parts = 0; + struct cfi_private *cfi; + int err; + + ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); + platform_set_drvdata(pdev, ltq_mtd); + + ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!ltq_mtd->res) { + dev_err(&pdev->dev, "failed to get memory resource"); + err = -ENOENT; + goto err_out; + } + + res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start, + resource_size(ltq_mtd->res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "failed to request mem resource"); + err = -EBUSY; + goto err_out; + } + + ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); + ltq_mtd->map->phys = res->start; + ltq_mtd->map->size = resource_size(res); + ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev, + ltq_mtd->map->phys, ltq_mtd->map->size); + if (!ltq_mtd->map->virt) { + dev_err(&pdev->dev, "failed to ioremap!\n"); + err = -ENOMEM; + goto err_free; + } + + ltq_mtd->map->name = ltq_map_name; + ltq_mtd->map->bankwidth = 2; + ltq_mtd->map->read = ltq_read16; + ltq_mtd->map->write = ltq_write16; + ltq_mtd->map->copy_from = ltq_copy_from; + ltq_mtd->map->copy_to = ltq_copy_to; + + ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING; + ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map); + ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL; + + if (!ltq_mtd->mtd) { + dev_err(&pdev->dev, "probing failed\n"); + err = -ENXIO; + goto err_unmap; + } + + ltq_mtd->mtd->owner = THIS_MODULE; + + cfi = ltq_mtd->map->fldrv_priv; + cfi->addr_unlock1 ^= 1; + cfi->addr_unlock2 ^= 1; + + nr_parts = parse_mtd_partitions(ltq_mtd->mtd, + part_probe_types, &parts, 0); + if (nr_parts > 0) { + dev_info(&pdev->dev, + "using %d partitions from cmdline", nr_parts); + } else { + nr_parts = ltq_mtd_data->nr_parts; + parts = ltq_mtd_data->parts; + } + + err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts); + if (err) { + dev_err(&pdev->dev, "failed to add partitions\n"); + goto err_destroy; + } + + return 0; + +err_destroy: + map_destroy(ltq_mtd->mtd); +err_unmap: + iounmap(ltq_mtd->map->virt); +err_free: + kfree(ltq_mtd->map); +err_out: + kfree(ltq_mtd); + return err; +} + +static int __devexit +ltq_mtd_remove(struct platform_device *pdev) +{ + struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev); + + if (ltq_mtd) { + if (ltq_mtd->mtd) { + del_mtd_partitions(ltq_mtd->mtd); + map_destroy(ltq_mtd->mtd); + } + if (ltq_mtd->map->virt) + iounmap(ltq_mtd->map->virt); + kfree(ltq_mtd->map); + kfree(ltq_mtd); + } + return 0; +} + +static struct platform_driver ltq_mtd_driver = { + .remove = 
__devexit_p(ltq_mtd_remove), + .driver = { + .name = "ltq_nor", + .owner = THIS_MODULE, + }, +}; + +static int __init +init_ltq_mtd(void) +{ + int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe); + + if (ret) + pr_err("ltq_nor: error registering platform driver\n"); + return ret; +} + +static void __exit +exit_ltq_mtd(void) +{ + platform_driver_unregister(&ltq_mtd_driver); +} + +module_init(init_ltq_mtd); +module_exit(exit_ltq_mtd); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("Lantiq SoC NOR"); diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index bd483f0c57e..c1d33464aee 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes) } #endif +static struct of_device_id of_flash_match[]; static int __devinit of_flash_probe(struct platform_device *dev) { #ifdef CONFIG_MTD_PARTITIONS const char **part_probe_types; #endif + const struct of_device_id *match; struct device_node *dp = dev->dev.of_node; struct resource res; struct of_flash *info; @@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev) struct mtd_info **mtd_list = NULL; resource_size_t res_size; - if (!dev->dev.of_match) + match = of_match_device(of_flash_match, &dev->dev); + if (!match) return -EINVAL; - probe_type = dev->dev.of_match->data; + probe_type = match->data; reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 3ffe05db492..5d513b54a7d 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -10,6 +10,7 @@ */ #include <linux/slab.h> +#include <linux/gpio.h> #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> @@ -470,7 +471,7 @@ static int __init au1xxx_nand_init(void) #ifdef CONFIG_MIPS_PB1550 /* set gpio206 high */ - au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR); + gpio_direction_input(206); boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index dc280bc8eba..19f04a34783 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2017,6 +2017,13 @@ config FTMAC100 from Faraday. It is used on Faraday A320, Andes AG101 and some other ARM/NDS32 SoC's. +config LANTIQ_ETOP + tristate "Lantiq SoC ETOP driver" + depends on SOC_TYPE_XWAY + help + Support for the MII0 inside the Lantiq SoC. + + source "drivers/net/fs_enet/Kconfig" source "drivers/net/octeon/Kconfig" @@ -2536,7 +2543,7 @@ config S6GMAC source "drivers/net/stmmac/Kconfig" config PCH_GBE - tristate "PCH Gigabit Ethernet" + tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE" depends on PCI select MII ---help--- @@ -2548,6 +2555,12 @@ config PCH_GBE to Gigabit Ethernet. This driver enables Gigabit Ethernet function. + + This driver can also be used for the OKI SEMICONDUCTOR IOH (Input/ + Output Hub), ML7223. + The ML7223 IOH is for MP (Media Phone) use. + ML7223 is a companion chip for the Intel Atom E6xx series. + ML7223 is completely compatible with the Intel EG20T PCH. 
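Several probe() hunks in this series (sdhci-of above, physmap_of just above, and the mscan, fs_enet and mii-fec conversions below) share one shape: the removed ofdev->dev.of_match back-pointer is replaced by an explicit of_match_device() lookup against a forward-declared match table. Condensed into a hypothetical foo driver, the pattern is roughly:

	static const struct of_device_id foo_match[];	/* defined with the driver */

	static int __devinit foo_probe(struct platform_device *ofdev)
	{
		const struct of_device_id *match;

		/* redo the match ourselves instead of trusting a cached pointer */
		match = of_match_device(foo_match, &ofdev->dev);
		if (!match)
			return -EINVAL;

		/* per-variant driver data still travels in match->data */
		return foo_setup(ofdev, match->data);	/* foo_setup is made up */
	}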
+ endif # NETDEV_1000 # diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 01b604ad155..209fbb70619 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o obj-$(CONFIG_B44) += b44.o obj-$(CONFIG_FORCEDETH) += forcedeth.o -obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o +obj-$(CONFIG_NE_H8300) += ne-h8300.o obj-$(CONFIG_AX88796) += ax88796.o obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o obj-$(CONFIG_FTMAC100) += ftmac100.o @@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o obj-$(CONFIG_LP486E) += lp486e.o obj-$(CONFIG_ETH16I) += eth16i.o -obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o +obj-$(CONFIG_ZORRO8390) += zorro8390.o obj-$(CONFIG_HPLANCE) += hplance.o 7990.o obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o obj-$(CONFIG_EQUALIZER) += eql.o @@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o obj-$(CONFIG_DECLANCE) += declance.o obj-$(CONFIG_ATARILANCE) += atarilance.o obj-$(CONFIG_A2065) += a2065.o -obj-$(CONFIG_HYDRA) += hydra.o 8390.o +obj-$(CONFIG_HYDRA) += hydra.o obj-$(CONFIG_ARIADNE) += ariadne.o obj-$(CONFIG_CS89x0) += cs89x0.o obj-$(CONFIG_MACSONIC) += macsonic.o @@ -259,6 +259,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_ENC28J60) += enc28j60.o obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_GRETH) += greth.o +obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c index 4af235d41fd..fbfb5b47c50 100644 --- a/drivers/net/arm/etherh.c +++ b/drivers/net/arm/etherh.c @@ -527,7 +527,7 @@ static void __init etherh_banner(void) * Read the ethernet address string from the on board rom. * This is an ascii string... */ -static int __init etherh_addr(char *addr, struct expansion_card *ec) +static int __devinit etherh_addr(char *addr, struct expansion_card *ec) { struct in_chunk_dir cd; char *s; @@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = { static u32 etherh_regoffsets[16]; static u32 etherm_regoffsets[16]; -static int __init +static int __devinit etherh_probe(struct expansion_card *ec, const struct ecard_id *id) { const struct etherh_data *data = id->data; diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index ce0091eb06f..1264d781b55 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -554,7 +554,7 @@ static unsigned long __init lance_probe1( struct net_device *dev, memaddr == (unsigned short *)0xffe00000) { /* PAMs card and Riebl on ST use level 5 autovector */ if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO, - "PAM/Riebl-ST Ethernet", dev)) { + "PAM,Riebl-ST Ethernet", dev)) { printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 ); return 0; } diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h index 66823eded7a..2353eca3259 100644 --- a/drivers/net/benet/be.h +++ b/drivers/net/benet/be.h @@ -213,7 +213,7 @@ struct be_rx_stats { struct be_rx_compl_info { u32 rss_hash; - u16 vid; + u16 vlan_tag; u16 pkt_size; u16 rxq_idx; u16 mac_id; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 1e2d825bb94..9dc9394fd4c 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, struct be_async_event_grp5_pvid_state *evt) { if (evt->enabled) - adapter->pvid = evt->tag; + adapter->pvid = le16_to_cpu(evt->tag); else adapter->pvid = 0; } diff --git 
a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 02a0443d182..9187fb4e08f 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1018,7 +1018,8 @@ static void be_rx_compl_process(struct be_adapter *adapter, kfree_skb(skb); return; } - vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid); + vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, + rxcp->vlan_tag); } else { netif_receive_skb(skb); } @@ -1076,7 +1077,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter, if (likely(!rxcp->vlanf)) napi_gro_frags(&eq_obj->napi); else - vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid); + vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, + rxcp->vlan_tag); } static void be_parse_rx_compl_v1(struct be_adapter *adapter, @@ -1102,7 +1104,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter, rxcp->pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl); rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl); - rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, + compl); } static void be_parse_rx_compl_v0(struct be_adapter *adapter, @@ -1128,7 +1131,8 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter, rxcp->pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl); rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl); - rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl); + rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, + compl); } static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) @@ -1155,9 +1159,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo) rxcp->vlanf = 0; if (!lancer_chip(adapter)) - rxcp->vid = swab16(rxcp->vid); + rxcp->vlan_tag = swab16(rxcp->vlan_tag); - if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid]) + if (((adapter->pvid & VLAN_VID_MASK) == + (rxcp->vlan_tag & VLAN_VID_MASK)) && + !adapter->vlan_tag[rxcp->vlan_tag]) rxcp->vlanf = 0; /* As the compl has been parsed, reset it; we wont touch it again */ diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index b28baff7086..01b8a6af275 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -39,7 +39,7 @@ typedef struct mac_addr { u8 mac_addr_value[ETH_ALEN]; -} mac_addr_t; +} __packed mac_addr_t; enum { BOND_AD_STABLE = 0, @@ -134,12 +134,12 @@ typedef struct lacpdu { u8 tlv_type_terminator; // = terminator u8 terminator_length; // = 0 u8 reserved_50[50]; // = 0 -} lacpdu_t; +} __packed lacpdu_t; typedef struct lacpdu_header { struct ethhdr hdr; struct lacpdu lacpdu; -} lacpdu_header_t; +} __packed lacpdu_header_t; // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) typedef struct bond_marker { @@ -155,12 +155,12 @@ typedef struct bond_marker { u8 tlv_type_terminator; // = 0x00 u8 terminator_length; // = 0x00 u8 reserved_90[90]; // = 0 -} bond_marker_t; +} __packed bond_marker_t; typedef struct bond_marker_header { struct ethhdr hdr; struct bond_marker marker; -} bond_marker_header_t; +} __packed bond_marker_header_t; #pragma pack() diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index bd1d811c204..5fedc337556 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct 
platform_device *ofdev, } #endif /* CONFIG_PPC_MPC512x */ +static struct of_device_id mpc5xxx_can_table[]; static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct mpc5xxx_can_data *data; struct device_node *np = ofdev->dev.of_node; struct net_device *dev; @@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev) int irq, mscan_clksrc = 0; int err = -ENOMEM; - if (!ofdev->dev.of_match) + match = of_match_device(mpc5xxx_can_table, &ofdev->dev); + if (!match) return -EINVAL; - data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data; + data = match->data; base = of_iomap(np, 0); if (!base) { diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index a358ea9445a..f501bba1fc6 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev) | (priv->read_reg(priv, REG_ID2) >> 5); } + cf->can_dlc = get_can_dlc(fi & 0x0F); if (fi & FI_RTR) { id |= CAN_RTR_FLAG; } else { - cf->can_dlc = get_can_dlc(fi & 0x0F); for (i = 0; i < cf->can_dlc; i++) cf->data[i] = priv->read_reg(priv, dreg++); } diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index b423965a78d..1b49df6b247 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel. */ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - return sl->dev->base_addr; + + /* TTY layer expects 0 on success */ + return 0; err_free_chan: sl->tty = NULL; diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 3e2e734fecb..f3bbdcef338 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c @@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->duplex = -1; } - cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full - | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half - | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half - | SUPPORTED_Autoneg | SUPPORTED_FIBRE); - - cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg - | ADVERTISED_FIBRE); + if (cmd->speed == SPEED_10000) { + cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); + cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + cmd->port = PORT_FIBRE; + } else { + cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full + | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full + | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg + | SUPPORTED_TP); + cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg + | ADVERTISED_TP); + cmd->port = PORT_TP; + } - cmd->port = PORT_FIBRE; cmd->autoneg = port->autoneg == 1 ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; return 0; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 53c0f04b1b2..cf79cf759e1 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev) netif_start_queue(dev); } - init_waitqueue_head(&port->swqe_avail_wq); - init_waitqueue_head(&port->restart_wq); - mutex_unlock(&port->port_lock); return ret; @@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, INIT_WORK(&port->reset_task, ehea_reset_port); + init_waitqueue_head(&port->swqe_avail_wq); + init_waitqueue_head(&port->restart_wq); + ret = register_netdev(dev); if (ret) { pr_err("register_netdev failed. ret=%d\n", ret); diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 24cb953900d..5131e61c358 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = { #endif }; +static struct of_device_id fs_enet_match[]; static int __devinit fs_enet_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct net_device *ndev; struct fs_enet_private *fep; struct fs_platform_info *fpi; @@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) const u8 *mac_addr; int privsize, len, ret = -ENODEV; - if (!ofdev->dev.of_match) + match = of_match_device(fs_enet_match, &ofdev->dev); + if (!match) return -EINVAL; fpi = kzalloc(sizeof(*fpi), GFP_KERNEL); if (!fpi) return -ENOMEM; - if (!IS_FEC(ofdev->dev.of_match)) { + if (!IS_FEC(match)) { data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len); if (!data || len != 4) goto out_free_fpi; @@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev) fep->dev = &ofdev->dev; fep->ndev = ndev; fep->fpi = fpi; - fep->ops = ofdev->dev.of_match->data; + fep->ops = match->data; ret = fep->ops->setup_data(ndev); if (ret) diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index 7e840d373ab..6a2e150e75b 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus) return 0; } +static struct of_device_id fs_enet_mdio_fec_match[]; static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct resource res; struct mii_bus *new_bus; struct fec_info *fec; int (*get_bus_freq)(struct device_node *); int ret = -ENOMEM, clock, speed; - if (!ofdev->dev.of_match) + match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev); + if (!match) return -EINVAL; - get_bus_freq = ofdev->dev.of_match->data; + get_bus_freq = match->data; new_bus = mdiobus_alloc(); if (!new_bus) diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c index c5ef62ceb84..1cd481c0420 100644 --- a/drivers/net/hydra.c +++ b/drivers/net/hydra.c @@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = { .ndo_open = hydra_open, .ndo_stop = hydra_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = 
eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; @@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z) 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e, }; - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; diff --git a/drivers/net/lantiq_etop.c b/drivers/net/lantiq_etop.c new file mode 100644 index 00000000000..45f252b7da3 --- /dev/null +++ b/drivers/net/lantiq_etop.c @@ -0,0 +1,805 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. + * + * Copyright (C) 2011 John Crispin <blogic@openwrt.org> + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/uaccess.h> +#include <linux/in.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/phy.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/mm.h> +#include <linux/platform_device.h> +#include <linux/ethtool.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/io.h> + +#include <asm/checksum.h> + +#include <lantiq_soc.h> +#include <xway_dma.h> +#include <lantiq_platform.h> + +#define LTQ_ETOP_MDIO 0x11804 +#define MDIO_REQUEST 0x80000000 +#define MDIO_READ 0x40000000 +#define MDIO_ADDR_MASK 0x1f +#define MDIO_ADDR_OFFSET 0x15 +#define MDIO_REG_MASK 0x1f +#define MDIO_REG_OFFSET 0x10 +#define MDIO_VAL_MASK 0xffff + +#define PPE32_CGEN 0x800 +#define LQ_PPE32_ENET_MAC_CFG 0x1840 + +#define LTQ_ETOP_ENETS0 0x11850 +#define LTQ_ETOP_MAC_DA0 0x1186C +#define LTQ_ETOP_MAC_DA1 0x11870 +#define LTQ_ETOP_CFG 0x16020 +#define LTQ_ETOP_IGPLEN 0x16080 + +#define MAX_DMA_CHAN 0x8 +#define MAX_DMA_CRC_LEN 0x4 +#define MAX_DMA_DATA_LEN 0x600 + +#define ETOP_FTCU BIT(28) +#define ETOP_MII_MASK 0xf +#define ETOP_MII_NORMAL 0xd +#define ETOP_MII_REVERSE 0xe +#define ETOP_PLEN_UNDER 0x40 +#define ETOP_CGEN 0x800 + +/* use 2 static channels for TX/RX */ +#define LTQ_ETOP_TX_CHANNEL 1 +#define LTQ_ETOP_RX_CHANNEL 6 +#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL) +#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL) + +#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x)) +#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y)) +#define ltq_etop_w32_mask(x, y, z) \ + ltq_w32_mask(x, y, ltq_etop_membase + (z)) + +#define DRV_VERSION "1.0" + +static void __iomem *ltq_etop_membase; + +struct ltq_etop_chan { + int idx; + int tx_free; + struct net_device *netdev; + struct napi_struct napi; + struct ltq_dma_channel dma; + struct sk_buff *skb[LTQ_DESC_NUM]; +}; + +struct ltq_etop_priv { + struct net_device *netdev; + struct ltq_eth_data *pldata; + struct resource *res; + + struct mii_bus *mii_bus; + struct phy_device *phydev; + + struct ltq_etop_chan ch[MAX_DMA_CHAN]; + int tx_free[MAX_DMA_CHAN >> 1]; + + spinlock_t lock; +}; + 
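A note for the channel code that follows: the driver and the DMA engine hand descriptors back and forth through the ctl word, and every RX/TX loop below repeats the same ownership test. As a condensed illustration (not part of the file), the condition could be written:

	/* the CPU may touch a descriptor only after the engine has
	 * cleared OWN and set C(omplete) */
	static inline int ltq_desc_complete(const struct ltq_dma_desc *desc)
	{
		return (desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C;
	}

ltq_etop_poll_rx(), ltq_etop_poll_tx() and ltq_etop_tx() below all open-code exactly this check.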
+static int +ltq_etop_alloc_skb(struct ltq_etop_chan *ch) +{ + ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN); + if (!ch->skb[ch->dma.desc]) + return -ENOMEM; + ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL, + ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN, + DMA_FROM_DEVICE); + ch->dma.desc_base[ch->dma.desc].addr = + CPHYSADDR(ch->skb[ch->dma.desc]->data); + ch->dma.desc_base[ch->dma.desc].ctl = + LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | + MAX_DMA_DATA_LEN; + skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN); + return 0; +} + +static void +ltq_etop_hw_receive(struct ltq_etop_chan *ch) +{ + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + struct sk_buff *skb = ch->skb[ch->dma.desc]; + int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + if (ltq_etop_alloc_skb(ch)) { + netdev_err(ch->netdev, + "failed to allocate new rx buffer, stopping DMA\n"); + ltq_dma_close(&ch->dma); + } + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + spin_unlock_irqrestore(&priv->lock, flags); + + skb_put(skb, len); + skb->dev = ch->netdev; + skb->protocol = eth_type_trans(skb, ch->netdev); + netif_receive_skb(skb); +} + +static int +ltq_etop_poll_rx(struct napi_struct *napi, int budget) +{ + struct ltq_etop_chan *ch = container_of(napi, + struct ltq_etop_chan, napi); + int rx = 0; + int complete = 0; + + while ((rx < budget) && !complete) { + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + ltq_etop_hw_receive(ch); + rx++; + } else { + complete = 1; + } + } + if (complete || !rx) { + napi_complete(&ch->napi); + ltq_dma_ack_irq(&ch->dma); + } + return rx; +} + +static int +ltq_etop_poll_tx(struct napi_struct *napi, int budget) +{ + struct ltq_etop_chan *ch = + container_of(napi, struct ltq_etop_chan, napi); + struct ltq_etop_priv *priv = netdev_priv(ch->netdev); + struct netdev_queue *txq = + netdev_get_tx_queue(ch->netdev, ch->idx >> 1); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + while ((ch->dma.desc_base[ch->tx_free].ctl & + (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) { + dev_kfree_skb_any(ch->skb[ch->tx_free]); + ch->skb[ch->tx_free] = NULL; + memset(&ch->dma.desc_base[ch->tx_free], 0, + sizeof(struct ltq_dma_desc)); + ch->tx_free++; + ch->tx_free %= LTQ_DESC_NUM; + } + spin_unlock_irqrestore(&priv->lock, flags); + + if (netif_tx_queue_stopped(txq)) + netif_tx_start_queue(txq); + napi_complete(&ch->napi); + ltq_dma_ack_irq(&ch->dma); + return 1; +} + +static irqreturn_t +ltq_etop_dma_irq(int irq, void *_priv) +{ + struct ltq_etop_priv *priv = _priv; + int ch = irq - LTQ_DMA_CH0_INT; + + napi_schedule(&priv->ch[ch].napi); + return IRQ_HANDLED; +} + +static void +ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + ltq_dma_free(&ch->dma); + if (ch->dma.irq) + free_irq(ch->dma.irq, priv); + if (IS_RX(ch->idx)) { + int desc; + for (desc = 0; desc < LTQ_DESC_NUM; desc++) + dev_kfree_skb_any(ch->skb[desc]); + } +} + +static void +ltq_etop_hw_exit(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + ltq_pmu_disable(PMU_PPE); + for (i = 0; i < MAX_DMA_CHAN; i++) + if (IS_TX(i) || IS_RX(i)) + ltq_etop_free_channel(dev, &priv->ch[i]); +} + +static int +ltq_etop_hw_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = 
netdev_priv(dev); + int i; + + ltq_pmu_enable(PMU_PPE); + + switch (priv->pldata->mii_mode) { + case PHY_INTERFACE_MODE_RMII: + ltq_etop_w32_mask(ETOP_MII_MASK, + ETOP_MII_REVERSE, LTQ_ETOP_CFG); + break; + + case PHY_INTERFACE_MODE_MII: + ltq_etop_w32_mask(ETOP_MII_MASK, + ETOP_MII_NORMAL, LTQ_ETOP_CFG); + break; + + default: + netdev_err(dev, "unknown mii mode %d\n", + priv->pldata->mii_mode); + return -ENOTSUPP; + } + + /* enable crc generation */ + ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG); + + ltq_dma_init_port(DMA_PORT_ETOP); + + for (i = 0; i < MAX_DMA_CHAN; i++) { + int irq = LTQ_DMA_CH0_INT + i; + struct ltq_etop_chan *ch = &priv->ch[i]; + + ch->idx = ch->dma.nr = i; + + if (IS_TX(i)) { + ltq_dma_alloc_tx(&ch->dma); + request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, + "etop_tx", priv); + } else if (IS_RX(i)) { + ltq_dma_alloc_rx(&ch->dma); + for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM; + ch->dma.desc++) + if (ltq_etop_alloc_skb(ch)) + return -ENOMEM; + ch->dma.desc = 0; + request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED, + "etop_rx", priv); + } + ch->dma.irq = irq; + } + return 0; +} + +static void +ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) +{ + strcpy(info->driver, "Lantiq ETOP"); + strcpy(info->bus_info, "internal"); + strcpy(info->version, DRV_VERSION); +} + +static int +ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_ethtool_gset(priv->phydev, cmd); +} + +static int +ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_ethtool_sset(priv->phydev, cmd); +} + +static int +ltq_etop_nway_reset(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + return phy_start_aneg(priv->phydev); +} + +static const struct ethtool_ops ltq_etop_ethtool_ops = { + .get_drvinfo = ltq_etop_get_drvinfo, + .get_settings = ltq_etop_get_settings, + .set_settings = ltq_etop_set_settings, + .nway_reset = ltq_etop_nway_reset, +}; + +static int +ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data) +{ + u32 val = MDIO_REQUEST | + ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | + ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) | + phy_data; + + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + ltq_etop_w32(val, LTQ_ETOP_MDIO); + return 0; +} + +static int +ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + u32 val = MDIO_REQUEST | MDIO_READ | + ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) | + ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET); + + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + ltq_etop_w32(val, LTQ_ETOP_MDIO); + while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST) + ; + val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK; + return val; +} + +static void +ltq_etop_mdio_link(struct net_device *dev) +{ + /* nothing to do */ +} + +static int +ltq_etop_mdio_probe(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + struct phy_device *phydev = NULL; + int phy_addr; + + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { + if (priv->mii_bus->phy_map[phy_addr]) { + phydev = priv->mii_bus->phy_map[phy_addr]; + break; + } + } + + if (!phydev) { + netdev_err(dev, "no PHY found\n"); + return -ENODEV; + } + + phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link, + 0, priv->pldata->mii_mode); + + if (IS_ERR(phydev)) { + netdev_err(dev, "Could not attach to 
PHY\n"); + return PTR_ERR(phydev); + } + + phydev->supported &= (SUPPORTED_10baseT_Half + | SUPPORTED_10baseT_Full + | SUPPORTED_100baseT_Half + | SUPPORTED_100baseT_Full + | SUPPORTED_Autoneg + | SUPPORTED_MII + | SUPPORTED_TP); + + phydev->advertising = phydev->supported; + priv->phydev = phydev; + pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n", + dev->name, phydev->drv->name, + dev_name(&phydev->dev), phydev->irq); + + return 0; +} + +static int +ltq_etop_mdio_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + int err; + + priv->mii_bus = mdiobus_alloc(); + if (!priv->mii_bus) { + netdev_err(dev, "failed to allocate mii bus\n"); + err = -ENOMEM; + goto err_out; + } + + priv->mii_bus->priv = dev; + priv->mii_bus->read = ltq_etop_mdio_rd; + priv->mii_bus->write = ltq_etop_mdio_wr; + priv->mii_bus->name = "ltq_mii"; + snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0); + priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!priv->mii_bus->irq) { + err = -ENOMEM; + goto err_out_free_mdiobus; + } + + for (i = 0; i < PHY_MAX_ADDR; ++i) + priv->mii_bus->irq[i] = PHY_POLL; + + if (mdiobus_register(priv->mii_bus)) { + err = -ENXIO; + goto err_out_free_mdio_irq; + } + + if (ltq_etop_mdio_probe(dev)) { + err = -ENXIO; + goto err_out_unregister_bus; + } + return 0; + +err_out_unregister_bus: + mdiobus_unregister(priv->mii_bus); +err_out_free_mdio_irq: + kfree(priv->mii_bus->irq); +err_out_free_mdiobus: + mdiobus_free(priv->mii_bus); +err_out: + return err; +} + +static void +ltq_etop_mdio_cleanup(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + phy_disconnect(priv->phydev); + mdiobus_unregister(priv->mii_bus); + kfree(priv->mii_bus->irq); + mdiobus_free(priv->mii_bus); +} + +static int +ltq_etop_open(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + for (i = 0; i < MAX_DMA_CHAN; i++) { + struct ltq_etop_chan *ch = &priv->ch[i]; + + if (!IS_TX(i) && (!IS_RX(i))) + continue; + ltq_dma_open(&ch->dma); + napi_enable(&ch->napi); + } + phy_start(priv->phydev); + netif_tx_start_all_queues(dev); + return 0; +} + +static int +ltq_etop_stop(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + int i; + + netif_tx_stop_all_queues(dev); + phy_stop(priv->phydev); + for (i = 0; i < MAX_DMA_CHAN; i++) { + struct ltq_etop_chan *ch = &priv->ch[i]; + + if (!IS_RX(i) && !IS_TX(i)) + continue; + napi_disable(&ch->napi); + ltq_dma_close(&ch->dma); + } + return 0; +} + +static int +ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) +{ + int queue = skb_get_queue_mapping(skb); + struct netdev_queue *txq = netdev_get_tx_queue(dev, queue); + struct ltq_etop_priv *priv = netdev_priv(dev); + struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1]; + struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc]; + int len; + unsigned long flags; + u32 byte_offset; + + len = skb->len < ETH_ZLEN ? 
ETH_ZLEN : skb->len; + + if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { + dev_kfree_skb_any(skb); + netdev_err(dev, "tx ring full\n"); + netif_tx_stop_queue(txq); + return NETDEV_TX_BUSY; + } + + /* dma needs to start on a 16 byte aligned address */ + byte_offset = CPHYSADDR(skb->data) % 16; + ch->skb[ch->dma.desc] = skb; + + dev->trans_start = jiffies; + + spin_lock_irqsave(&priv->lock, flags); + desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len, + DMA_TO_DEVICE)) - byte_offset; + wmb(); + desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | + LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK); + ch->dma.desc++; + ch->dma.desc %= LTQ_DESC_NUM; + spin_unlock_irqrestore(&priv->lock, flags); + + if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN) + netif_tx_stop_queue(txq); + + return NETDEV_TX_OK; +} + +static int +ltq_etop_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret = eth_change_mtu(dev, new_mtu); + + if (!ret) { + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, + LTQ_ETOP_IGPLEN); + spin_unlock_irqrestore(&priv->lock, flags); + } + return ret; +} + +static int +ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + + /* TODO: mii-tool reports "No MII transceiver present!." ?!*/ + return phy_mii_ioctl(priv->phydev, rq, cmd); +} + +static int +ltq_etop_set_mac_address(struct net_device *dev, void *p) +{ + int ret = eth_mac_addr(dev, p); + + if (!ret) { + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + /* store the mac for the unicast filter */ + spin_lock_irqsave(&priv->lock, flags); + ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0); + ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16, + LTQ_ETOP_MAC_DA1); + spin_unlock_irqrestore(&priv->lock, flags); + } + return ret; +} + +static void +ltq_etop_set_multicast_list(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + unsigned long flags; + + /* ensure that the unicast filter is not enabled in promiscuous mode */ + spin_lock_irqsave(&priv->lock, flags); + if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) + ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0); + else + ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0); + spin_unlock_irqrestore(&priv->lock, flags); +} + +static u16 +ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + /* we are currently only using the first queue */ + return 0; +} + +static int +ltq_etop_init(struct net_device *dev) +{ + struct ltq_etop_priv *priv = netdev_priv(dev); + struct sockaddr mac; + int err; + + ether_setup(dev); + dev->watchdog_timeo = 10 * HZ; + err = ltq_etop_hw_init(dev); + if (err) + goto err_hw; + ltq_etop_change_mtu(dev, 1500); + + memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr)); + if (!is_valid_ether_addr(mac.sa_data)) { + pr_warn("etop: invalid MAC, using random\n"); + random_ether_addr(mac.sa_data); + } + + err = ltq_etop_set_mac_address(dev, &mac); + if (err) + goto err_netdev; + ltq_etop_set_multicast_list(dev); + err = ltq_etop_mdio_init(dev); + if (err) + goto err_netdev; + return 0; + +err_netdev: + unregister_netdev(dev); + free_netdev(dev); +err_hw: + ltq_etop_hw_exit(dev); + return err; +} + +static void +ltq_etop_tx_timeout(struct net_device *dev) +{ + int err; + + ltq_etop_hw_exit(dev); + err = ltq_etop_hw_init(dev); + if (err) + goto err_hw; + 
dev->trans_start = jiffies; + netif_wake_queue(dev); + return; + +err_hw: + ltq_etop_hw_exit(dev); + netdev_err(dev, "failed to restart etop after TX timeout\n"); +} + +static const struct net_device_ops ltq_eth_netdev_ops = { + .ndo_open = ltq_etop_open, + .ndo_stop = ltq_etop_stop, + .ndo_start_xmit = ltq_etop_tx, + .ndo_change_mtu = ltq_etop_change_mtu, + .ndo_do_ioctl = ltq_etop_ioctl, + .ndo_set_mac_address = ltq_etop_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_multicast_list = ltq_etop_set_multicast_list, + .ndo_select_queue = ltq_etop_select_queue, + .ndo_init = ltq_etop_init, + .ndo_tx_timeout = ltq_etop_tx_timeout, +}; + +static int __init +ltq_etop_probe(struct platform_device *pdev) +{ + struct net_device *dev; + struct ltq_etop_priv *priv; + struct resource *res; + int err; + int i; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "failed to get etop resource\n"); + err = -ENOENT; + goto err_out; + } + + res = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "failed to request etop resource\n"); + err = -EBUSY; + goto err_out; + } + + ltq_etop_membase = devm_ioremap_nocache(&pdev->dev, + res->start, resource_size(res)); + if (!ltq_etop_membase) { + dev_err(&pdev->dev, "failed to remap etop engine %d\n", + pdev->id); + err = -ENOMEM; + goto err_out; + } + + dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4); + strcpy(dev->name, "eth%d"); + dev->netdev_ops = &ltq_eth_netdev_ops; + dev->ethtool_ops = &ltq_etop_ethtool_ops; + priv = netdev_priv(dev); + priv->res = res; + priv->pldata = dev_get_platdata(&pdev->dev); + priv->netdev = dev; + spin_lock_init(&priv->lock); + + for (i = 0; i < MAX_DMA_CHAN; i++) { + if (IS_TX(i)) + netif_napi_add(dev, &priv->ch[i].napi, + ltq_etop_poll_tx, 8); + else if (IS_RX(i)) + netif_napi_add(dev, &priv->ch[i].napi, + ltq_etop_poll_rx, 32); + priv->ch[i].netdev = dev; + } + + err = register_netdev(dev); + if (err) + goto err_free; + + platform_set_drvdata(pdev, dev); + return 0; + +err_free: + free_netdev(dev); +err_out: + return err; +} + +static int __devexit +ltq_etop_remove(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + + if (dev) { + netif_tx_stop_all_queues(dev); + ltq_etop_hw_exit(dev); + ltq_etop_mdio_cleanup(dev); + unregister_netdev(dev); + } + return 0; +} + +static struct platform_driver ltq_mii_driver = { + .remove = __devexit_p(ltq_etop_remove), + .driver = { + .name = "ltq_etop", + .owner = THIS_MODULE, + }, +}; + +static int __init +init_ltq_etop(void) +{ + int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe); + + if (ret) + pr_err("ltq_etop: Error registering platform driver!\n"); + return ret; +} + +static void __exit +exit_ltq_etop(void) +{ + platform_driver_unregister(&ltq_mii_driver); +} + +module_init(init_ltq_etop); +module_exit(exit_ltq_etop); + +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("Lantiq SoC ETOP"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c index 30be8c634eb..7298a34bc79 100644 --- a/drivers/net/ne-h8300.c +++ b/drivers/net/ne-h8300.c @@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev) #ifndef MODULE struct net_device * __init ne_probe(int unit) { - struct net_device *dev = alloc_ei_netdev(); + struct net_device *dev = ____alloc_ei_netdev(0); int err; if (!dev) @@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = { .ndo_open = 
ne_open, .ndo_stop = ne_close, - .ndo_start_xmit = ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; @@ -637,7 +637,7 @@ int init_module(void) int err; for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) { - struct net_device *dev = alloc_ei_netdev(); + struct net_device *dev = ____alloc_ei_netdev(0); if (!dev) break; if (io[this_dev]) { diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 2ef2f9cdefa..56d049a472d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_COPYBREAK_DEFAULT 256 #define PCH_GBE_PCI_BAR 1 +/* Macros for ML7223 */ +#define PCI_VENDOR_ID_ROHM 0x10db +#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013 + #define PCH_GBE_TX_WEIGHT 64 #define PCH_GBE_RX_WEIGHT 64 #define PCH_GBE_RX_BUFFER_WRITE 16 @@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION; #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \ PCH_GBE_CHIP_TYPE_INTERNAL | \ - PCH_GBE_RGMII_MODE_RGMII | \ - PCH_GBE_CRS_SEL \ + PCH_GBE_RGMII_MODE_RGMII \ ) /* Ethertype field values */ @@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, /* Write meta date of skb */ skb_put(skb, length); skb->protocol = eth_type_trans(skb, netdev); - if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) == - PCH_GBE_RXD_ACC_STAT_TCPIPOK) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { + if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) skb->ip_summed = CHECKSUM_NONE; - } + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + napi_gro_receive(&adapter->napi, skb); (*work_done)++; pr_debug("Receive skb->ip_summed: %d length: %d\n", @@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = { .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = (0xFFFF00) }, + {.vendor = PCI_VENDOR_ID_ROHM, + .device = PCI_DEVICE_ID_ROHM_ML7223_GBE, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = (PCI_CLASS_NETWORK_ETHERNET << 8), + .class_mask = (0xFFFF00) + }, /* required last entry */ {0} }; diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c index d98479030ef..3dd45ed61f0 100644 --- a/drivers/net/sfc/mcdi.c +++ b/drivers/net/sfc/mcdi.c @@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) return &nic_data->mcdi; } +static inline void +efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) +{ + struct siena_nic_data *nic_data = efx->nic_data; + value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); +} + +static inline void +efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) +{ + struct siena_nic_data *nic_data = efx->nic_data; + __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); +} + void efx_mcdi_init(struct efx_nic *efx) { struct efx_mcdi_iface *mcdi; @@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, size_t inlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned pdu 
= FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); - unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); + unsigned pdu = MCDI_PDU(efx); + unsigned doorbell = MCDI_DOORBELL(efx); unsigned int i; efx_dword_t hdr; u32 xflags, seqno; @@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, MCDI_HEADER_SEQ, seqno, MCDI_HEADER_XFLAGS, xflags); - efx_writed(efx, &hdr, pdu); + efx_mcdi_writed(efx, &hdr, pdu); - for (i = 0; i < inlen; i += 4) { - _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); - /* use wmb() within loop to inhibit write combining */ - wmb(); - } + for (i = 0; i < inlen; i += 4) + efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), + pdu + 4 + i); /* ring the doorbell with a distinctive value */ - _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); - wmb(); + EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); + efx_mcdi_writed(efx, &hdr, doorbell); } static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) { struct efx_mcdi_iface *mcdi = efx_mcdi(efx); - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int pdu = MCDI_PDU(efx); int i; BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(outlen & 3 || outlen >= 0x100); for (i = 0; i < outlen; i += 4) - *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); + efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); } static int efx_mcdi_poll(struct efx_nic *efx) @@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) struct efx_mcdi_iface *mcdi = efx_mcdi(efx); unsigned int time, finish; unsigned int respseq, respcmd, error; - unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); + unsigned int pdu = MCDI_PDU(efx); unsigned int rc, spins; efx_dword_t reg; @@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) time = get_seconds(); - rmb(); - efx_readd(efx, ®, pdu); + efx_mcdi_readd(efx, ®, pdu); /* All 1's indicates that shared memory is in reset (and is * not a valid header). 
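The mcdi.c hunks above route every MCDI access through the new efx_mcdi_readd()/efx_mcdi_writed() helpers, so both the request copy-in and the doorbell write hit the separately mapped shared memory rather than the main register BAR. A condensed sketch of that copy-in sequence, under the assumption of hypothetical names (mc_request_sketch, REQ_BASE and DOORBELL are stand-ins, not the driver's identifiers):

#include <linux/io.h>

#define REQ_BASE	0x40	/* hypothetical PDU offset in shared memory */
#define DOORBELL	0x80	/* hypothetical doorbell offset */

static void mc_request_sketch(void __iomem *mc_smem, const u8 *req, size_t len)
{
	size_t i;

	/* copy the request into shared memory one dword at a time */
	for (i = 0; i < len; i += 4)
		__raw_writel(*(const u32 *)(req + i), mc_smem + REQ_BASE + i);

	/* ring the doorbell with a distinctive value, as the patch does */
	__raw_writel(0x45789abc, mc_smem + DOORBELL);
}

Note that the explicit wmb() calls the old loop used to inhibit write combining are dropped in the patch; the nic.h hunk below records that the new mapping is always uncacheable, so combined writes are no longer a concern.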
Wait for it to come out reset before @@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx) respseq, mcdi->seqno); rc = EIO; } else if (error) { - efx_readd(efx, ®, pdu + 4); + efx_mcdi_readd(efx, ®, pdu + 4); switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { #define TRANSLATE_ERROR(name) \ case MC_CMD_ERR_ ## name: \ @@ -211,21 +222,21 @@ out: /* Test and clear MC-rebooted flag for this port/function */ int efx_mcdi_poll_reboot(struct efx_nic *efx) { - unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); + unsigned int addr = MCDI_REBOOT_FLAG(efx); efx_dword_t reg; uint32_t value; if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) return false; - efx_readd(efx, ®, addr); + efx_mcdi_readd(efx, ®, addr); value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); if (value == 0) return 0; EFX_ZERO_DWORD(reg); - efx_writed(efx, ®, addr); + efx_mcdi_writed(efx, ®, addr); if (value == MC_STATUS_DWORD_ASSERT) return -EINTR; diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 10f1cb79c14..9b29a8d7c44 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf) size = min_t(size_t, table->step, 16); + if (table->offset >= efx->type->mem_map_size) { + /* No longer mapped; return dummy data */ + memcpy(buf, "\xde\xc0\xad\xde", 4); + buf += table->rows * size; + continue; + } + for (i = 0; i < table->rows; i++) { switch (table->step) { case 4: /* 32-bit register or SRAM */ diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h index a42db6e35be..d91701abd33 100644 --- a/drivers/net/sfc/nic.h +++ b/drivers/net/sfc/nic.h @@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) /** * struct siena_nic_data - Siena NIC state * @mcdi: Management-Controller-to-Driver Interface + * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. * @wol_filter_id: Wake-on-LAN packet filter id */ struct siena_nic_data { struct efx_mcdi_iface mcdi; + void __iomem *mcdi_smem; int wol_filter_id; }; diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c index e4dd8986b1f..837869b71db 100644 --- a/drivers/net/sfc/siena.c +++ b/drivers/net/sfc/siena.c @@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx) efx_reado(efx, ®, FR_AZ_CS_DEBUG); efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; + /* Initialise MCDI */ + nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + + FR_CZ_MC_TREG_SMEM, + FR_CZ_MC_TREG_SMEM_STEP * + FR_CZ_MC_TREG_SMEM_ROWS); + if (!nic_data->mcdi_smem) { + netif_err(efx, probe, efx->net_dev, + "could not map MCDI at %llx+%x\n", + (unsigned long long)efx->membase_phys + + FR_CZ_MC_TREG_SMEM, + FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); + rc = -ENOMEM; + goto fail1; + } efx_mcdi_init(efx); /* Recover from a failed assertion before probing */ rc = efx_mcdi_handle_assertion(efx); if (rc) - goto fail1; + goto fail2; /* Let the BMC know that the driver is now in charge of link and * filter settings. 
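siena_probe_nic() now creates the MCDI mapping itself, and the hunks that follow unwind it on every failure path and again in siena_remove_nic(). A minimal sketch of that ioremap-in-probe shape, assuming hypothetical names (struct my_nic, SMEM_BASE, SMEM_SIZE, my_init_mcdi):

#include <linux/io.h>

#define SMEM_BASE	0x10000	/* hypothetical offset of the shared region */
#define SMEM_SIZE	0x1000	/* hypothetical size */

struct my_nic {
	phys_addr_t phys_base;
	void __iomem *smem;
};

static int my_probe_sketch(struct my_nic *nic)
{
	int rc;

	nic->smem = ioremap_nocache(nic->phys_base + SMEM_BASE, SMEM_SIZE);
	if (!nic->smem)
		return -ENOMEM;

	rc = my_init_mcdi(nic);		/* hypothetical firmware handshake */
	if (rc) {
		iounmap(nic->smem);	/* keep probe and remove symmetric */
		return rc;
	}
	return 0;
}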
We must do this before we reset the NIC */ @@ -280,6 +294,7 @@ fail4: fail3: efx_mcdi_drv_attach(efx, false, NULL); fail2: + iounmap(nic_data->mcdi_smem); fail1: kfree(efx->nic_data); return rc; @@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx) static void siena_remove_nic(struct efx_nic *efx) { + struct siena_nic_data *nic_data = efx->nic_data; + efx_nic_free_buffer(efx, &efx->irq_status); siena_reset_hw(efx, RESET_TYPE_ALL); @@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx) efx_mcdi_drv_attach(efx, false, NULL); /* Tear down the private nic state */ - kfree(efx->nic_data); + iounmap(nic_data->mcdi_smem); + kfree(nic_data); efx->nic_data = NULL; } @@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = { .default_mac_ops = &efx_mcdi_mac_operations, .revision = EFX_REV_SIENA_A0, - .mem_map_size = (FR_CZ_MC_TREG_SMEM + - FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), + .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 86cbb9ea2f2..8ec1a9a0bb9 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c @@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty) /* Done. We have linked the TTY line to a channel. */ rtnl_unlock(); tty->receive_room = 65536; /* We don't flow control */ - return sl->dev->base_addr; + + /* TTY layer expects 0 on success */ + return 0; err_free_bufs: sl_free_bufs(sl); diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index eb4f59fb01e..bff2f7999ff 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void) #endif #ifdef CONFIG_SBUS +static const struct of_device_id hme_sbus_match[]; static int __devinit hme_sbus_probe(struct platform_device *op) { + const struct of_device_id *match; struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); int is_qfe; - if (!op->dev.of_match) + match = of_match_device(hme_sbus_match, &op->dev); + if (!match) return -EINVAL; - is_qfe = (op->dev.of_match->data != NULL); + is_qfe = (match->data != NULL); if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) is_qfe = 1; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index a301479ecc6..c924ea2bce0 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -567,7 +567,7 @@ static const struct usb_device_id products [] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = 0, + .driver_info = (unsigned long)&wwan_info, }, /* diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index 7d42f9a2c06..81126ff85e0 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -65,6 +65,7 @@ #define IPHETH_USBINTF_PROTO 1 #define IPHETH_BUF_SIZE 1516 +#define IPHETH_IP_ALIGN 2 /* padding at front of URB */ #define IPHETH_TX_TIMEOUT (5 * HZ) #define IPHETH_INTFNUM 2 @@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb) return; } - len = urb->actual_length; - buf = urb->transfer_buffer; + if (urb->actual_length <= IPHETH_IP_ALIGN) { + dev->net->stats.rx_length_errors++; + return; + } + len = urb->actual_length - IPHETH_IP_ALIGN; + buf = urb->transfer_buffer + IPHETH_IP_ALIGN; - skb = dev_alloc_skb(NET_IP_ALIGN + len); + skb = dev_alloc_skb(len); if (!skb) { err("%s: 
dev_alloc_skb: -ENOMEM", __func__); dev->net->stats.rx_dropped++; return; } - skb_reserve(skb, NET_IP_ALIGN); - memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN); + memcpy(skb_put(skb, len), buf, len); skb->dev = dev->net; skb->protocol = eth_type_trans(skb, dev->net); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 009bba3d753..9ab439d144e 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net) struct driver_info *info = dev->driver_info; int retval; + clear_bit(EVENT_DEV_OPEN, &dev->flags); netif_stop_queue (net); netif_info(dev, ifdown, dev->net, @@ -1524,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf) smp_mb(); clear_bit(EVENT_DEV_ASLEEP, &dev->flags); spin_unlock_irq(&dev->txq.lock); - if (!(dev->txq.qlen >= TX_QLEN(dev))) - netif_start_queue(dev->net); - tasklet_schedule (&dev->bh); + + if (test_bit(EVENT_DEV_OPEN, &dev->flags)) { + if (!(dev->txq.qlen >= TX_QLEN(dev))) + netif_start_queue(dev->net); + tasklet_schedule (&dev->bh); + } } return 0; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 0d47c3a0530..c16ed961153 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -178,6 +178,7 @@ static void vmxnet3_process_events(struct vmxnet3_adapter *adapter) { int i; + unsigned long flags; u32 events = le32_to_cpu(adapter->shared->ecr); if (!events) return; @@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter) /* Check if there is an error on xmit/recv queues */ if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { - spin_lock(&adapter->cmd_lock); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS); - spin_unlock(&adapter->cmd_lock); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); for (i = 0; i < adapter->num_tx_queues; i++) if (adapter->tqd_start[i].status.stopped) @@ -2733,13 +2734,14 @@ static void vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) { u32 cfg; + unsigned long flags; /* intr settings */ - spin_lock(&adapter->cmd_lock); + spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_CONF_INTR); cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); - spin_unlock(&adapter->cmd_lock); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); adapter->intr.type = cfg & 0x3; adapter->intr.mask_mode = (cfg >> 2) & 0x3; diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 51f2ef142a5..976467253d2 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data) /* toggle the LRO feature*/ netdev->features ^= NETIF_F_LRO; + /* Update private LRO flag */ + adapter->lro = lro_requested; + /* update harware LRO capability accordingly */ if (lro_requested) adapter->shared->devRead.misc.uptFeatures |= diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 17d04ff8d67..1482fa65083 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class) static void ath9k_flush(struct ieee80211_hw *hw, bool drop) { struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ath_common *common = 
ath9k_hw_common(ah); int timeout = 200; /* ms */ int i, j; @@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop) cancel_delayed_work_sync(&sc->tx_complete_work); + if (sc->sc_flags & SC_OP_INVALID) { + ath_dbg(common, ATH_DBG_ANY, "Device not present\n"); + mutex_unlock(&sc->mutex); + return; + } + if (drop) timeout = 1; diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index c1511b14b23..42db0fc8b92 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c @@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed) goto set_ch_out; } + if (priv->iw_mode == NL80211_IFTYPE_ADHOC && + !iwl_legacy_is_channel_ibss(ch_info)) { + IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n"); + ret = -EINVAL; + goto set_ch_out; + } + spin_lock_irqsave(&priv->lock, flags); for_each_context(priv, ctx) { diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h index 9ee849d669f..f43ac1eb901 100644 --- a/drivers/net/wireless/iwlegacy/iwl-dev.h +++ b/drivers/net/wireless/iwlegacy/iwl-dev.h @@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch) return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; } +static inline int +iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch) +{ + return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0; +} + static inline void __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page) { diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index 7e8a658b767..f3ac62431a3 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c @@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv) cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n"); - list_del(&cmdnode->list); spin_lock_irqsave(&priv->driver_lock, flags); + list_del(&cmdnode->list); lbs_complete_command(priv, cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv) (priv->psstate == PS_STATE_PRE_SLEEP)) { lbs_deb_host( "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n"); - list_del(&cmdnode->list); spin_lock_irqsave(&priv->driver_lock, flags); + list_del(&cmdnode->list); lbs_complete_command(priv, cmdnode, 0); spin_unlock_irqrestore(&priv->driver_lock, flags); priv->needtowakeup = 1; @@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv) "EXEC_NEXT_CMD: sending EXIT_PS\n"); } } + spin_lock_irqsave(&priv->driver_lock, flags); list_del(&cmdnode->list); + spin_unlock_irqrestore(&priv->driver_lock, flags); lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n", le16_to_cpu(cmd->command)); lbs_submit_command(priv, cmdnode); diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index b78a38d9172..8c7c522a056 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c @@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, board = z->resource.start; ioaddr = board+cards[i].offset; - dev = alloc_ei_netdev(); + dev = ____alloc_ei_netdev(0); if (!dev) return -ENOMEM; if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) { @@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z, static const struct net_device_ops zorro8390_netdev_ops = { .ndo_open = zorro8390_open, .ndo_stop = zorro8390_close, - .ndo_start_xmit = 
ei_start_xmit, - .ndo_tx_timeout = ei_tx_timeout, - .ndo_get_stats = ei_get_stats, - .ndo_set_multicast_list = ei_set_multicast_list, + .ndo_start_xmit = __ei_start_xmit, + .ndo_tx_timeout = __ei_tx_timeout, + .ndo_get_stats = __ei_get_stats, + .ndo_set_multicast_list = __ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = ei_poll, + .ndo_poll_controller = __ei_poll, #endif }; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ebf51ad1b71..a806cb321d2 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, } size0 = calculate_iosize(size, min_size, size1, resource_size(b_res), 4096); - size1 = !add_size? size0: + size1 = (!add_head || (add_head && !add_size)) ? size0 : calculate_iosize(size, min_size+add_size, size1, resource_size(b_res), 4096); if (!size0 && !size1) { @@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, align += aligns[order]; } size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align); - size1 = !add_size ? size : + size1 = (!add_head || (add_head && !add_size)) ? size0 : calculate_memsize(size, min_size+add_size, 0, resource_size(b_res), min_align); if (!size0 && !size1) { diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c index ac2701b22e7..043ee3136e4 100644 --- a/drivers/rapidio/switches/idt_gen2.c +++ b/drivers/rapidio/switches/idt_gen2.c @@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, else table++; + if (route_port == RIO_INVALID_ROUTE) + route_port = IDT_DEFAULT_ROUTE; + rio_mport_write_config_32(mport, destid, hopcount, LOCAL_RTE_CONF_DESTID_SEL, table); @@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_handle = idtg2_em_handler; rdev->rswitch->sw_sysfs = idtg2_sysfs; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); + } + return 0; } diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c index 3a971077e7b..d06ee2d44b4 100644 --- a/drivers/rapidio/switches/idtcps.c +++ b/drivers/rapidio/switches/idtcps.c @@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount, { u32 result; + if (route_port == RIO_INVALID_ROUTE) + route_port = CPS_DEFAULT_ROUTE; + if (table == RIO_GLOBAL_TABLE) { rio_mport_write_config_32(mport, destid, hopcount, RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid); @@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum) /* set TVAL = ~50us */ rio_write_config_32(rdev, rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE); } return 0; diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c index 1a62934bfeb..db8b8028988 100644 --- a/drivers/rapidio/switches/tsi57x.c +++ b/drivers/rapidio/switches/tsi57x.c @@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum) rdev->rswitch->em_init = tsi57x_em_init; rdev->rswitch->em_handle = tsi57x_em_handler; + if (do_enum) { + /* Ensure that default routing is disabled on startup */ + 
rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, + RIO_INVALID_ROUTE); + } + return 0; } diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index 8d46838dff8..755e1fe914a 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c @@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) goto fail2; } + platform_set_drvdata(pdev, davinci_rtc); + davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &davinci_rtc_ops, THIS_MODULE); if (IS_ERR(davinci_rtc->rtc)) { @@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL); - platform_set_drvdata(pdev, davinci_rtc); - device_init_wakeup(&pdev->dev, 0); return 0; @@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) fail4: rtc_device_unregister(davinci_rtc->rtc); fail3: + platform_set_drvdata(pdev, NULL); iounmap(davinci_rtc->base); fail2: release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size); diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c index 60ce6960082..47e681df31e 100644 --- a/drivers/rtc/rtc-ds1286.c +++ b/drivers/rtc/rtc-ds1286.c @@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); rtc = rtc_device_register("ds1286", &pdev->dev, &ds1286_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev) goto out; } priv->rtc = rtc; - platform_set_drvdata(pdev, priv); return 0; out: diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c index 11ae64dcbf3..335551d333b 100644 --- a/drivers/rtc/rtc-ep93xx.c +++ b/drivers/rtc/rtc-ep93xx.c @@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return -ENXIO; pdev->dev.platform_data = ep93xx_rtc; + platform_set_drvdata(pdev, rtc); rtc = rtc_device_register(pdev->name, &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE); @@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) goto exit; } - platform_set_drvdata(pdev, rtc); - err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files); if (err) goto fail; @@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev) return 0; fail: - platform_set_drvdata(pdev, NULL); rtc_device_unregister(rtc); exit: + platform_set_drvdata(pdev, NULL); pdev->dev.platform_data = NULL; return err; } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 69fe664a222..eda128fc1d3 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client, goto exit; } + clientdata->features = id->driver_data; + i2c_set_clientdata(client, clientdata); + rtc = rtc_device_register(client->name, &client->dev, &m41t80_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { @@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client, } clientdata->rtc = rtc; - clientdata->features = id->driver_data; - i2c_set_clientdata(client, clientdata); /* Make sure HT (Halt Update) bit is cleared */ rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR); diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c index 20494b5edc3..3bc046f427e 100644 --- a/drivers/rtc/rtc-max8925.c +++ b/drivers/rtc/rtc-max8925.c @@ -258,6 +258,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) } dev_set_drvdata(&pdev->dev, info); 
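The rtc probe fixes in this series (davinci, ds1286, ep93xx and m41t80 above, and the remaining rtc hunks below) all follow one rule: publish drvdata before rtc_device_register(), because the RTC ops can be invoked as soon as the device exists, and clear it again on the error path. A minimal sketch of that ordering, with hypothetical names (struct foo_rtc, foo_rtc_ops):

#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/slab.h>

struct foo_rtc {
	struct rtc_device *rtc;
};

static const struct rtc_class_ops foo_rtc_ops;	/* hypothetical ops */

static int __devinit foo_rtc_probe(struct platform_device *pdev)
{
	struct foo_rtc *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	/* visible to the ops before the device is registered */
	platform_set_drvdata(pdev, priv);

	priv->rtc = rtc_device_register(pdev->name, &pdev->dev,
					&foo_rtc_ops, THIS_MODULE);
	if (IS_ERR(priv->rtc)) {
		platform_set_drvdata(pdev, NULL);
		kfree(priv);
		return PTR_ERR(priv->rtc);
	}
	return 0;
}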
+ /* XXX - isn't this redundant? */ + platform_set_drvdata(pdev, info); info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev, &max8925_rtc_ops, THIS_MODULE); @@ -267,10 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev) goto out_rtc; } - platform_set_drvdata(pdev, info); - return 0; out_rtc: + platform_set_drvdata(pdev, NULL); free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info); out_irq: kfree(info); diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c index 3f7bc6b9fef..2e48aa60427 100644 --- a/drivers/rtc/rtc-max8998.c +++ b/drivers/rtc/rtc-max8998.c @@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) info->rtc = max8998->rtc; info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0; + platform_set_drvdata(pdev, info); + info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev, &max8998_rtc_ops, THIS_MODULE); @@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) goto out_rtc; } - platform_set_drvdata(pdev, info); - ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0, "rtc-alarm0", info); @@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev) return 0; out_rtc: + platform_set_drvdata(pdev, NULL); kfree(info); return ret; } diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c index c5ac03793e7..a1a278bc340 100644 --- a/drivers/rtc/rtc-mc13xxx.c +++ b/drivers/rtc/rtc-mc13xxx.c @@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev) if (ret) goto err_alarm_irq_request; + mc13xxx_unlock(mc13xxx); + priv->rtc = rtc_device_register(pdev->name, &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE); if (IS_ERR(priv->rtc)) { ret = PTR_ERR(priv->rtc); + mc13xxx_lock(mc13xxx); + mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv); err_alarm_irq_request: @@ -365,12 +369,12 @@ err_reset_irq_status: mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv); err_reset_irq_request: + mc13xxx_unlock(mc13xxx); + platform_set_drvdata(pdev, NULL); kfree(priv); } - mc13xxx_unlock(mc13xxx); - return ret; } diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c index 67820626e18..fcb113c1112 100644 --- a/drivers/rtc/rtc-msm6242.c +++ b/drivers/rtc/rtc-msm6242.c @@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) error = -ENOMEM; goto out_free_priv; } + platform_set_drvdata(dev, priv); rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops, THIS_MODULE); @@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev) } priv->rtc = rtc; - platform_set_drvdata(dev, priv); return 0; out_unmap: + platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 826ab64a8fa..d814417bee8 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) goto exit_put_clk; } - rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { - ret = PTR_ERR(rtc); - goto exit_put_clk; - } - - pdata->rtc = rtc; platform_set_drvdata(pdev, pdata); /* Configure and enable the RTC */ @@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct platform_device *pdev) pdata->irq = -1; } + rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc)) { + ret = PTR_ERR(rtc); + goto exit_clr_drvdata; + } + + pdata->rtc 
= rtc; + return 0; +exit_clr_drvdata: + platform_set_drvdata(pdev, NULL); exit_put_clk: clk_disable(pdata->clk); clk_put(pdata->clk); diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c index a633abc4289..cd4f198cc2e 100644 --- a/drivers/rtc/rtc-pcap.c +++ b/drivers/rtc/rtc-pcap.c @@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent); + platform_set_drvdata(pdev, pcap_rtc); + pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev, &pcap_rtc_ops, THIS_MODULE); if (IS_ERR(pcap_rtc->rtc)) { @@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev) goto fail_rtc; } - platform_set_drvdata(pdev, pcap_rtc); timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ); alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA); @@ -177,6 +178,7 @@ fail_alarm: fail_timer: rtc_device_unregister(pcap_rtc->rtc); fail_rtc: + platform_set_drvdata(pdev, NULL); kfree(pcap_rtc); return err; } diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c index 694da39b6dd..359da6d020b 100644 --- a/drivers/rtc/rtc-rp5c01.c +++ b/drivers/rtc/rtc-rp5c01.c @@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) spin_lock_init(&priv->lock); + platform_set_drvdata(dev, priv); + rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { error = PTR_ERR(rtc); goto out_unmap; } - priv->rtc = rtc; - platform_set_drvdata(dev, priv); error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr); if (error) @@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev) out_unregister: rtc_device_unregister(rtc); out_unmap: + platform_set_drvdata(dev, NULL); iounmap(priv->regs); out_free_priv: kfree(priv); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index b3466c491cd..16512ecae31 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -46,6 +46,7 @@ static struct clk *rtc_clk; static void __iomem *s3c_rtc_base; static int s3c_rtc_alarmno = NO_IRQ; static int s3c_rtc_tickno = NO_IRQ; +static bool wake_en; static enum s3c_cpu_type s3c_rtc_cpu_type; static DEFINE_SPINLOCK(s3c_rtc_pie_lock); @@ -562,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state) } s3c_rtc_enable(pdev, 0); - if (device_may_wakeup(&pdev->dev)) - enable_irq_wake(s3c_rtc_alarmno); + if (device_may_wakeup(&pdev->dev) && !wake_en) { + if (enable_irq_wake(s3c_rtc_alarmno) == 0) + wake_en = true; + else + dev_err(&pdev->dev, "enable_irq_wake failed\n"); + } return 0; } @@ -579,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev) writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); } - if (device_may_wakeup(&pdev->dev)) + if (device_may_wakeup(&pdev->dev) && wake_en) { disable_irq_wake(s3c_rtc_alarmno); + wake_en = false; + } return 0; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 475e603fc58..86b6f1cc1b1 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) static inline int _dasd_term_running_cqr(struct dasd_device *device) { struct dasd_ccw_req *cqr; + int rc; if (list_empty(&device->ccw_queue)) return 0; cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); - return device->discipline->term_IO(cqr); + rc = device->discipline->term_IO(cqr); + if (!rc) + /* + * CQR terminated because a more important request is pending. 
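The rtc-s3c hunk above guards enable_irq_wake()/disable_irq_wake() with a wake_en flag so the calls stay balanced even if suspend runs twice without an intervening resume. The same guard pattern reduced to its core, with hypothetical names (foo_irq, foo_suspend, foo_resume):

#include <linux/device.h>
#include <linux/interrupt.h>

static unsigned int foo_irq;	/* hypothetical wakeup interrupt */
static bool wake_en;

static int foo_suspend(struct device *dev)
{
	if (device_may_wakeup(dev) && !wake_en) {
		if (enable_irq_wake(foo_irq) == 0)
			wake_en = true;
		else
			dev_err(dev, "enable_irq_wake failed\n");
	}
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* only undo a wake enable that actually succeeded */
	if (device_may_wakeup(dev) && wake_en) {
		disable_irq_wake(foo_irq);
		wake_en = false;
	}
	return 0;
}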
+ * Undo decreasing of retry counter because this is + * not an error case. + */ + cqr->retries++; + return rc; } int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 4b60ede07f0..be55fb2b1b1 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned) return; new_incr->rn = rn; new_incr->standby = standby; + if (!standby) + new_incr->usecount = 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 83cea9a55e2..1b3924c2fff 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c @@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device) disk->major = tapeblock_major; disk->first_minor = device->first_minor; disk->fops = &tapeblock_fops; - disk->events = DISK_EVENT_MEDIA_CHANGE; disk->private_data = tape_get_device(device); disk->queue = blkdat->request_queue; set_capacity(disk, 0); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index e2d45c91b8e..9689d41c788 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = { .use_clustering = ENABLE_CLUSTERING, }; +static const struct of_device_id qpti_match[]; static int __devinit qpti_sbus_probe(struct platform_device *op) { + const struct of_device_id *match; struct scsi_host_template *tpnt; struct device_node *dp = op->dev.of_node; struct Scsi_Host *host; @@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op) static int nqptis; const char *fcode; - if (!op->dev.of_match) + match = of_match_device(qpti_match, &op->dev); + if (!match) return -EINVAL; - tpnt = op->dev.of_match->data; + tpnt = match->data; /* Sometimes Antares cards come up not completely * setup, and we get a report of a zero IRQ. 
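sunhme.c and qlogicpti.c above (and of_serial.c and fsl_qe_udc.c further down) all receive the same conversion: the removed op->dev.of_match pointer is replaced by an explicit of_match_device() lookup, with the match table forward-declared so the probe function can precede it, exactly as the hunks show. The pattern in isolation, using a hypothetical driver (foo_probe, foo_setup and the "acme,foo-a" compatible are assumptions):

#include <linux/of_device.h>
#include <linux/platform_device.h>

#define FOO_TYPE_A	1	/* hypothetical variant id */

static const struct of_device_id foo_match[];

static int __devinit foo_probe(struct platform_device *op)
{
	const struct of_device_id *match;
	unsigned long variant;

	match = of_match_device(foo_match, &op->dev);
	if (!match)
		return -EINVAL;
	variant = (unsigned long)match->data;	/* per-entry driver data */
	return foo_setup(op, variant);		/* hypothetical */
}

static const struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo-a", .data = (void *)FOO_TYPE_A },
	{},
};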
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 0bac91e7237..ec1803a4872 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache; */ #define SCSI_QUEUE_DELAY 3 -static void scsi_run_queue(struct request_queue *q); - /* * Function: scsi_unprep_request() * @@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy) blk_requeue_request(q, cmd->request); spin_unlock_irqrestore(q->queue_lock, flags); - scsi_run_queue(q); + kblockd_schedule_work(q, &device->requeue_work); return 0; } @@ -438,7 +436,11 @@ static void scsi_run_queue(struct request_queue *q) continue; } - blk_run_queue_async(sdev->request_queue); + spin_unlock(shost->host_lock); + spin_lock(sdev->request_queue->queue_lock); + __blk_run_queue(sdev->request_queue); + spin_unlock(sdev->request_queue->queue_lock); + spin_lock(shost->host_lock); } /* put any unprocessed entries back */ list_splice(&starved_list, &shost->starved_list); @@ -447,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q) blk_run_queue(q); } +void scsi_requeue_run_queue(struct work_struct *work) +{ + struct scsi_device *sdev; + struct request_queue *q; + + sdev = container_of(work, struct scsi_device, requeue_work); + q = sdev->request_queue; + scsi_run_queue(q); +} + /* * Function: scsi_requeue_command() * diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 087821fac8f..58584dc0724 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, int display_failure_msg = 1, ret; struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); extern void scsi_evt_thread(struct work_struct *work); + extern void scsi_requeue_run_queue(struct work_struct *work); sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, GFP_ATOMIC); @@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, INIT_LIST_HEAD(&sdev->event_list); spin_lock_init(&sdev->list_lock); INIT_WORK(&sdev->event_work, scsi_evt_thread); + INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); sdev->sdev_gendev.parent = get_device(&starget->dev); sdev->sdev_target = starget; diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c index 6f34963b3c6..7ad48585c5e 100644 --- a/drivers/ssb/pci.c +++ b/drivers/ssb/pci.c @@ -662,7 +662,6 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out, static int ssb_pci_sprom_get(struct ssb_bus *bus, struct ssb_sprom *sprom) { - const struct ssb_sprom *fallback; int err; u16 *buf; @@ -707,10 +706,17 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, if (err) { /* All CRC attempts failed. * Maybe there is no SPROM on the device? - * If we have a fallback, use that. 
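The scsi_lib.c change above stops __scsi_queue_insert() from running the queue synchronously in the caller's context; it schedules a per-device work item on kblockd instead, and scsi_scan.c wires that work up when the device is allocated. The shape of that deferral, reduced to hypothetical names (struct foo_dev, run_foo_queue as a stand-in for scsi_run_queue()):

#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct request_queue *queue;
	struct work_struct requeue_work;
};

static void foo_requeue_work(struct work_struct *work)
{
	struct foo_dev *fdev = container_of(work, struct foo_dev,
					    requeue_work);

	run_foo_queue(fdev->queue);	/* stand-in for scsi_run_queue() */
}

static void foo_dev_init(struct foo_dev *fdev)
{
	INIT_WORK(&fdev->requeue_work, foo_requeue_work);
}

static void foo_requeue(struct foo_dev *fdev)
{
	/* defer the queue run to kblockd's process context instead of
	 * re-running it directly from the requeue path */
	kblockd_schedule_work(fdev->queue, &fdev->requeue_work);
}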
*/ - fallback = ssb_get_fallback_sprom(); - if (fallback) { - memcpy(sprom, fallback, sizeof(*sprom)); + * Now we ask the arch code if there is some sprom + * available for this device in some other storage */ + err = ssb_fill_sprom_with_fallback(bus, sprom); + if (err) { + ssb_printk(KERN_WARNING PFX "WARNING: Using" + " fallback SPROM failed (err %d)\n", + err); + } else { + ssb_dprintk(KERN_DEBUG PFX "Using SPROM" + " revision %d provided by" + " platform.\n", sprom->revision); err = 0; goto out_free; } diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c index 5f34d7a3e3a..45ff0e3a382 100644 --- a/drivers/ssb/sprom.c +++ b/drivers/ssb/sprom.c @@ -17,7 +17,7 @@ #include <linux/slab.h> -static const struct ssb_sprom *fallback_sprom; +static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out); static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, @@ -145,36 +145,43 @@ out: } /** - * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. + * ssb_arch_register_fallback_sprom - Registers a method providing a + * fallback SPROM if no SPROM is found. * - * @sprom: The SPROM data structure to register. + * @sprom_callback: The callback function. * - * With this function the architecture implementation may register a fallback - * SPROM data structure. The fallback is only used for PCI based SSB devices, - * where no valid SPROM can be found in the shadow registers. + * With this function the architecture implementation may register a + * callback handler which fills the SPROM data structure. The fallback is + * only used for PCI based SSB devices, where no valid SPROM can be found + * in the shadow registers. * - * This function is useful for weird architectures that have a half-assed SSB device - * hardwired to their PCI bus. + * This function is useful for weird architectures that have a half-assed + * SSB device hardwired to their PCI bus. * - * Note that it does only work with PCI attached SSB devices. PCMCIA devices currently - * don't use this fallback. - * Architectures must provide the SPROM for native SSB devices anyway, - * so the fallback also isn't used for native devices. + * Note that it does only work with PCI attached SSB devices. PCMCIA + * devices currently don't use this fallback. + * Architectures must provide the SPROM for native SSB devices anyway, so + * the fallback also isn't used for native devices. * - * This function is available for architecture code, only. So it is not exported. + * This function is available for architecture code, only. So it is not + * exported. 
*/ -int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) +int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus, + struct ssb_sprom *out)) { - if (fallback_sprom) + if (get_fallback_sprom) return -EEXIST; - fallback_sprom = sprom; + get_fallback_sprom = sprom_callback; return 0; } -const struct ssb_sprom *ssb_get_fallback_sprom(void) +int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out) { - return fallback_sprom; + if (!get_fallback_sprom) + return -ENOENT; + + return get_fallback_sprom(bus, out); } /* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h index 0331139a726..77653014db0 100644 --- a/drivers/ssb/ssb_private.h +++ b/drivers/ssb/ssb_private.h @@ -171,7 +171,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus, const char *buf, size_t count, int (*sprom_check_crc)(const u16 *sprom, size_t size), int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); -extern const struct ssb_sprom *ssb_get_fallback_sprom(void); +extern int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, + struct ssb_sprom *out); /* core.c */ diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 80484af781e..b1f0f83b870 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1391,6 +1391,14 @@ config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE help Support for Console on the NWP serial ports. +config SERIAL_LANTIQ + bool "Lantiq serial driver" + depends on LANTIQ + select SERIAL_CORE + select SERIAL_CORE_CONSOLE + help + Support for console and UART on Lantiq SoCs. + config SERIAL_QE tristate "Freescale QUICC Engine serial port support" depends on QUICC_ENGINE diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile index fee0690ef8e..35276043d9d 100644 --- a/drivers/tty/serial/Makefile +++ b/drivers/tty/serial/Makefile @@ -94,3 +94,4 @@ obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o +obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c new file mode 100644 index 00000000000..58cf279ed87 --- /dev/null +++ b/drivers/tty/serial/lantiq.c @@ -0,0 +1,756 @@ +/* + * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
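The sprom.c rework above replaces the stored fallback-SPROM data pointer with a registered callback, so the architecture can fill in SPROM contents per bus instead of handing over one static copy. An arch-side user might look like the following sketch (my_board_sprom and the init hook are assumptions, not code from the patch):

#include <linux/ssb/ssb.h>
#include <linux/string.h>

static struct ssb_sprom my_board_sprom;	/* hypothetical board data */

static int my_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
{
	/* the fallback only applies to PCI-attached SSB devices */
	if (bus->bustype != SSB_BUSTYPE_PCI)
		return -ENOENT;
	memcpy(out, &my_board_sprom, sizeof(*out));
	return 0;
}

static int __init my_sprom_init(void)
{
	/* returns -EEXIST if a callback is already registered */
	return ssb_arch_register_fallback_sprom(my_get_fallback_sprom);
}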
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * Copyright (C) 2004 Infineon IFAP DC COM CPE + * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2007 John Crispin <blogic@openwrt.org> + * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com> + */ + +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/init.h> +#include <linux/console.h> +#include <linux/sysrq.h> +#include <linux/device.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/serial_core.h> +#include <linux/serial.h> +#include <linux/platform_device.h> +#include <linux/io.h> +#include <linux/clk.h> + +#include <lantiq_soc.h> + +#define PORT_LTQ_ASC 111 +#define MAXPORTS 2 +#define UART_DUMMY_UER_RX 1 +#define DRVNAME "ltq_asc" +#ifdef __BIG_ENDIAN +#define LTQ_ASC_TBUF (0x0020 + 3) +#define LTQ_ASC_RBUF (0x0024 + 3) +#else +#define LTQ_ASC_TBUF 0x0020 +#define LTQ_ASC_RBUF 0x0024 +#endif +#define LTQ_ASC_FSTAT 0x0048 +#define LTQ_ASC_WHBSTATE 0x0018 +#define LTQ_ASC_STATE 0x0014 +#define LTQ_ASC_IRNCR 0x00F8 +#define LTQ_ASC_CLC 0x0000 +#define LTQ_ASC_ID 0x0008 +#define LTQ_ASC_PISEL 0x0004 +#define LTQ_ASC_TXFCON 0x0044 +#define LTQ_ASC_RXFCON 0x0040 +#define LTQ_ASC_CON 0x0010 +#define LTQ_ASC_BG 0x0050 +#define LTQ_ASC_IRNREN 0x00F4 + +#define ASC_IRNREN_TX 0x1 +#define ASC_IRNREN_RX 0x2 +#define ASC_IRNREN_ERR 0x4 +#define ASC_IRNREN_TX_BUF 0x8 +#define ASC_IRNCR_TIR 0x1 +#define ASC_IRNCR_RIR 0x2 +#define ASC_IRNCR_EIR 0x4 + +#define ASCOPT_CSIZE 0x3 +#define TXFIFO_FL 1 +#define RXFIFO_FL 1 +#define ASCCLC_DISS 0x2 +#define ASCCLC_RMCMASK 0x0000FF00 +#define ASCCLC_RMCOFFSET 8 +#define ASCCON_M_8ASYNC 0x0 +#define ASCCON_M_7ASYNC 0x2 +#define ASCCON_ODD 0x00000020 +#define ASCCON_STP 0x00000080 +#define ASCCON_BRS 0x00000100 +#define ASCCON_FDE 0x00000200 +#define ASCCON_R 0x00008000 +#define ASCCON_FEN 0x00020000 +#define ASCCON_ROEN 0x00080000 +#define ASCCON_TOEN 0x00100000 +#define ASCSTATE_PE 0x00010000 +#define ASCSTATE_FE 0x00020000 +#define ASCSTATE_ROE 0x00080000 +#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE) +#define ASCWHBSTATE_CLRREN 0x00000001 +#define ASCWHBSTATE_SETREN 0x00000002 +#define ASCWHBSTATE_CLRPE 0x00000004 +#define ASCWHBSTATE_CLRFE 0x00000008 +#define ASCWHBSTATE_CLRROE 0x00000020 +#define ASCTXFCON_TXFEN 0x0001 +#define ASCTXFCON_TXFFLU 0x0002 +#define ASCTXFCON_TXFITLMASK 0x3F00 +#define ASCTXFCON_TXFITLOFF 8 +#define ASCRXFCON_RXFEN 0x0001 +#define ASCRXFCON_RXFFLU 0x0002 +#define ASCRXFCON_RXFITLMASK 0x3F00 +#define ASCRXFCON_RXFITLOFF 8 +#define ASCFSTAT_RXFFLMASK 0x003F +#define ASCFSTAT_TXFFLMASK 0x3F00 +#define ASCFSTAT_TXFREEMASK 0x3F000000 +#define ASCFSTAT_TXFREEOFF 24 + +static void lqasc_tx_chars(struct uart_port *port); +static struct ltq_uart_port *lqasc_port[MAXPORTS]; +static struct uart_driver lqasc_reg; +static DEFINE_SPINLOCK(ltq_asc_lock); + +struct ltq_uart_port { + struct uart_port port; + struct clk *clk; + unsigned int tx_irq; + unsigned int rx_irq; + unsigned int err_irq; +}; + +static inline struct +ltq_uart_port *to_ltq_uart_port(struct uart_port *port) +{ + return container_of(port, struct ltq_uart_port, port); +} + +static void +lqasc_stop_tx(struct uart_port *port) +{ + return; +} + +static void +lqasc_start_tx(struct uart_port *port) +{ + unsigned long flags; + spin_lock_irqsave(<q_asc_lock, 
flags); + lqasc_tx_chars(port); + spin_unlock_irqrestore(<q_asc_lock, flags); + return; +} + +static void +lqasc_stop_rx(struct uart_port *port) +{ + ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE); +} + +static void +lqasc_enable_ms(struct uart_port *port) +{ +} + +static int +lqasc_rx_chars(struct uart_port *port) +{ + struct tty_struct *tty = tty_port_tty_get(&port->state->port); + unsigned int ch = 0, rsr = 0, fifocnt; + + if (!tty) { + dev_dbg(port->dev, "%s:tty is busy now", __func__); + return -EBUSY; + } + fifocnt = + ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK; + while (fifocnt--) { + u8 flag = TTY_NORMAL; + ch = ltq_r8(port->membase + LTQ_ASC_RBUF); + rsr = (ltq_r32(port->membase + LTQ_ASC_STATE) + & ASCSTATE_ANY) | UART_DUMMY_UER_RX; + tty_flip_buffer_push(tty); + port->icount.rx++; + + /* + * Note that the error handling code is + * out of the main execution path + */ + if (rsr & ASCSTATE_ANY) { + if (rsr & ASCSTATE_PE) { + port->icount.parity++; + ltq_w32_mask(0, ASCWHBSTATE_CLRPE, + port->membase + LTQ_ASC_WHBSTATE); + } else if (rsr & ASCSTATE_FE) { + port->icount.frame++; + ltq_w32_mask(0, ASCWHBSTATE_CLRFE, + port->membase + LTQ_ASC_WHBSTATE); + } + if (rsr & ASCSTATE_ROE) { + port->icount.overrun++; + ltq_w32_mask(0, ASCWHBSTATE_CLRROE, + port->membase + LTQ_ASC_WHBSTATE); + } + + rsr &= port->read_status_mask; + + if (rsr & ASCSTATE_PE) + flag = TTY_PARITY; + else if (rsr & ASCSTATE_FE) + flag = TTY_FRAME; + } + + if ((rsr & port->ignore_status_mask) == 0) + tty_insert_flip_char(tty, ch, flag); + + if (rsr & ASCSTATE_ROE) + /* + * Overrun is special, since it's reported + * immediately, and doesn't affect the current + * character + */ + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + } + if (ch != 0) + tty_flip_buffer_push(tty); + tty_kref_put(tty); + return 0; +} + +static void +lqasc_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + if (uart_tx_stopped(port)) { + lqasc_stop_tx(port); + return; + } + + while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) & + ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) { + if (port->x_char) { + ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF); + port->icount.tx++; + port->x_char = 0; + continue; + } + + if (uart_circ_empty(xmit)) + break; + + ltq_w8(port->state->xmit.buf[port->state->xmit.tail], + port->membase + LTQ_ASC_TBUF); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static irqreturn_t +lqasc_tx_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + spin_lock_irqsave(<q_asc_lock, flags); + ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR); + spin_unlock_irqrestore(<q_asc_lock, flags); + lqasc_start_tx(port); + return IRQ_HANDLED; +} + +static irqreturn_t +lqasc_err_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + spin_lock_irqsave(<q_asc_lock, flags); + /* clear any pending interrupts */ + ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE | + ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE); + spin_unlock_irqrestore(<q_asc_lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t +lqasc_rx_int(int irq, void *_port) +{ + unsigned long flags; + struct uart_port *port = (struct uart_port *)_port; + spin_lock_irqsave(<q_asc_lock, flags); + ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR); + lqasc_rx_chars(port); + 
spin_unlock_irqrestore(<q_asc_lock, flags); + return IRQ_HANDLED; +} + +static unsigned int +lqasc_tx_empty(struct uart_port *port) +{ + int status; + status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK; + return status ? 0 : TIOCSER_TEMT; +} + +static unsigned int +lqasc_get_mctrl(struct uart_port *port) +{ + return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR; +} + +static void +lqasc_set_mctrl(struct uart_port *port, u_int mctrl) +{ +} + +static void +lqasc_break_ctl(struct uart_port *port, int break_state) +{ +} + +static int +lqasc_startup(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + int retval; + + port->uartclk = clk_get_rate(ltq_port->clk); + + ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), + port->membase + LTQ_ASC_CLC); + + ltq_w32(0, port->membase + LTQ_ASC_PISEL); + ltq_w32( + ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) | + ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU, + port->membase + LTQ_ASC_TXFCON); + ltq_w32( + ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK) + | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU, + port->membase + LTQ_ASC_RXFCON); + /* make sure other settings are written to hardware before + * setting enable bits + */ + wmb(); + ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN | + ASCCON_ROEN, port->membase + LTQ_ASC_CON); + + retval = request_irq(ltq_port->tx_irq, lqasc_tx_int, + IRQF_DISABLED, "asc_tx", port); + if (retval) { + pr_err("failed to request lqasc_tx_int\n"); + return retval; + } + + retval = request_irq(ltq_port->rx_irq, lqasc_rx_int, + IRQF_DISABLED, "asc_rx", port); + if (retval) { + pr_err("failed to request lqasc_rx_int\n"); + goto err1; + } + + retval = request_irq(ltq_port->err_irq, lqasc_err_int, + IRQF_DISABLED, "asc_err", port); + if (retval) { + pr_err("failed to request lqasc_err_int\n"); + goto err2; + } + + ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX, + port->membase + LTQ_ASC_IRNREN); + return 0; + +err2: + free_irq(ltq_port->rx_irq, port); +err1: + free_irq(ltq_port->tx_irq, port); + return retval; +} + +static void +lqasc_shutdown(struct uart_port *port) +{ + struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); + free_irq(ltq_port->tx_irq, port); + free_irq(ltq_port->rx_irq, port); + free_irq(ltq_port->err_irq, port); + + ltq_w32(0, port->membase + LTQ_ASC_CON); + ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU, + port->membase + LTQ_ASC_RXFCON); + ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, + port->membase + LTQ_ASC_TXFCON); +} + +static void +lqasc_set_termios(struct uart_port *port, + struct ktermios *new, struct ktermios *old) +{ + unsigned int cflag; + unsigned int iflag; + unsigned int divisor; + unsigned int baud; + unsigned int con = 0; + unsigned long flags; + + cflag = new->c_cflag; + iflag = new->c_iflag; + + switch (cflag & CSIZE) { + case CS7: + con = ASCCON_M_7ASYNC; + break; + + case CS5: + case CS6: + default: + new->c_cflag &= ~ CSIZE; + new->c_cflag |= CS8; + con = ASCCON_M_8ASYNC; + break; + } + + cflag &= ~CMSPAR; /* Mark/Space parity is not supported */ + + if (cflag & CSTOPB) + con |= ASCCON_STP; + + if (cflag & PARENB) { + if (!(cflag & PARODD)) + con &= ~ASCCON_ODD; + else + con |= ASCCON_ODD; + } + + port->read_status_mask = ASCSTATE_ROE; + if (iflag & INPCK) + port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE; + + port->ignore_status_mask = 0; + if (iflag & IGNPAR) + port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE; + + if (iflag & IGNBRK) { + /* + * If we're ignoring parity and break 
indicators, + * ignore overruns too (for real raw support). + */ + if (iflag & IGNPAR) + port->ignore_status_mask |= ASCSTATE_ROE; + } + + if ((cflag & CREAD) == 0) + port->ignore_status_mask |= UART_DUMMY_UER_RX; + + /* set error signals - framing, parity and overrun, enable receiver */ + con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN; + + spin_lock_irqsave(<q_asc_lock, flags); + + /* set up CON */ + ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON); + + /* Set baud rate - take a divider of 2 into account */ + baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); + divisor = uart_get_divisor(port, baud); + divisor = divisor / 2 - 1; + + /* disable the baudrate generator */ + ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON); + + /* make sure the fractional divider is off */ + ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON); + + /* set up to use divisor of 2 */ + ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON); + + /* now we can write the new baudrate into the register */ + ltq_w32(divisor, port->membase + LTQ_ASC_BG); + + /* turn the baudrate generator back on */ + ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON); + + /* enable rx */ + ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE); + + spin_unlock_irqrestore(<q_asc_lock, flags); + + /* Don't rewrite B0 */ + if (tty_termios_baud_rate(new)) + tty_termios_encode_baud_rate(new, baud, baud); +} + +static const char* +lqasc_type(struct uart_port *port) +{ + if (port->type == PORT_LTQ_ASC) + return DRVNAME; + else + return NULL; +} + +static void +lqasc_release_port(struct uart_port *port) +{ + if (port->flags & UPF_IOREMAP) { + iounmap(port->membase); + port->membase = NULL; + } +} + +static int +lqasc_request_port(struct uart_port *port) +{ + struct platform_device *pdev = to_platform_device(port->dev); + struct resource *res; + int size; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "cannot obtain I/O memory region"); + return -ENODEV; + } + size = resource_size(res); + + res = devm_request_mem_region(&pdev->dev, res->start, + size, dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "cannot request I/O memory region"); + return -EBUSY; + } + + if (port->flags & UPF_IOREMAP) { + port->membase = devm_ioremap_nocache(&pdev->dev, + port->mapbase, size); + if (port->membase == NULL) + return -ENOMEM; + } + return 0; +} + +static void +lqasc_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + port->type = PORT_LTQ_ASC; + lqasc_request_port(port); + } +} + +static int +lqasc_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + int ret = 0; + if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC) + ret = -EINVAL; + if (ser->irq < 0 || ser->irq >= NR_IRQS) + ret = -EINVAL; + if (ser->baud_base < 9600) + ret = -EINVAL; + return ret; +} + +static struct uart_ops lqasc_pops = { + .tx_empty = lqasc_tx_empty, + .set_mctrl = lqasc_set_mctrl, + .get_mctrl = lqasc_get_mctrl, + .stop_tx = lqasc_stop_tx, + .start_tx = lqasc_start_tx, + .stop_rx = lqasc_stop_rx, + .enable_ms = lqasc_enable_ms, + .break_ctl = lqasc_break_ctl, + .startup = lqasc_startup, + .shutdown = lqasc_shutdown, + .set_termios = lqasc_set_termios, + .type = lqasc_type, + .release_port = lqasc_release_port, + .request_port = lqasc_request_port, + .config_port = lqasc_config_port, + .verify_port = lqasc_verify_port, +}; + +static void +lqasc_console_putchar(struct uart_port *port, int ch) +{ + int fifofree; + + if 
(!port->membase) + return; + + do { + fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT) + & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF; + } while (fifofree == 0); + ltq_w8(ch, port->membase + LTQ_ASC_TBUF); +} + + +static void +lqasc_console_write(struct console *co, const char *s, u_int count) +{ + struct ltq_uart_port *ltq_port; + struct uart_port *port; + unsigned long flags; + + if (co->index >= MAXPORTS) + return; + + ltq_port = lqasc_port[co->index]; + if (!ltq_port) + return; + + port = <q_port->port; + + spin_lock_irqsave(<q_asc_lock, flags); + uart_console_write(port, s, count, lqasc_console_putchar); + spin_unlock_irqrestore(<q_asc_lock, flags); +} + +static int __init +lqasc_console_setup(struct console *co, char *options) +{ + struct ltq_uart_port *ltq_port; + struct uart_port *port; + int baud = 115200; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index >= MAXPORTS) + return -ENODEV; + + ltq_port = lqasc_port[co->index]; + if (!ltq_port) + return -ENODEV; + + port = <q_port->port; + + port->uartclk = clk_get_rate(ltq_port->clk); + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct console lqasc_console = { + .name = "ttyLTQ", + .write = lqasc_console_write, + .device = uart_console_device, + .setup = lqasc_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &lqasc_reg, +}; + +static int __init +lqasc_console_init(void) +{ + register_console(&lqasc_console); + return 0; +} +console_initcall(lqasc_console_init); + +static struct uart_driver lqasc_reg = { + .owner = THIS_MODULE, + .driver_name = DRVNAME, + .dev_name = "ttyLTQ", + .major = 0, + .minor = 0, + .nr = MAXPORTS, + .cons = &lqasc_console, +}; + +static int __init +lqasc_probe(struct platform_device *pdev) +{ + struct ltq_uart_port *ltq_port; + struct uart_port *port; + struct resource *mmres, *irqres; + int tx_irq, rx_irq, err_irq; + struct clk *clk; + int ret; + + mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!mmres || !irqres) + return -ENODEV; + + if (pdev->id >= MAXPORTS) + return -EBUSY; + + if (lqasc_port[pdev->id] != NULL) + return -EBUSY; + + clk = clk_get(&pdev->dev, "fpi"); + if (IS_ERR(clk)) { + pr_err("failed to get fpi clk\n"); + return -ENOENT; + } + + tx_irq = platform_get_irq_byname(pdev, "tx"); + rx_irq = platform_get_irq_byname(pdev, "rx"); + err_irq = platform_get_irq_byname(pdev, "err"); + if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0)) + return -ENODEV; + + ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL); + if (!ltq_port) + return -ENOMEM; + + port = <q_port->port; + + port->iotype = SERIAL_IO_MEM; + port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP; + port->ops = &lqasc_pops; + port->fifosize = 16; + port->type = PORT_LTQ_ASC, + port->line = pdev->id; + port->dev = &pdev->dev; + + port->irq = tx_irq; /* unused, just to be backward-compatibe */ + port->mapbase = mmres->start; + + ltq_port->clk = clk; + + ltq_port->tx_irq = tx_irq; + ltq_port->rx_irq = rx_irq; + ltq_port->err_irq = err_irq; + + lqasc_port[pdev->id] = ltq_port; + platform_set_drvdata(pdev, ltq_port); + + ret = uart_add_one_port(&lqasc_reg, port); + + return ret; +} + +static struct platform_driver lqasc_driver = { + .driver = { + .name = DRVNAME, + .owner = THIS_MODULE, + }, +}; + +int __init +init_lqasc(void) +{ + int ret; + + ret = uart_register_driver(&lqasc_reg); + if (ret != 0) + return ret; + + ret = 
platform_driver_probe(&lqasc_driver, lqasc_probe); + if (ret != 0) + uart_unregister_driver(&lqasc_reg); + + return ret; +} + +module_init(init_lqasc); + +MODULE_DESCRIPTION("Lantiq serial port driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c index 0e8eec516df..c911b2419ab 100644 --- a/drivers/tty/serial/of_serial.c +++ b/drivers/tty/serial/of_serial.c @@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev, /* * Try to register a serial port */ +static struct of_device_id of_platform_serial_table[]; static int __devinit of_platform_serial_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct of_serial_info *info; struct uart_port port; int port_type; int ret; - if (!ofdev->dev.of_match) + match = of_match_device(of_platform_serial_table, &ofdev->dev); + if (!match) return -EINVAL; if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) @@ -97,7 +100,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev) if (info == NULL) return -ENOMEM; - port_type = (unsigned long)ofdev->dev.of_match->data; + port_type = (unsigned long)match->data; ret = of_platform_serial_setup(ofdev, port_type, &port); if (ret) goto out; diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 36613b37c50..3a68e09309f 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -2539,15 +2539,18 @@ static void qe_udc_release(struct device *dev) } /* Driver probe functions */ +static const struct of_device_id qe_udc_match[]; static int __devinit qe_udc_probe(struct platform_device *ofdev) { + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct qe_ep *ep; unsigned int ret = 0; unsigned int i; const void *prop; - if (!ofdev->dev.of_match) + match = of_match_device(qe_udc_match, &ofdev->dev); + if (!match) return -EINVAL; prop = of_get_property(np, "mode", NULL); @@ -2561,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev) return -ENOMEM; } - udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data; + udc_controller->soc_type = (unsigned long)match->data; udc_controller->usb_regs = of_iomap(np, 0); if (!udc_controller->usb_regs) { ret = -ENOMEM; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 2ab29124163..7aa4eea930f 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -4,7 +4,7 @@ * Author: Michael S. Tsirkin <mst@redhat.com> * * Inspiration, some code, and most witty comments come from - * Documentation/lguest/lguest.c, by Rusty Russell + * Documentation/virtual/lguest/lguest.c, by Rusty Russell * * This work is licensed under the terms of the GNU GPL, version 2. * diff --git a/drivers/video/acornfb.c b/drivers/video/acornfb.c index 82acb8dc4aa..6183a57eb69 100644 --- a/drivers/video/acornfb.c +++ b/drivers/video/acornfb.c @@ -66,7 +66,7 @@ * have. Allow 1% either way on the nominal for TVs. */ #define NR_MONTYPES 6 -static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = { +static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = { { /* TV */ .hfmin = 15469, .hfmax = 15781, @@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = { /* * Everything after here is initialisation!!! 
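* (the __init/__initdata annotations below become __devinit/__devinitdata so this setup code remains available when the framebuffer is probed as a device, not only during boot-time initialisation)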
*/ -static struct fb_videomode modedb[] __initdata = { +static struct fb_videomode modedb[] __devinitdata = { { /* 320x256 @ 50Hz */ NULL, 50, 320, 256, 125000, 92, 62, 35, 19, 38, 2, FB_SYNC_COMP_HIGH_ACT, @@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = { } }; -static struct fb_videomode __initdata -acornfb_default_mode = { +static struct fb_videomode acornfb_default_mode __devinitdata = { .name = NULL, .refresh = 60, .xres = 640, @@ -942,7 +941,7 @@ acornfb_default_mode = { .vmode = FB_VMODE_NONINTERLACED }; -static void __init acornfb_init_fbinfo(void) +static void __devinit acornfb_init_fbinfo(void) { static int first = 1; @@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void) * size can optionally be followed by 'M' or 'K' for * MB or KB respectively. */ -static void __init -acornfb_parse_mon(char *opt) +static void __devinit acornfb_parse_mon(char *opt) { char *p = opt; @@ -1066,8 +1064,7 @@ bad: current_par.montype = -1; } -static void __init -acornfb_parse_montype(char *opt) +static void __devinit acornfb_parse_montype(char *opt) { current_par.montype = -2; @@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt) } } -static void __init -acornfb_parse_dram(char *opt) +static void __devinit acornfb_parse_dram(char *opt) { unsigned int size; @@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt) static struct options { char *name; void (*parse)(char *opt); -} opt_table[] __initdata = { +} opt_table[] __devinitdata = { { "mon", acornfb_parse_mon }, { "montype", acornfb_parse_montype }, { "dram", acornfb_parse_dram }, { NULL, NULL } }; -int __init -acornfb_setup(char *options) +static int __devinit acornfb_setup(char *options) { struct options *optp; char *opt; @@ -1179,8 +1174,7 @@ acornfb_setup(char *options) * Detect type of monitor connected * For now, we just assume SVGA */ -static int __init -acornfb_detect_monitortype(void) +static int __devinit acornfb_detect_monitortype(void) { return 4; } diff --git a/drivers/video/atafb.c b/drivers/video/atafb.c index 5b2b5ef4edb..64e41f5448c 100644 --- a/drivers/video/atafb.c +++ b/drivers/video/atafb.c @@ -3117,7 +3117,7 @@ int __init atafb_init(void) atafb_ops.fb_setcolreg = &falcon_setcolreg; error = request_irq(IRQ_AUTO_4, falcon_vbl_switcher, IRQ_TYPE_PRIO, - "framebuffer/modeswitch", + "framebuffer:modeswitch", falcon_vbl_switcher); if (error) return error; diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index e0c2284924b..5aac00eb183 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -42,9 +42,34 @@ #define FBPIXMAPSIZE (1024 * 8) +static DEFINE_MUTEX(registration_lock); struct fb_info *registered_fb[FB_MAX] __read_mostly; int num_registered_fb __read_mostly; +static struct fb_info *get_fb_info(unsigned int idx) +{ + struct fb_info *fb_info; + + if (idx >= FB_MAX) + return ERR_PTR(-ENODEV); + + mutex_lock(&registration_lock); + fb_info = registered_fb[idx]; + if (fb_info) + atomic_inc(&fb_info->count); + mutex_unlock(&registration_lock); + + return fb_info; +} + +static void put_fb_info(struct fb_info *fb_info) +{ + if (!atomic_dec_and_test(&fb_info->count)) + return; + if (fb_info->fbops->fb_destroy) + fb_info->fbops->fb_destroy(fb_info); +} + int lock_fb_info(struct fb_info *info) { mutex_lock(&info->lock); @@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; } static void *fb_seq_start(struct seq_file *m, loff_t *pos) { + mutex_lock(&registration_lock); return (*pos < FB_MAX) ? 
pos : NULL; } @@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos) static void fb_seq_stop(struct seq_file *m, void *v) { + mutex_unlock(&registration_lock); } static int fb_seq_show(struct seq_file *m, void *v) @@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = { .release = seq_release, }; -static ssize_t -fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +/* + * We hold a reference to the fb_info in file->private_data, + * but if the current registered fb has changed, we don't + * actually want to use it. + * + * So look up the fb_info using the inode minor number, + * and just verify it against the reference we have. + */ +static struct fb_info *file_fb_info(struct file *file) { - unsigned long p = *ppos; struct inode *inode = file->f_path.dentry->d_inode; int fbidx = iminor(inode); struct fb_info *info = registered_fb[fbidx]; + + if (info != file->private_data) + info = NULL; + return info; +} + +static ssize_t +fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + unsigned long p = *ppos; + struct fb_info *info = file_fb_info(file); u8 *buffer, *dst; u8 __iomem *src; int c, cnt = 0, err = 0; @@ -761,9 +805,7 @@ static ssize_t fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; + struct fb_info *info = file_fb_info(file); u8 *buffer, *src; u8 __iomem *dst; int c, cnt = 0, err = 0; @@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; + struct fb_info *info = file_fb_info(file); + if (!info) + return -ENODEV; return do_fb_ioctl(info, cmd, arg); } @@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd, static long fb_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct inode *inode = file->f_path.dentry->d_inode; - int fbidx = iminor(inode); - struct fb_info *info = registered_fb[fbidx]; - struct fb_ops *fb = info->fbops; + struct fb_info *info = file_fb_info(file); + struct fb_ops *fb; long ret = -ENOIOCTLCMD; + if (!info) + return -ENODEV; + fb = info->fbops; switch(cmd) { case FBIOGET_VSCREENINFO: case FBIOPUT_VSCREENINFO: @@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd, static int fb_mmap(struct file *file, struct vm_area_struct * vma) { - int fbidx = iminor(file->f_path.dentry->d_inode); - struct fb_info *info = registered_fb[fbidx]; - struct fb_ops *fb = info->fbops; + struct fb_info *info = file_fb_info(file); + struct fb_ops *fb; unsigned long off; unsigned long start; u32 len; + if (!info) + return -ENODEV; if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) return -EINVAL; off = vma->vm_pgoff << PAGE_SHIFT; + fb = info->fbops; if (!fb) return -ENODEV; mutex_lock(&info->mm_lock); @@ -1361,14 +1406,16 @@ __releases(&info->lock) struct fb_info *info; int res = 0; - if (fbidx >= FB_MAX) - return -ENODEV; - info = registered_fb[fbidx]; - if (!info) + info = get_fb_info(fbidx); + if (!info) { request_module("fb%d", fbidx); - info = registered_fb[fbidx]; - if (!info) - return -ENODEV; + info = get_fb_info(fbidx); + if (!info) + return -ENODEV; + } + if (IS_ERR(info)) + 
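/* get_fb_info() returns ERR_PTR(-ENODEV) for an out-of-range minor */ +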
return PTR_ERR(info); + mutex_lock(&info->lock); if (!try_module_get(info->fbops->owner)) { res = -ENODEV; @@ -1386,6 +1433,8 @@ __releases(&info->lock) #endif out: mutex_unlock(&info->lock); + if (res) + put_fb_info(info); return res; } @@ -1401,6 +1450,7 @@ __releases(&info->lock) info->fbops->fb_release(info,1); module_put(info->fbops->owner); mutex_unlock(&info->lock); + put_fb_info(info); return 0; } @@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena, return false; } +static int do_unregister_framebuffer(struct fb_info *fb_info); + #define VGA_FB_PHYS 0xA0000 -void remove_conflicting_framebuffers(struct apertures_struct *a, +static void do_remove_conflicting_framebuffers(struct apertures_struct *a, const char *name, bool primary) { int i; @@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a, printk(KERN_INFO "fb: conflicting fb hw usage " "%s vs %s - removing generic driver\n", name, registered_fb[i]->fix.id); - unregister_framebuffer(registered_fb[i]); + do_unregister_framebuffer(registered_fb[i]); } } } -EXPORT_SYMBOL(remove_conflicting_framebuffers); -/** - * register_framebuffer - registers a frame buffer device - * @fb_info: frame buffer info structure - * - * Registers a frame buffer device @fb_info. - * - * Returns negative errno on error, or zero for success. - * - */ - -int -register_framebuffer(struct fb_info *fb_info) +static int do_register_framebuffer(struct fb_info *fb_info) { int i; struct fb_event event; struct fb_videomode mode; - if (num_registered_fb == FB_MAX) - return -ENXIO; - if (fb_check_foreignness(fb_info)) return -ENOSYS; - remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, + do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id, fb_is_primary_device(fb_info)); + if (num_registered_fb == FB_MAX) + return -ENXIO; + num_registered_fb++; for (i = 0 ; i < FB_MAX; i++) if (!registered_fb[i]) break; fb_info->node = i; + atomic_set(&fb_info->count, 1); mutex_init(&fb_info->lock); mutex_init(&fb_info->mm_lock); @@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info) return 0; } - -/** - * unregister_framebuffer - releases a frame buffer device - * @fb_info: frame buffer info structure - * - * Unregisters a frame buffer device @fb_info. - * - * Returns negative errno on error, or zero for success. - * - * This function will also notify the framebuffer console - * to release the driver. - * - * This is meant to be called within a driver's module_exit() - * function. If this is called outside module_exit(), ensure - * that the driver implements fb_open() and fb_release() to - * check that no processes are using the device. 
- */ - -int -unregister_framebuffer(struct fb_info *fb_info) +static int do_unregister_framebuffer(struct fb_info *fb_info) { struct fb_event event; int i, ret = 0; i = fb_info->node; - if (!registered_fb[i]) { - ret = -EINVAL; - goto done; - } - + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) + return -EINVAL; if (!lock_fb_info(fb_info)) return -ENODEV; @@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info) ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); unlock_fb_info(fb_info); - if (ret) { - ret = -EINVAL; - goto done; - } + if (ret) + return -EINVAL; if (fb_info->pixmap.addr && (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) kfree(fb_info->pixmap.addr); fb_destroy_modelist(&fb_info->modelist); - registered_fb[i]=NULL; + registered_fb[i] = NULL; num_registered_fb--; fb_cleanup_device(fb_info); device_destroy(fb_class, MKDEV(FB_MAJOR, i)); @@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info) fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); /* this may free fb info */ - if (fb_info->fbops->fb_destroy) - fb_info->fbops->fb_destroy(fb_info); -done: + put_fb_info(fb_info); + return 0; +} + +void remove_conflicting_framebuffers(struct apertures_struct *a, + const char *name, bool primary) +{ + mutex_lock(&registration_lock); + do_remove_conflicting_framebuffers(a, name, primary); + mutex_unlock(&registration_lock); +} +EXPORT_SYMBOL(remove_conflicting_framebuffers); + +/** + * register_framebuffer - registers a frame buffer device + * @fb_info: frame buffer info structure + * + * Registers a frame buffer device @fb_info. + * + * Returns negative errno on error, or zero for success. + * + */ +int +register_framebuffer(struct fb_info *fb_info) +{ + int ret; + + mutex_lock(&registration_lock); + ret = do_register_framebuffer(fb_info); + mutex_unlock(&registration_lock); + + return ret; +} + +/** + * unregister_framebuffer - releases a frame buffer device + * @fb_info: frame buffer info structure + * + * Unregisters a frame buffer device @fb_info. + * + * Returns negative errno on error, or zero for success. + * + * This function will also notify the framebuffer console + * to release the driver. + * + * This is meant to be called within a driver's module_exit() + * function. If this is called outside module_exit(), ensure + * that the driver implements fb_open() and fb_release() to + * check that no processes are using the device. + */ +int +unregister_framebuffer(struct fb_info *fb_info) +{ + int ret; + + mutex_lock(&registration_lock); + ret = do_unregister_framebuffer(fb_info); + mutex_unlock(&registration_lock); + return ret; } diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 1b0f98bc51b..022f9eb0b7b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -990,6 +990,12 @@ config BCM63XX_WDT To compile this driver as a loadable module, choose M here. The module will be called bcm63xx_wdt. +config LANTIQ_WDT + tristate "Lantiq SoC watchdog" + depends on LANTIQ + help + Hardware driver for the Lantiq SoC Watchdog Timer. 
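+ To compile this driver as a loadable module, choose M here. The module will be called lantiq_wdt. 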
+ # PARISC Architecture # POWERPC Architecture diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 3f8608b922a..ed26f7094e4 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -123,6 +123,7 @@ obj-$(CONFIG_AR7_WDT) += ar7_wdt.o obj-$(CONFIG_TXX9_WDT) += txx9wdt.o obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o +obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o # PARISC Architecture diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c new file mode 100644 index 00000000000..7d82adac1cb --- /dev/null +++ b/drivers/watchdog/lantiq_wdt.c @@ -0,0 +1,261 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Copyright (C) 2010 John Crispin <blogic@openwrt.org> + * Based on EP93xx wdt driver + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/watchdog.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/clk.h> +#include <linux/io.h> + +#include <lantiq.h> + +/* Section 3.4 of the datasheet + * The password sequence protects the WDT control register from unintended + * write actions, which might cause malfunction of the WDT. + * + * essentially the following two magic passwords need to be written to allow + * IO access to the WDT core + */ +#define LTQ_WDT_PW1 0x00BE0000 +#define LTQ_WDT_PW2 0x00DC0000 + +#define LTQ_WDT_CR 0x0 /* watchdog control register */ +#define LTQ_WDT_SR 0x8 /* watchdog status register */ + +#define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */ +#define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */ +#define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */ + /* divider to 0x40000 */ +#define LTQ_WDT_DIVIDER 0x40000 +#define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */ + +static int nowayout = WATCHDOG_NOWAYOUT; + +static void __iomem *ltq_wdt_membase; +static unsigned long ltq_io_region_clk_rate; + +static unsigned long ltq_wdt_bootstatus; +static unsigned long ltq_wdt_in_use; +static int ltq_wdt_timeout = 30; +static int ltq_wdt_ok_to_close; + +static void +ltq_wdt_enable(void) +{ + ltq_wdt_timeout = ltq_wdt_timeout * + (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000; + if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT) + ltq_wdt_timeout = LTQ_MAX_TIMEOUT; + + /* write the first password magic */ + ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); + /* write the second magic plus the configuration and new timeout */ + ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV | + LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR); +} + +static void +ltq_wdt_disable(void) +{ + /* write the first password magic */ + ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); + /* write the second password magic with no config + * this turns the watchdog off + */ + ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR); +} + +static ssize_t +ltq_wdt_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + if (len) { + if (!nowayout) { + size_t i; + + ltq_wdt_ok_to_close = 0; + for (i = 0; i != len; i++) { + char c; + + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + ltq_wdt_ok_to_close = 1; + else + ltq_wdt_ok_to_close = 0; + } + } + ltq_wdt_enable(); + } + + return len; +} + +static struct watchdog_info ident = { + .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | + 
WDIOF_CARDRESET, + .identity = "ltq_wdt", +}; + +static long +ltq_wdt_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + int ret = -ENOTTY; + + switch (cmd) { + case WDIOC_GETSUPPORT: + ret = copy_to_user((struct watchdog_info __user *)arg, &ident, + sizeof(ident)) ? -EFAULT : 0; + break; + + case WDIOC_GETBOOTSTATUS: + ret = put_user(ltq_wdt_bootstatus, (int __user *)arg); + break; + + case WDIOC_GETSTATUS: + ret = put_user(0, (int __user *)arg); + break; + + case WDIOC_SETTIMEOUT: + ret = get_user(ltq_wdt_timeout, (int __user *)arg); + if (!ret) + ltq_wdt_enable(); + /* intentional drop through */ + case WDIOC_GETTIMEOUT: + ret = put_user(ltq_wdt_timeout, (int __user *)arg); + break; + + case WDIOC_KEEPALIVE: + ltq_wdt_enable(); + ret = 0; + break; + } + return ret; +} + +static int +ltq_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(0, &ltq_wdt_in_use)) + return -EBUSY; + ltq_wdt_in_use = 1; + ltq_wdt_enable(); + + return nonseekable_open(inode, file); +} + +static int +ltq_wdt_release(struct inode *inode, struct file *file) +{ + if (ltq_wdt_ok_to_close) + ltq_wdt_disable(); + else + pr_err("ltq_wdt: watchdog closed without warning\n"); + ltq_wdt_ok_to_close = 0; + clear_bit(0, &ltq_wdt_in_use); + + return 0; +} + +static const struct file_operations ltq_wdt_fops = { + .owner = THIS_MODULE, + .write = ltq_wdt_write, + .unlocked_ioctl = ltq_wdt_ioctl, + .open = ltq_wdt_open, + .release = ltq_wdt_release, + .llseek = no_llseek, +}; + +static struct miscdevice ltq_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &ltq_wdt_fops, +}; + +static int __init +ltq_wdt_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct clk *clk; + + if (!res) { + dev_err(&pdev->dev, "cannot obtain I/O memory region"); + return -ENOENT; + } + res = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), dev_name(&pdev->dev)); + if (!res) { + dev_err(&pdev->dev, "cannot request I/O memory region"); + return -EBUSY; + } + ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!ltq_wdt_membase) { + dev_err(&pdev->dev, "cannot remap I/O memory region\n"); + return -ENOMEM; + } + + /* we do not need to enable the clock as it is always running */ + clk = clk_get(&pdev->dev, "io"); + WARN_ON(!clk); + ltq_io_region_clk_rate = clk_get_rate(clk); + clk_put(clk); + + if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST) + ltq_wdt_bootstatus = WDIOF_CARDRESET; + + return misc_register(&ltq_wdt_miscdev); +} + +static int __devexit +ltq_wdt_remove(struct platform_device *pdev) +{ + misc_deregister(&ltq_wdt_miscdev); + + if (ltq_wdt_membase) + iounmap(ltq_wdt_membase); + + return 0; +} + + +static struct platform_driver ltq_wdt_driver = { + .remove = __devexit_p(ltq_wdt_remove), + .driver = { + .name = "ltq_wdt", + .owner = THIS_MODULE, + }, +}; + +static int __init +init_ltq_wdt(void) +{ + return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe); +} + +static void __exit +exit_ltq_wdt(void) +{ + platform_driver_unregister(&ltq_wdt_driver); +} + +module_init(init_ltq_wdt); +module_exit(exit_ltq_wdt); + +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); + +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("Lantiq SoC Watchdog"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c index 
528bceb220f..eed5436ffb5 100644 --- a/drivers/watchdog/mpc8xxx_wdt.c +++ b/drivers/watchdog/mpc8xxx_wdt.c @@ -185,17 +185,20 @@ static struct miscdevice mpc8xxx_wdt_miscdev = { .fops = &mpc8xxx_wdt_fops, }; +static const struct of_device_id mpc8xxx_wdt_match[]; static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev) { int ret; + const struct of_device_id *match; struct device_node *np = ofdev->dev.of_node; struct mpc8xxx_wdt_type *wdt_type; u32 freq = fsl_get_sys_freq(); bool enabled; - if (!ofdev->dev.of_match) + match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev); + if (!match) return -EINVAL; - wdt_type = ofdev->dev.of_match->data; + wdt_type = match->data; if (!freq || freq == -1) return -EINVAL; diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c index 5ec5ac1f787..1479dc4d612 100644 --- a/drivers/watchdog/mtx-1_wdt.c +++ b/drivers/watchdog/mtx-1_wdt.c @@ -66,6 +66,7 @@ static struct { int default_ticks; unsigned long inuse; unsigned gpio; + int gstate; } mtx1_wdt_device; static void mtx1_wdt_trigger(unsigned long unused) @@ -75,13 +76,13 @@ static void mtx1_wdt_trigger(unsigned long unused) spin_lock(&mtx1_wdt_device.lock); if (mtx1_wdt_device.running) ticks--; - /* - * toggle GPIO2_15 - */ - tmp = au_readl(GPIO2_DIR); - tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) | - ((~tmp) & (1 << mtx1_wdt_device.gpio)); - au_writel(tmp, GPIO2_DIR); + + /* toggle wdt gpio */ + mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate; + if (mtx1_wdt_device.gstate) + gpio_direction_output(mtx1_wdt_device.gpio, 1); + else + gpio_direction_input(mtx1_wdt_device.gpio); if (mtx1_wdt_device.queue && ticks) mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); @@ -103,7 +104,8 @@ static void mtx1_wdt_start(void) spin_lock_irqsave(&mtx1_wdt_device.lock, flags); if (!mtx1_wdt_device.queue) { mtx1_wdt_device.queue = 1; - gpio_set_value(mtx1_wdt_device.gpio, 1); + mtx1_wdt_device.gstate = 1; + gpio_direction_output(mtx1_wdt_device.gpio, 1); mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); } mtx1_wdt_device.running++; @@ -117,7 +119,8 @@ static int mtx1_wdt_stop(void) spin_lock_irqsave(&mtx1_wdt_device.lock, flags); if (mtx1_wdt_device.queue) { mtx1_wdt_device.queue = 0; - gpio_set_value(mtx1_wdt_device.gpio, 0); + mtx1_wdt_device.gstate = 0; + gpio_direction_output(mtx1_wdt_device.gpio, 0); } ticks = mtx1_wdt_device.default_ticks; spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags); diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index f420f1ff7f1..4781f806701 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -4,21 +4,21 @@ obj-y += xenbus/ nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_features.o := $(nostackp) -obj-$(CONFIG_BLOCK) += biomerge.o -obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o -obj-$(CONFIG_XEN_XENCOMM) += xencomm.o -obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o -obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o -obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o +obj-$(CONFIG_BLOCK) += biomerge.o +obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o +obj-$(CONFIG_XEN_XENCOMM) += xencomm.o +obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o +obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o +obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o -obj-$(CONFIG_XENFS) += xenfs/ +obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o -obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o -obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o -obj-$(CONFIG_XEN_DOM0) += pci.o 
+obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o +obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o +obj-$(CONFIG_XEN_DOM0) += pci.o -xen-evtchn-y := evtchn.o +xen-evtchn-y := evtchn.o xen-gntdev-y := gntdev.o xen-gntalloc-y := gntalloc.o -xen-platform-pci-y := platform-pci.o +xen-platform-pci-y := platform-pci.o diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 043af8ad6b6..f54290baa3d 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -114,7 +114,6 @@ static void __balloon_append(struct page *page) if (PageHighMem(page)) { list_add_tail(&page->lru, &ballooned_pages); balloon_stats.balloon_high++; - dec_totalhigh_pages(); } else { list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; @@ -124,6 +123,8 @@ static void __balloon_append(struct page *page) static void balloon_append(struct page *page) { __balloon_append(page); + if (PageHighMem(page)) + dec_totalhigh_pages(); totalram_pages--; } @@ -193,7 +194,7 @@ static enum bp_state update_schedule(enum bp_state state) return BP_EAGAIN; } -static unsigned long current_target(void) +static long current_credit(void) { unsigned long target = balloon_stats.target_pages; @@ -202,7 +203,7 @@ static unsigned long current_target(void) balloon_stats.balloon_low + balloon_stats.balloon_high); - return target; + return target - balloon_stats.current_pages; } static enum bp_state increase_reservation(unsigned long nr_pages) @@ -246,7 +247,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages) set_phys_to_machine(pfn, frame_list[i]); /* Link back into the page tables if not highmem. */ - if (!xen_hvm_domain() && pfn < max_low_pfn) { + if (xen_pv_domain() && !PageHighMem(page)) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), @@ -293,7 +294,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) scrub_page(page); - if (!xen_hvm_domain() && !PageHighMem(page)) { + if (xen_pv_domain() && !PageHighMem(page)) { ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), __pte_ma(0), 0); @@ -337,7 +338,7 @@ static void balloon_process(struct work_struct *work) mutex_lock(&balloon_mutex); do { - credit = current_target() - balloon_stats.current_pages; + credit = current_credit(); if (credit > 0) state = increase_reservation(credit); @@ -420,7 +421,7 @@ void free_xenballooned_pages(int nr_pages, struct page** pages) } /* The balloon may be too large now. Shrink it if needed. */ - if (current_target() != balloon_stats.current_pages) + if (current_credit()) schedule_delayed_work(&balloon_worker, 0); mutex_unlock(&balloon_mutex); @@ -429,7 +430,7 @@ EXPORT_SYMBOL(free_xenballooned_pages); static int __init balloon_init(void) { - unsigned long pfn, nr_pages, extra_pfn_end; + unsigned long pfn, extra_pfn_end; struct page *page; if (!xen_domain()) @@ -437,11 +438,7 @@ static int __init balloon_init(void) pr_info("xen/balloon: Initialising balloon driver.\n"); - if (xen_pv_domain()) - nr_pages = xen_start_info->nr_pages; - else - nr_pages = max_pfn; - balloon_stats.current_pages = min(nr_pages, max_pfn); + balloon_stats.current_pages = xen_pv_domain() ? 
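/* PV: pages granted at boot (from the start info); HVM: all pages below max_pfn */ 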
min(xen_start_info->nr_pages, max_pfn) : max_pfn; balloon_stats.target_pages = balloon_stats.current_pages; balloon_stats.balloon_low = 0; balloon_stats.balloon_high = 0; @@ -466,7 +463,7 @@ static int __init balloon_init(void) pfn < extra_pfn_end; pfn++) { page = pfn_to_page(pfn); - /* totalram_pages doesn't include the boot-time + /* totalram_pages and totalhigh_pages do not include the boot-time balloon extension, so don't subtract from it. */ __balloon_append(page); } diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 33167b43ac7..3ff822b4814 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -101,6 +101,7 @@ struct irq_info unsigned short gsi; unsigned char vector; unsigned char flags; + uint16_t domid; } pirq; } u; }; @@ -118,6 +119,8 @@ static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG], static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; +static void enable_dynirq(struct irq_data *data); +static void disable_dynirq(struct irq_data *data); /* Get info for IRQ */ static struct irq_info *info_for_irq(unsigned irq) @@ -184,6 +187,7 @@ static void xen_irq_info_pirq_init(unsigned irq, unsigned short pirq, unsigned short gsi, unsigned short vector, + uint16_t domid, unsigned char flags) { struct irq_info *info = info_for_irq(irq); @@ -193,6 +197,7 @@ static void xen_irq_info_pirq_init(unsigned irq, info->u.pirq.pirq = pirq; info->u.pirq.gsi = gsi; info->u.pirq.vector = vector; + info->u.pirq.domid = domid; info->u.pirq.flags = flags; } @@ -473,16 +478,6 @@ static void xen_free_irq(unsigned irq) irq_free_desc(irq); } -static void pirq_unmask_notify(int irq) -{ - struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) }; - - if (unlikely(pirq_needs_eoi(irq))) { - int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); - WARN_ON(rc); - } -} - static void pirq_query_unmask(int irq) { struct physdev_irq_status_query irq_status; @@ -506,6 +501,29 @@ static bool probing_irq(int irq) return desc && desc->action == NULL; } +static void eoi_pirq(struct irq_data *data) +{ + int evtchn = evtchn_from_irq(data->irq); + struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; + int rc = 0; + + irq_move_irq(data); + + if (VALID_EVTCHN(evtchn)) + clear_evtchn(evtchn); + + if (pirq_needs_eoi(data->irq)) { + rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi); + WARN_ON(rc); + } +} + +static void mask_ack_pirq(struct irq_data *data) +{ + disable_dynirq(data); + eoi_pirq(data); +} + static unsigned int __startup_pirq(unsigned int irq) { struct evtchn_bind_pirq bind_pirq; @@ -539,7 +557,7 @@ static unsigned int __startup_pirq(unsigned int irq) out: unmask_evtchn(evtchn); - pirq_unmask_notify(irq); + eoi_pirq(irq_get_irq_data(irq)); return 0; } @@ -579,18 +597,7 @@ static void enable_pirq(struct irq_data *data) static void disable_pirq(struct irq_data *data) { -} - -static void ack_pirq(struct irq_data *data) -{ - int evtchn = evtchn_from_irq(data->irq); - - irq_move_irq(data); - - if (VALID_EVTCHN(evtchn)) { - mask_evtchn(evtchn); - clear_evtchn(evtchn); - } + disable_dynirq(data); } static int find_irq_by_gsi(unsigned gsi) @@ -639,9 +646,6 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, if (irq < 0) goto out; - irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, - name); - irq_op.irq = irq; irq_op.vector = 0; @@ -655,9 +659,35 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi, goto out; } - xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, + xen_irq_info_pirq_init(irq, 0, pirq, gsi, 
irq_op.vector, DOMID_SELF, shareable ? PIRQ_SHAREABLE : 0); + pirq_query_unmask(irq); + /* We try to use the handler with the appropriate semantic for the + * type of interrupt: if the interrupt doesn't need an eoi + * (pirq_needs_eoi returns false), we treat it like an edge + * triggered interrupt so we use handle_edge_irq. + * As a matter of fact this only happens when the corresponding + * physical interrupt is edge triggered or an msi. + * + * On the other hand if the interrupt needs an eoi (pirq_needs_eoi + * returns true) we treat it like a level triggered interrupt so we + * use handle_fasteoi_irq like the native code does for this kind of + * interrupts. + * Depending on the Xen version, pirq_needs_eoi might return true + * not only for level triggered interrupts but for edge triggered + * interrupts too. In any case Xen always honors the eoi mechanism, + * not injecting any more pirqs of the same kind if the first one + * hasn't received an eoi yet. Therefore using the fasteoi handler + * is the right choice either way. + */ + if (pirq_needs_eoi(irq)) + irq_set_chip_and_handler_name(irq, &xen_pirq_chip, + handle_fasteoi_irq, name); + else + irq_set_chip_and_handler_name(irq, &xen_pirq_chip, + handle_edge_irq, name); + out: spin_unlock(&irq_mapping_update_lock); @@ -680,7 +710,8 @@ int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) } int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, - int pirq, int vector, const char *name) + int pirq, int vector, const char *name, + domid_t domid) { int irq, ret; @@ -690,10 +721,10 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, if (irq == -1) goto out; - irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq, - name); + irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_edge_irq, + name); - xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, 0); + xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0); ret = irq_set_msi_desc(irq, msidesc); if (ret < 0) goto error_irq; @@ -722,9 +753,16 @@ int xen_destroy_irq(int irq) if (xen_initial_domain()) { unmap_irq.pirq = info->u.pirq.pirq; - unmap_irq.domid = DOMID_SELF; + unmap_irq.domid = info->u.pirq.domid; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); - if (rc) { + /* If another domain quits without making the pci_disable_msix + * call, the Xen hypervisor takes care of freeing the PIRQs + * (free_domain_pirqs). 
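+ * An -ESRCH from PHYSDEVOP_unmap_pirq for a foreign domid therefore just means the other domain has already exited, so it is reported below as info rather than a warning. 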
+ */ + if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)) + printk(KERN_INFO "domain %d does not have %d anymore\n", + info->u.pirq.domid, info->u.pirq.pirq); + else if (rc) { printk(KERN_WARNING "unmap irq failed %d\n", rc); goto out; } @@ -759,6 +797,12 @@ out: return irq; } + +int xen_pirq_from_irq(unsigned irq) +{ + return pirq_from_irq(irq); +} +EXPORT_SYMBOL_GPL(xen_pirq_from_irq); int bind_evtchn_to_irq(unsigned int evtchn) { int irq; @@ -773,7 +817,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) goto out; irq_set_chip_and_handler_name(irq, &xen_dynamic_chip, - handle_fasteoi_irq, "event"); + handle_edge_irq, "event"); xen_irq_info_evtchn_init(irq, evtchn); } @@ -1179,9 +1223,6 @@ static void __xen_evtchn_do_upcall(void) port = (word_idx * BITS_PER_LONG) + bit_idx; irq = evtchn_to_irq[port]; - mask_evtchn(port); - clear_evtchn(port); - if (irq != -1) { desc = irq_to_desc(irq); if (desc) @@ -1337,10 +1378,16 @@ static void ack_dynirq(struct irq_data *data) { int evtchn = evtchn_from_irq(data->irq); - irq_move_masked_irq(data); + irq_move_irq(data); if (VALID_EVTCHN(evtchn)) - unmask_evtchn(evtchn); + clear_evtchn(evtchn); +} + +static void mask_ack_dynirq(struct irq_data *data) +{ + disable_dynirq(data); + ack_dynirq(data); } static int retrigger_dynirq(struct irq_data *data) @@ -1502,6 +1549,18 @@ void xen_poll_irq(int irq) xen_poll_irq_timeout(irq, 0 /* no timeout */); } +/* Check whether the IRQ line is shared with other guests. */ +int xen_test_irq_shared(int irq) +{ + struct irq_info *info = info_for_irq(irq); + struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq }; + + if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status)) + return 0; + return !(irq_status.flags & XENIRQSTAT_shared); +} +EXPORT_SYMBOL_GPL(xen_test_irq_shared); + void xen_irq_resume(void) { unsigned int cpu, evtchn; @@ -1535,7 +1594,9 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { .irq_mask = disable_dynirq, .irq_unmask = enable_dynirq, - .irq_eoi = ack_dynirq, + .irq_ack = ack_dynirq, + .irq_mask_ack = mask_ack_dynirq, + .irq_set_affinity = set_affinity_irq, .irq_retrigger = retrigger_dynirq, }; @@ -1545,14 +1606,15 @@ static struct irq_chip xen_pirq_chip __read_mostly = { .irq_startup = startup_pirq, .irq_shutdown = shutdown_pirq, - .irq_enable = enable_pirq, - .irq_unmask = enable_pirq, - .irq_disable = disable_pirq, - .irq_mask = disable_pirq, - .irq_ack = ack_pirq, + .irq_mask = disable_dynirq, + .irq_unmask = enable_dynirq, + + .irq_ack = eoi_pirq, + .irq_eoi = eoi_pirq, + .irq_mask_ack = mask_ack_pirq, .irq_set_affinity = set_affinity_irq, diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index a7ffdfe19fc..f6832f46aea 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c @@ -427,6 +427,17 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd, return 0; } +static void gntalloc_vma_open(struct vm_area_struct *vma) +{ + struct gntalloc_gref *gref = vma->vm_private_data; + if (!gref) + return; + + spin_lock(&gref_lock); + gref->users++; + spin_unlock(&gref_lock); +} + static void gntalloc_vma_close(struct vm_area_struct *vma) { struct gntalloc_gref *gref = vma->vm_private_data; @@ -441,6 +452,7 @@ static void gntalloc_vma_close(struct vm_area_struct *vma) } static struct vm_operations_struct gntalloc_vmops = { + .open = gntalloc_vma_open, .close = gntalloc_vma_close, }; @@ -471,8 +483,6 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_private_data = gref; vma->vm_flags |= VM_RESERVED; - 
vma->vm_flags |= VM_DONTCOPY; - vma->vm_flags |= VM_PFNMAP | VM_PFN_AT_MMAP; vma->vm_ops = &gntalloc_vmops; diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index b0f9e8fb005..f914b26cf0c 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -330,17 +330,26 @@ static int unmap_grant_pages(struct grant_map *map, int offset, int pages) /* ------------------------------------------------------------------ */ +static void gntdev_vma_open(struct vm_area_struct *vma) +{ + struct grant_map *map = vma->vm_private_data; + + pr_debug("gntdev_vma_open %p\n", vma); + atomic_inc(&map->users); +} + static void gntdev_vma_close(struct vm_area_struct *vma) { struct grant_map *map = vma->vm_private_data; - pr_debug("close %p\n", vma); + pr_debug("gntdev_vma_close %p\n", vma); map->vma = NULL; vma->vm_private_data = NULL; gntdev_put_map(map); } static struct vm_operations_struct gntdev_vmops = { + .open = gntdev_vma_open, .close = gntdev_vma_close, }; @@ -652,7 +661,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) vma->vm_ops = &gntdev_vmops; - vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP; + vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; + + if (use_ptemod) + vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; vma->vm_private_data = map; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 3745a318def..fd725cde6ad 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -466,13 +466,30 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, if (map_ops[i].status) continue; - /* m2p override only supported for GNTMAP_contains_pte mappings */ - if (!(map_ops[i].flags & GNTMAP_contains_pte)) - continue; - pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + + if (map_ops[i].flags & GNTMAP_contains_pte) { + pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + (map_ops[i].host_addr & ~PAGE_MASK)); - mfn = pte_mfn(*pte); - ret = m2p_add_override(mfn, pages[i]); + mfn = pte_mfn(*pte); + } else { + /* If you really wanted to do this: + * mfn = PFN_DOWN(map_ops[i].dev_bus_addr); + * + * The reason we do not implement it is b/c on the + * unmap path (gnttab_unmap_refs) we have no means of + * checking whether the page is !GNTMAP_contains_pte. + * + * That is without some extra data-structure to carry + * the struct page, bool clear_pte, and list_head next + * tuples and deal with allocation/deallocation, etc. + * + * The users of this API set the GNTMAP_contains_pte + * flag so lets just return not supported until it + * becomes necessary to implement. 
+ */ + return -EOPNOTSUPP; + } + ret = m2p_add_override(mfn, pages[i], + map_ops[i].flags & GNTMAP_contains_pte); if (ret) return ret; } @@ -494,7 +511,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, return ret; for (i = 0; i < count; i++) { - ret = m2p_remove_override(pages[i]); + ret = m2p_remove_override(pages[i], true /* clear the PTE */); if (ret) return ret; } diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index a2eee574784..0b5366b5be2 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -70,12 +70,7 @@ static int xen_suspend(void *data) BUG_ON(!irqs_disabled()); - err = sysdev_suspend(PMSG_FREEZE); - if (!err) { - err = syscore_suspend(); - if (err) - sysdev_resume(); - } + err = syscore_suspend(); if (err) { printk(KERN_ERR "xen_suspend: system core suspend failed: %d\n", err); @@ -102,7 +97,6 @@ static int xen_suspend(void *data) } syscore_resume(); - sysdev_resume(); return 0; } diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c index 60f1827a32c..1e0fe01eb67 100644 --- a/drivers/xen/sys-hypervisor.c +++ b/drivers/xen/sys-hypervisor.c @@ -215,7 +215,7 @@ static struct attribute_group xen_compilation_group = { .attrs = xen_compile_attrs, }; -int __init static xen_compilation_init(void) +static int __init xen_compilation_init(void) { return sysfs_create_group(hypervisor_kobj, &xen_compilation_group); }
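
The fbmem.c changes above all serve one lifetime pattern: lookups take a reference under the same registration_lock that registration and unregistration take, and the final put (not unregister itself) is what destroys the object. A minimal, self-contained userspace sketch of that pattern follows; the names (obj, registry, get_obj, put_obj) and the pthread/stdatomic primitives are illustrative stand-ins, not kernel API.

/*
 * Userspace sketch of the lookup-vs-teardown pattern from the fbmem.c
 * hunks: a registry guarded by a mutex, a per-object reference count
 * taken under that lock, and destruction deferred to the final put.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

#define OBJ_MAX 8

struct obj {
	atomic_int count;	/* like fb_info->count */
	int id;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *registry[OBJ_MAX];	/* like registered_fb[] */

/* The reference is taken while the registry lock is held, so the object
 * cannot be unregistered and freed between the array read and the
 * refcount increment. */
static struct obj *get_obj(unsigned int idx)
{
	struct obj *o = NULL;

	if (idx >= OBJ_MAX)
		return NULL;
	pthread_mutex_lock(&registry_lock);
	o = registry[idx];
	if (o)
		atomic_fetch_add(&o->count, 1);
	pthread_mutex_unlock(&registry_lock);
	return o;
}

/* The last put frees the object, mirroring put_fb_info() calling
 * fb_destroy() when the count drops to zero. */
static void put_obj(struct obj *o)
{
	if (atomic_fetch_sub(&o->count, 1) == 1)
		free(o);
}

static int register_obj(struct obj *o)
{
	int i, ret = -1;

	pthread_mutex_lock(&registry_lock);
	for (i = 0; i < OBJ_MAX; i++) {
		if (!registry[i]) {
			o->id = i;
			atomic_init(&o->count, 1);	/* registry's own ref */
			registry[i] = o;
			ret = i;
			break;
		}
	}
	pthread_mutex_unlock(&registry_lock);
	return ret;
}

static void unregister_obj(struct obj *o)
{
	pthread_mutex_lock(&registry_lock);
	registry[o->id] = NULL;
	pthread_mutex_unlock(&registry_lock);
	put_obj(o);	/* drop the registry's ref; users may still hold theirs */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *user;

	register_obj(o);
	user = get_obj(0);	/* an open() would do this */
	unregister_obj(o);	/* object survives: user still holds a ref */
	put_obj(user);		/* last put actually frees it */
	return 0;
}

The property carried over from get_fb_info()/put_fb_info() is that the registry read and the refcount increment happen under one lock, so a concurrent unregister can never free an object between the two steps.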