Diffstat (limited to 'drivers/base')
35 files changed, 2479 insertions, 1095 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 21cf46f4524..7be9f79018e 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -172,6 +172,21 @@ config SYS_HYPERVISOR bool default n +config GENERIC_CPU_DEVICES + bool + default n + source "drivers/base/regmap/Kconfig" +config DMA_SHARED_BUFFER + bool + default n + select ANON_INODES + depends on EXPERIMENTAL + help + This option enables the framework for buffer-sharing between + multiple drivers. A buffer is associated with a file using driver + API extensions; the file descriptor can then be passed on to other + drivers. + endmenu diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 99a375ad2cc..2c8272dd93c 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -3,16 +3,17 @@ obj-y := core.o sys.o bus.o dd.o syscore.o \ driver.o class.o platform.o \ cpu.o firmware.o init.o map.o devres.o \ - attribute_container.o transport_class.o + attribute_container.o transport_class.o \ + topology.o obj-$(CONFIG_DEVTMPFS) += devtmpfs.o obj-y += power/ obj-$(CONFIG_HAS_DMA) += dma-mapping.o obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o +obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o obj-$(CONFIG_ISA) += isa.o obj-$(CONFIG_FW_LOADER) += firmware_class.o obj-$(CONFIG_NUMA) += node.o obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o -obj-$(CONFIG_SMP) += topology.o ifeq ($(CONFIG_SYSFS),y) obj-$(CONFIG_MODULES) += module.o endif diff --git a/drivers/base/base.h b/drivers/base/base.h index 21c1b96c34c..b858dfd9a37 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h @@ -4,7 +4,9 @@ * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure. * * @subsys - the struct kset that defines this subsystem - * @devices_kset - the list of devices associated + * @devices_kset - the subsystem's 'devices' directory + * @interfaces - list of subsystem interfaces associated + * @mutex - mutex to protect the devices and interfaces lists * * @drivers_kset - the list of drivers associated * @klist_devices - the klist to iterate over the @devices_kset @@ -14,10 +16,8 @@ * @bus - pointer back to the struct bus_type that this structure is associated * with. * - * @class_interfaces - list of class_interfaces associated * @glue_dirs - "glue" directory to put in-between the parent device to * avoid namespace conflicts - * @class_mutex - mutex to protect the children, devices, and interfaces lists. * @class - pointer back to the struct class that this structure is associated * with.
* @@ -28,6 +28,8 @@ struct subsys_private { struct kset subsys; struct kset *devices_kset; + struct list_head interfaces; + struct mutex mutex; struct kset *drivers_kset; struct klist klist_devices; @@ -36,9 +38,7 @@ struct subsys_private { unsigned int drivers_autoprobe:1; struct bus_type *bus; - struct list_head class_interfaces; struct kset glue_dirs; - struct mutex class_mutex; struct class *class; }; #define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj) @@ -94,8 +94,7 @@ extern int hypervisor_init(void); static inline int hypervisor_init(void) { return 0; } #endif extern int platform_bus_init(void); -extern int system_bus_init(void); -extern int cpu_dev_init(void); +extern void cpu_dev_init(void); extern int bus_add_device(struct device *dev); extern void bus_probe_device(struct device *dev); @@ -116,6 +115,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj); extern int devres_release_all(struct device *dev); +/* /sys/devices directory */ extern struct kset *devices_kset; #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS) diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 000e7b2006f..99dc5921e1d 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -16,9 +16,14 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/string.h> +#include <linux/mutex.h> #include "base.h" #include "power/power.h" +/* /sys/devices/system */ +/* FIXME: make static after drivers/base/sys.c is deleted */ +struct kset *system_kset; + #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) /* @@ -360,6 +365,47 @@ struct device *bus_find_device_by_name(struct bus_type *bus, } EXPORT_SYMBOL_GPL(bus_find_device_by_name); +/** + * subsys_find_device_by_id - find a device with a specific enumeration number + * @subsys: subsystem + * @id: index 'id' in struct device + * @hint: device to check first + * + * Check the hint's next object and if it is a match return it directly, + * otherwise, fall back to a full list search. Either way a reference for + * the returned object is taken. + */ +struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id, + struct device *hint) +{ + struct klist_iter i; + struct device *dev; + + if (!subsys) + return NULL; + + if (hint) { + klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus); + dev = next_device(&i); + if (dev && dev->id == id && get_device(dev)) { + klist_iter_exit(&i); + return dev; + } + klist_iter_exit(&i); + } + + klist_iter_init_node(&subsys->p->klist_devices, &i, NULL); + while ((dev = next_device(&i))) { + if (dev->id == id && get_device(dev)) { + klist_iter_exit(&i); + return dev; + } + } + klist_iter_exit(&i); + return NULL; +} +EXPORT_SYMBOL_GPL(subsys_find_device_by_id); + static struct device_driver *next_driver(struct klist_iter *i) { struct klist_node *n = klist_next(i); @@ -487,38 +533,59 @@ out_put: void bus_probe_device(struct device *dev) { struct bus_type *bus = dev->bus; + struct subsys_interface *sif; int ret; - if (bus && bus->p->drivers_autoprobe) { + if (!bus) + return; + + if (bus->p->drivers_autoprobe) { ret = device_attach(dev); WARN_ON(ret < 0); } + + mutex_lock(&bus->p->mutex); + list_for_each_entry(sif, &bus->p->interfaces, node) + if (sif->add_dev) + sif->add_dev(dev, sif); + mutex_unlock(&bus->p->mutex); }
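The memory hotplug hunks later in this patch use this helper from find_memory_block_hinted(); a minimal usage sketch in that spirit (memory_subsys, block_id and hint_dev stand in for the caller's own context):

	struct device *dev;

	/* the lookup takes a reference on a hit, so drop it when done */
	dev = subsys_find_device_by_id(&memory_subsys, block_id, hint_dev);
	if (dev) {
		/* ... use the "memory<block_id>" device ... */
		put_device(dev);
	}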
/** * bus_remove_device - remove device from bus * @dev: device to be removed * - * - Remove symlink from bus's directory. + * - Remove device from all interfaces. + * - Remove symlink from bus' directory. * - Delete device from bus's list. * - Detach from its driver. * - Drop reference taken in bus_add_device(). */ void bus_remove_device(struct device *dev) { - if (dev->bus) { - sysfs_remove_link(&dev->kobj, "subsystem"); - sysfs_remove_link(&dev->bus->p->devices_kset->kobj, - dev_name(dev)); - device_remove_attrs(dev->bus, dev); - if (klist_node_attached(&dev->p->knode_bus)) - klist_del(&dev->p->knode_bus); - - pr_debug("bus: '%s': remove device %s\n", - dev->bus->name, dev_name(dev)); - device_release_driver(dev); - bus_put(dev->bus); - } + struct bus_type *bus = dev->bus; + struct subsys_interface *sif; + + if (!bus) + return; + + mutex_lock(&bus->p->mutex); + list_for_each_entry(sif, &bus->p->interfaces, node) + if (sif->remove_dev) + sif->remove_dev(dev, sif); + mutex_unlock(&bus->p->mutex); + + sysfs_remove_link(&dev->kobj, "subsystem"); + sysfs_remove_link(&dev->bus->p->devices_kset->kobj, + dev_name(dev)); + device_remove_attrs(dev->bus, dev); + if (klist_node_attached(&dev->p->knode_bus)) + klist_del(&dev->p->knode_bus); + + pr_debug("bus: '%s': remove device %s\n", + dev->bus->name, dev_name(dev)); + device_release_driver(dev); + bus_put(dev->bus); } static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv) @@ -847,14 +914,14 @@ static ssize_t bus_uevent_store(struct bus_type *bus, static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); /** - * bus_register - register a bus with the system. + * __bus_register - register a driver-core subsystem * @bus: bus. * * Once we have that, we register the bus with the kobject * infrastructure, then register the child subsystems it has: - * the devices and drivers that belong to the bus. + * the devices and drivers that belong to the subsystem. */ -int bus_register(struct bus_type *bus) +int __bus_register(struct bus_type *bus, struct lock_class_key *key) { int retval; struct subsys_private *priv; @@ -898,6 +965,8 @@ int bus_register(struct bus_type *bus) goto bus_drivers_fail; } + INIT_LIST_HEAD(&priv->interfaces); + __mutex_init(&priv->mutex, "subsys mutex", key); klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put); klist_init(&priv->klist_drivers, NULL, NULL); @@ -927,7 +996,7 @@ out: bus->p = NULL; return retval; } -EXPORT_SYMBOL_GPL(bus_register); +EXPORT_SYMBOL_GPL(__bus_register); /** * bus_unregister - remove a bus from the system @@ -939,6 +1008,8 @@ EXPORT_SYMBOL_GPL(bus_register); void bus_unregister(struct bus_type *bus) { pr_debug("bus: '%s': unregistering\n", bus->name); + if (bus->dev_root) + device_unregister(bus->dev_root); bus_remove_attrs(bus); remove_probe_files(bus); kset_unregister(bus->p->drivers_kset); @@ -1028,10 +1099,194 @@ void bus_sort_breadthfirst(struct bus_type *bus, } EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
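Callers keep using bus_register(); with __bus_register() now taking a lockdep class key, the public entry point is presumably a header macro that supplies one static key per call site, along these lines (a sketch of the matching device.h change, which is outside this diff):

	#define bus_register(subsys)			\
	({						\
		static struct lock_class_key __key;	\
		__bus_register(subsys, &__key);		\
	})

This gives each subsystem's priv->mutex its own lockdep class, mirroring what __class_register() already does for the class mutex in the class.c hunk below.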
+/** + * subsys_dev_iter_init - initialize subsys device iterator + * @iter: subsys iterator to initialize + * @subsys: the subsys we want to iterate over + * @start: the device to start iterating from, if any + * @type: device_type of the devices to iterate over, NULL for all + * + * Initialize subsys iterator @iter such that it iterates over devices + * of @subsys. If @start is set, the list iteration will start there, + * otherwise if it is NULL, the iteration starts at the beginning of + * the list. + */ +void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys, + struct device *start, const struct device_type *type) +{ + struct klist_node *start_knode = NULL; + + if (start) + start_knode = &start->p->knode_bus; + klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode); + iter->type = type; +} +EXPORT_SYMBOL_GPL(subsys_dev_iter_init); + +/** + * subsys_dev_iter_next - iterate to the next device + * @iter: subsys iterator to proceed + * + * Proceed @iter to the next device and return it. Returns NULL if + * iteration is complete. + * + * The returned device is referenced and won't be released until the + * iterator proceeds to the next device or exits. The caller is + * free to do whatever it wants to do with the device including + * calling back into subsys code. + */ +struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter) +{ + struct klist_node *knode; + struct device *dev; + + for (;;) { + knode = klist_next(&iter->ki); + if (!knode) + return NULL; + dev = container_of(knode, struct device_private, knode_bus)->device; + if (!iter->type || iter->type == dev->type) + return dev; + } +} +EXPORT_SYMBOL_GPL(subsys_dev_iter_next); + +/** + * subsys_dev_iter_exit - finish iteration + * @iter: subsys iterator to finish + * + * Finish an iteration. Always call this function after iteration is + * complete, whether or not the iteration ran to the end. + */ +void subsys_dev_iter_exit(struct subsys_dev_iter *iter) +{ + klist_iter_exit(&iter->ki); +} +EXPORT_SYMBOL_GPL(subsys_dev_iter_exit); + +int subsys_interface_register(struct subsys_interface *sif) +{ + struct bus_type *subsys; + struct subsys_dev_iter iter; + struct device *dev; + + if (!sif || !sif->subsys) + return -ENODEV; + + subsys = bus_get(sif->subsys); + if (!subsys) + return -EINVAL; + + mutex_lock(&subsys->p->mutex); + list_add_tail(&sif->node, &subsys->p->interfaces); + if (sif->add_dev) { + subsys_dev_iter_init(&iter, subsys, NULL, NULL); + while ((dev = subsys_dev_iter_next(&iter))) + sif->add_dev(dev, sif); + subsys_dev_iter_exit(&iter); + } + mutex_unlock(&subsys->p->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(subsys_interface_register); + +void subsys_interface_unregister(struct subsys_interface *sif) +{ + struct bus_type *subsys = sif->subsys; + struct subsys_dev_iter iter; + struct device *dev; + + if (!sif) + return; + + mutex_lock(&subsys->p->mutex); + list_del_init(&sif->node); + if (sif->remove_dev) { + subsys_dev_iter_init(&iter, subsys, NULL, NULL); + while ((dev = subsys_dev_iter_next(&iter))) + sif->remove_dev(dev, sif); + subsys_dev_iter_exit(&iter); + } + mutex_unlock(&subsys->p->mutex); + + bus_put(subsys); +} +EXPORT_SYMBOL_GPL(subsys_interface_unregister);
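A subsys_interface receives add_dev()/remove_dev() callbacks for every device on a subsystem, both the devices that already exist at registration time and those added later via bus_probe_device(). A hypothetical client of the CPU subsystem (all foo_* names invented; the field layout is assumed from the companion device.h change, not shown in this diff):

	static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
	{
		/* runs under the subsys mutex for each matching device */
		return 0;
	}

	static int foo_remove_dev(struct device *dev, struct subsys_interface *sif)
	{
		return 0;
	}

	static struct subsys_interface foo_interface = {
		.name		= "foo",
		.subsys		= &cpu_subsys,
		.add_dev	= foo_add_dev,
		.remove_dev	= foo_remove_dev,
	};

	/* replays add_dev() for all current devices via the iterator above */
	err = subsys_interface_register(&foo_interface);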
+ +static void system_root_device_release(struct device *dev) +{ + kfree(dev); +} +/** + * subsys_system_register - register a subsystem at /sys/devices/system/ + * @subsys - system subsystem + * @groups - default attributes for the root device + * + * All 'system' subsystems have a /sys/devices/system/<name> root device + * with the name of the subsystem. The root device can carry subsystem- + * wide attributes. All registered devices are below this single root + * device and are named after the subsystem with a simple enumeration + * number appended. The registered devices are not explicitly named; + * only 'id' in the device needs to be set. + * + * Do not use this interface for anything new, it exists for compatibility + * with bad ideas only. New subsystems should use plain subsystems; + * subsystem-wide attributes should be added to the subsystem directory + * itself, not to a fake root device placed in + * /sys/devices/system/<name>. + */ +int subsys_system_register(struct bus_type *subsys, + const struct attribute_group **groups) +{ + struct device *dev; + int err; + + err = bus_register(subsys); + if (err < 0) + return err; + + dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!dev) { + err = -ENOMEM; + goto err_dev; + } + + err = dev_set_name(dev, "%s", subsys->name); + if (err < 0) + goto err_name; + + dev->kobj.parent = &system_kset->kobj; + dev->groups = groups; + dev->release = system_root_device_release; + + err = device_register(dev); + if (err < 0) + goto err_dev_reg; + + subsys->dev_root = dev; + return 0; + +err_dev_reg: + put_device(dev); + dev = NULL; +err_name: + kfree(dev); +err_dev: + bus_unregister(subsys); + return err; +} +EXPORT_SYMBOL_GPL(subsys_system_register); + int __init buses_init(void) { bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL); if (!bus_kset) return -ENOMEM; + + system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj); + if (!system_kset) + return -ENOMEM; + return 0; } diff --git a/drivers/base/class.c b/drivers/base/class.c index b80d91cc8c3..03243d4002f 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -184,9 +184,9 @@ int __class_register(struct class *cls, struct lock_class_key *key) if (!cp) return -ENOMEM; klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put); - INIT_LIST_HEAD(&cp->class_interfaces); + INIT_LIST_HEAD(&cp->interfaces); kset_init(&cp->glue_dirs); - __mutex_init(&cp->class_mutex, "struct class mutex", key); + __mutex_init(&cp->mutex, "subsys mutex", key); error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name); if (error) { kfree(cp); @@ -460,15 +460,15 @@ int class_interface_register(struct class_interface *class_intf) if (!parent) return -EINVAL; - mutex_lock(&parent->p->class_mutex); - list_add_tail(&class_intf->node, &parent->p->class_interfaces); + mutex_lock(&parent->p->mutex); + list_add_tail(&class_intf->node, &parent->p->interfaces); if (class_intf->add_dev) { class_dev_iter_init(&iter, parent, NULL, NULL); while ((dev = class_dev_iter_next(&iter))) class_intf->add_dev(dev, class_intf); class_dev_iter_exit(&iter); } - mutex_unlock(&parent->p->class_mutex); + mutex_unlock(&parent->p->mutex); return 0; } @@ -482,7 +482,7 @@ void class_interface_unregister(struct class_interface *class_intf) if (!parent) return; - mutex_lock(&parent->p->class_mutex); + mutex_lock(&parent->p->mutex); list_del_init(&class_intf->node); if (class_intf->remove_dev) { class_dev_iter_init(&iter, parent, NULL, NULL); @@ -490,7 +490,7 @@ void class_interface_unregister(struct class_interface *class_intf) class_intf->remove_dev(dev, class_intf); class_dev_iter_exit(&iter); } - mutex_unlock(&parent->p->class_mutex); + mutex_unlock(&parent->p->mutex); class_put(parent); } diff --git a/drivers/base/core.c b/drivers/base/core.c index 82c865452c7..4a67cc0c8b3 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -22,6 +22,7 @@ #include <linux/kallsyms.h> #include <linux/mutex.h> #include <linux/async.h> +#include <linux/pm_runtime.h> #include "base.h" #include "power/power.h" @@ -117,6 +118,56 @@ static const struct sysfs_ops dev_sysfs_ops = { .store = dev_attr_store, }; +#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
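A dev_ext_attribute carries a pointer to the variable an attribute exposes, so the shared show/store helpers that follow can serve any number of attributes. A sketch of the intended use (the struct layout, a device_attribute plus a 'var' pointer, is assumed from the companion device.h change):

	static unsigned long foo_timeout;	/* hypothetical tunable */

	static struct dev_ext_attribute dev_attr_foo_timeout = {
		__ATTR(foo_timeout, 0644, device_show_ulong, device_store_ulong),
		&foo_timeout,
	};

	/* reads print hex; writes are parsed with base 0 (0x..., 0..., decimal) */
	err = device_create_file(dev, &dev_attr_foo_timeout.attr);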
+ +ssize_t device_store_ulong(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + char *end; + unsigned long new = simple_strtoul(buf, &end, 0); + if (end == buf) + return -EINVAL; + *(unsigned long *)(ea->var) = new; + /* Always return full write size even if we didn't consume all */ + return size; +} +EXPORT_SYMBOL_GPL(device_store_ulong); + +ssize_t device_show_ulong(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var)); +} +EXPORT_SYMBOL_GPL(device_show_ulong); + +ssize_t device_store_int(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + char *end; + long new = simple_strtol(buf, &end, 0); + if (end == buf || new > INT_MAX || new < INT_MIN) + return -EINVAL; + *(int *)(ea->var) = new; + /* Always return full write size even if we didn't consume all */ + return size; +} +EXPORT_SYMBOL_GPL(device_store_int); + +ssize_t device_show_int(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dev_ext_attribute *ea = to_ext_attr(attr); + + return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var)); +} +EXPORT_SYMBOL_GPL(device_show_int); /** * device_release - free device structure. @@ -197,7 +248,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, if (MAJOR(dev->devt)) { const char *tmp; const char *name; - mode_t mode = 0; + umode_t mode = 0; add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt)); add_uevent_var(env, "MINOR=%u", MINOR(dev->devt)); @@ -463,7 +514,7 @@ static ssize_t show_dev(struct device *dev, struct device_attribute *attr, static struct device_attribute devt_attr = __ATTR(dev, S_IRUGO, show_dev, NULL); -/* kset to create /sys/devices/ */ +/* /sys/devices/ */ struct kset *devices_kset; /** @@ -710,6 +761,10 @@ static struct kobject *get_device_parent(struct device *dev, return k; } + /* subsystems can specify a default root directory for their devices */ + if (!parent && dev->bus && dev->bus->dev_root) + return &dev->bus->dev_root->kobj; + if (parent) return &parent->kobj; return NULL; } @@ -730,14 +785,6 @@ static void cleanup_device_parent(struct device *dev) cleanup_glue_dir(dev, dev->kobj.parent); } -static void setup_parent(struct device *dev, struct device *parent) -{ - struct kobject *kobj; - kobj = get_device_parent(dev, parent); - if (kobj) - dev->kobj.parent = kobj; -} - static int device_add_class_symlinks(struct device *dev) { int error; @@ -890,6 +937,7 @@ int device_private_init(struct device *dev) int device_add(struct device *dev) { struct device *parent = NULL; + struct kobject *kobj; struct class_interface *class_intf; int error = -EINVAL; @@ -913,6 +961,10 @@ int device_add(struct device *dev) dev->init_name = NULL; } + /* subsystems can specify simple device enumeration */ + if (!dev_name(dev) && dev->bus && dev->bus->dev_name) + dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id); + if (!dev_name(dev)) { error = -EINVAL; goto name_error; } pr_debug("device: '%s': %s\n", dev_name(dev), __func__); parent = get_device(dev->parent); - setup_parent(dev, parent); + kobj = get_device_parent(dev, parent); + if (kobj) + dev->kobj.parent = kobj; /* use parent numa_node */ if (parent) @@ -981,17 +1035,17 @@ int device_add(struct device *dev) &parent->p->klist_children); if (dev->class) { -
mutex_lock(&dev->class->p->class_mutex); + mutex_lock(&dev->class->p->mutex); /* tie the class to the device */ klist_add_tail(&dev->knode_class, &dev->class->p->klist_devices); /* notify any interfaces that the device is here */ list_for_each_entry(class_intf, - &dev->class->p->class_interfaces, node) + &dev->class->p->interfaces, node) if (class_intf->add_dev) class_intf->add_dev(dev, class_intf); - mutex_unlock(&dev->class->p->class_mutex); + mutex_unlock(&dev->class->p->mutex); } done: put_device(dev); @@ -1106,15 +1160,15 @@ void device_del(struct device *dev) if (dev->class) { device_remove_class_symlinks(dev); - mutex_lock(&dev->class->p->class_mutex); + mutex_lock(&dev->class->p->mutex); /* notify any interfaces that the device is now gone */ list_for_each_entry(class_intf, - &dev->class->p->class_interfaces, node) + &dev->class->p->interfaces, node) if (class_intf->remove_dev) class_intf->remove_dev(dev, class_intf); /* remove the device from the class list */ klist_del(&dev->knode_class); - mutex_unlock(&dev->class->p->class_mutex); + mutex_unlock(&dev->class->p->mutex); } device_remove_file(dev, &uevent_attr); device_remove_attrs(dev); @@ -1181,7 +1235,7 @@ static struct device *next_device(struct klist_iter *i) * freed by the caller. */ const char *device_get_devnode(struct device *dev, - mode_t *mode, const char **tmp) + umode_t *mode, const char **tmp) { char *s; @@ -1743,6 +1797,10 @@ void device_shutdown(void) list_del_init(&dev->kobj.entry); spin_unlock(&devices_kset->list_lock); + /* Don't allow any more runtime suspends */ + pm_runtime_get_noresume(dev); + pm_runtime_barrier(dev); + if (dev->bus && dev->bus->shutdown) { dev_dbg(dev, "shutdown\n"); dev->bus->shutdown(dev); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 251acea3d35..db87e78d745 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -1,8 +1,8 @@ /* - * drivers/base/cpu.c - basic CPU class support + * CPU subsystem support */ -#include <linux/sysdev.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> @@ -11,43 +11,44 @@ #include <linux/device.h> #include <linux/node.h> #include <linux/gfp.h> +#include <linux/percpu.h> #include "base.h" -static struct sysdev_class_attribute *cpu_sysdev_class_attrs[]; - -struct sysdev_class cpu_sysdev_class = { +struct bus_type cpu_subsys = { .name = "cpu", - .attrs = cpu_sysdev_class_attrs, + .dev_name = "cpu", }; -EXPORT_SYMBOL(cpu_sysdev_class); +EXPORT_SYMBOL_GPL(cpu_subsys); -static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices); +static DEFINE_PER_CPU(struct device *, cpu_sys_devices); #ifdef CONFIG_HOTPLUG_CPU -static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr, +static ssize_t show_online(struct device *dev, + struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); - return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id)); + return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id)); } -static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr, - const char *buf, size_t count) +static ssize_t __ref store_online(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t ret; cpu_hotplug_driver_lock(); switch (buf[0]) { case '0': - ret = cpu_down(cpu->sysdev.id); + ret = 
cpu_down(cpu->dev.id); if (!ret) kobject_uevent(&dev->kobj, KOBJ_OFFLINE); break; case '1': - ret = cpu_up(cpu->sysdev.id); + ret = cpu_up(cpu->dev.id); if (!ret) kobject_uevent(&dev->kobj, KOBJ_ONLINE); break; @@ -60,44 +61,44 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut ret = count; return ret; } -static SYSDEV_ATTR(online, 0644, show_online, store_online); +static DEVICE_ATTR(online, 0644, show_online, store_online); static void __cpuinit register_cpu_control(struct cpu *cpu) { - sysdev_create_file(&cpu->sysdev, &attr_online); + device_create_file(&cpu->dev, &dev_attr_online); } void unregister_cpu(struct cpu *cpu) { - int logical_cpu = cpu->sysdev.id; + int logical_cpu = cpu->dev.id; unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu)); - sysdev_remove_file(&cpu->sysdev, &attr_online); + device_remove_file(&cpu->dev, &dev_attr_online); - sysdev_unregister(&cpu->sysdev); + device_unregister(&cpu->dev); per_cpu(cpu_sys_devices, logical_cpu) = NULL; return; } #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE -static ssize_t cpu_probe_store(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t cpu_probe_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { return arch_cpu_probe(buf, count); } -static ssize_t cpu_release_store(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t cpu_release_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { return arch_cpu_release(buf, count); } -static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); -static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); +static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); +static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store); #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ #else /* ... 
!CONFIG_HOTPLUG_CPU */ @@ -109,15 +110,15 @@ static inline void register_cpu_control(struct cpu *cpu) #ifdef CONFIG_KEXEC #include <linux/kexec.h> -static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr, +static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr, char *buf) { - struct cpu *cpu = container_of(dev, struct cpu, sysdev); + struct cpu *cpu = container_of(dev, struct cpu, dev); ssize_t rc; unsigned long long addr; int cpunum; - cpunum = cpu->sysdev.id; + cpunum = cpu->dev.id; /* * Might be reading other cpu's data based on which cpu read thread @@ -129,7 +130,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute rc = sprintf(buf, "%Lx\n", addr); return rc; } -static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); +static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL); #endif /* @@ -137,12 +138,12 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); */ struct cpu_attr { - struct sysdev_class_attribute attr; + struct device_attribute attr; const struct cpumask *const * const map; }; -static ssize_t show_cpus_attr(struct sysdev_class *class, - struct sysdev_class_attribute *attr, +static ssize_t show_cpus_attr(struct device *dev, + struct device_attribute *attr, char *buf) { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); @@ -153,10 +154,10 @@ static ssize_t show_cpus_attr(struct sysdev_class *class, return n; } -#define _CPU_ATTR(name, map) \ - { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map } +#define _CPU_ATTR(name, map) \ + { __ATTR(name, 0444, show_cpus_attr, NULL), map } -/* Keep in sync with cpu_sysdev_class_attrs */ +/* Keep in sync with cpu_subsys_attrs */ static struct cpu_attr cpu_attrs[] = { _CPU_ATTR(online, &cpu_online_mask), _CPU_ATTR(possible, &cpu_possible_mask), @@ -166,19 +167,19 @@ static struct cpu_attr cpu_attrs[] = { /* * Print values for NR_CPUS and offlined cpus */ -static ssize_t print_cpus_kernel_max(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *buf) +static ssize_t print_cpus_kernel_max(struct device *dev, + struct device_attribute *attr, char *buf) { int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); return n; } -static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); +static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ unsigned int total_cpus; -static ssize_t print_cpus_offline(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *buf) +static ssize_t print_cpus_offline(struct device *dev, + struct device_attribute *attr, char *buf) { int n = 0, len = PAGE_SIZE-2; cpumask_var_t offline; @@ -205,7 +206,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, n += snprintf(&buf[n], len - n, "\n"); return n; } -static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); +static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL); /* * register_cpu - Setup a sysfs device for a CPU. 
@@ -218,57 +219,87 @@ static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); int __cpuinit register_cpu(struct cpu *cpu, int num) { int error; - cpu->node_id = cpu_to_node(num); - cpu->sysdev.id = num; - cpu->sysdev.cls = &cpu_sysdev_class; - - error = sysdev_register(&cpu->sysdev); + cpu->node_id = cpu_to_node(num); + cpu->dev.id = num; + cpu->dev.bus = &cpu_subsys; + error = device_register(&cpu->dev); if (!error && cpu->hotpluggable) register_cpu_control(cpu); if (!error) - per_cpu(cpu_sys_devices, num) = &cpu->sysdev; + per_cpu(cpu_sys_devices, num) = &cpu->dev; if (!error) register_cpu_under_node(num, cpu_to_node(num)); #ifdef CONFIG_KEXEC if (!error) - error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes); + error = device_create_file(&cpu->dev, &dev_attr_crash_notes); #endif return error; } -struct sys_device *get_cpu_sysdev(unsigned cpu) +struct device *get_cpu_device(unsigned cpu) { if (cpu < nr_cpu_ids && cpu_possible(cpu)) return per_cpu(cpu_sys_devices, cpu); else return NULL; } -EXPORT_SYMBOL_GPL(get_cpu_sysdev); +EXPORT_SYMBOL_GPL(get_cpu_device); + +static struct attribute *cpu_root_attrs[] = { +#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE + &dev_attr_probe.attr, + &dev_attr_release.attr, +#endif + &cpu_attrs[0].attr.attr, + &cpu_attrs[1].attr.attr, + &cpu_attrs[2].attr.attr, + &dev_attr_kernel_max.attr, + &dev_attr_offline.attr, + NULL +}; + +static struct attribute_group cpu_root_attr_group = { + .attrs = cpu_root_attrs, +}; -int __init cpu_dev_init(void) +static const struct attribute_group *cpu_root_attr_groups[] = { + &cpu_root_attr_group, + NULL, +}; + +bool cpu_is_hotpluggable(unsigned cpu) { - int err; + struct device *dev = get_cpu_device(cpu); + return dev && container_of(dev, struct cpu, dev)->hotpluggable; +} +EXPORT_SYMBOL_GPL(cpu_is_hotpluggable); - err = sysdev_class_register(&cpu_sysdev_class); -#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) - if (!err) - err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class); +#ifdef CONFIG_GENERIC_CPU_DEVICES +static DEFINE_PER_CPU(struct cpu, cpu_devices); #endif - return err; +static void __init cpu_dev_register_generic(void) +{ +#ifdef CONFIG_GENERIC_CPU_DEVICES + int i; + + for_each_possible_cpu(i) { + if (register_cpu(&per_cpu(cpu_devices, i), i)) + panic("Failed to register CPU device"); + } +#endif } -static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = { -#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE - &attr_probe, - &attr_release, +void __init cpu_dev_init(void) +{ + if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups)) + panic("Failed to register CPU subsystem"); + + cpu_dev_register_generic(); + +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) + sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root); #endif - &cpu_attrs[0].attr, - &cpu_attrs[1].attr, - &cpu_attrs[2].attr, - &attr_kernel_max, - &attr_offline, - NULL -}; +}
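With cpu_subsys registered through subsys_system_register() and .dev_name = "cpu", every CPU now registers as an ordinary device named "cpu<id>" under /sys/devices/system/cpu/; device_add() builds the name from dev_name plus dev->id, as the core.c hunk above shows. A sketch of a consumer built on the accessors above:

	unsigned int cpu;
	struct device *dev;

	for_each_possible_cpu(cpu) {
		dev = get_cpu_device(cpu);	/* the "cpu<N>" device, or NULL */
		if (!dev)
			continue;
		if (cpu_is_hotpluggable(cpu))
			dev_info(dev, "has an 'online' control file\n");
	}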
diff --git a/drivers/base/devres.c b/drivers/base/devres.c index 65cd7483245..524bf96c289 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c @@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(devm_kzalloc); * @dev: Device this memory belongs to * @p: Memory to free * - * Free memory allocated with dev_kzalloc(). + * Free memory allocated with devm_kzalloc(). */ void devm_kfree(struct device *dev, void *p) { diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index a4760e095ff..8493536ea55 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -40,7 +40,7 @@ static struct req { struct completion done; int err; const char *name; - mode_t mode; /* 0 => delete */ + umode_t mode; /* 0 => delete */ struct device *dev; } *requests; @@ -142,7 +142,7 @@ int devtmpfs_delete_node(struct device *dev) return req.err; } -static int dev_mkdir(const char *name, mode_t mode) +static int dev_mkdir(const char *name, umode_t mode) { struct dentry *dentry; struct path path; @@ -189,7 +189,7 @@ static int create_path(const char *nodepath) return err; } -static int handle_create(const char *nodename, mode_t mode, struct device *dev) +static int handle_create(const char *nodename, umode_t mode, struct device *dev) { struct dentry *dentry; struct path path; @@ -378,7 +378,7 @@ int devtmpfs_mount(const char *mntdir) static DECLARE_COMPLETION(setup_done); -static int handle(const char *name, mode_t mode, struct device *dev) +static int handle(const char *name, umode_t mode, struct device *dev) { if (mode) return handle_create(name, mode, dev); @@ -413,10 +413,9 @@ static int devtmpfsd(void *p) } spin_lock(&req_lock); } - set_current_state(TASK_INTERRUPTIBLE); + __set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&req_lock); schedule(); - __set_current_state(TASK_RUNNING); } return 0; out: diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c new file mode 100644 index 00000000000..e38ad243b4b --- /dev/null +++ b/drivers/base/dma-buf.c @@ -0,0 +1,291 @@ +/* + * Framework for buffer objects that can be shared across devices/subsystems. + * + * Copyright(C) 2011 Linaro Limited. All rights reserved. + * Author: Sumit Semwal <sumit.semwal@ti.com> + * + * Many thanks to linaro-mm-sig list, and especially + * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and + * Daniel Vetter <daniel@ffwll.ch> for their support in creation and + * refining of this idea. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/dma-buf.h> +#include <linux/anon_inodes.h> +#include <linux/export.h> + +static inline int is_dma_buf_file(struct file *); + +static int dma_buf_release(struct inode *inode, struct file *file) +{ + struct dma_buf *dmabuf; + + if (!is_dma_buf_file(file)) + return -EINVAL; + + dmabuf = file->private_data; + + dmabuf->ops->release(dmabuf); + kfree(dmabuf); + return 0; +} + +static const struct file_operations dma_buf_fops = { + .release = dma_buf_release, +}; + +/* + * is_dma_buf_file - Check if struct file* is associated with dma_buf + */ +static inline int is_dma_buf_file(struct file *file) +{ + return file->f_op == &dma_buf_fops; +} + +/** + * dma_buf_export - Creates a new dma_buf, and associates an anon file + * with this buffer, so it can be exported. + * Also connects the allocator-specific data and ops to the buffer.
+ * + * @priv: [in] Attach private data of allocator to this buffer + * @ops: [in] Attach allocator-defined dma buf ops to the new buffer. + * @size: [in] Size of the buffer + * @flags: [in] mode flags for the file. + * + * Returns, on success, a newly created dma_buf object, which wraps the + * supplied private data and operations for dma_buf_ops. If ops are + * missing or the struct dma_buf allocation fails, returns a negative + * error wrapped in ERR_PTR(). + * + */ +struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops, + size_t size, int flags) +{ + struct dma_buf *dmabuf; + struct file *file; + + if (WARN_ON(!priv || !ops + || !ops->map_dma_buf + || !ops->unmap_dma_buf + || !ops->release)) { + return ERR_PTR(-EINVAL); + } + + dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL); + if (dmabuf == NULL) + return ERR_PTR(-ENOMEM); + + dmabuf->priv = priv; + dmabuf->ops = ops; + dmabuf->size = size; + + file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags); + + dmabuf->file = file; + + mutex_init(&dmabuf->lock); + INIT_LIST_HEAD(&dmabuf->attachments); + + return dmabuf; +} +EXPORT_SYMBOL_GPL(dma_buf_export); + + +/** + * dma_buf_fd - returns a file descriptor for the given dma_buf + * @dmabuf: [in] pointer to dma_buf for which fd is required. + * + * On success, returns an associated 'fd'. Else, returns error. + */ +int dma_buf_fd(struct dma_buf *dmabuf) +{ + int error, fd; + + if (!dmabuf || !dmabuf->file) + return -EINVAL; + + error = get_unused_fd(); + if (error < 0) + return error; + fd = error; + + fd_install(fd, dmabuf->file); + + return fd; +} +EXPORT_SYMBOL_GPL(dma_buf_fd); + +/** + * dma_buf_get - returns the dma_buf structure related to an fd + * @fd: [in] fd associated with the dma_buf to be returned + * + * On success, returns the dma_buf structure associated with an fd; uses + * the file's refcounting done by fget to increase the refcount. Returns + * ERR_PTR otherwise. + */ +struct dma_buf *dma_buf_get(int fd) +{ + struct file *file; + + file = fget(fd); + + if (!file) + return ERR_PTR(-EBADF); + + if (!is_dma_buf_file(file)) { + fput(file); + return ERR_PTR(-EINVAL); + } + + return file->private_data; +} +EXPORT_SYMBOL_GPL(dma_buf_get); + +/** + * dma_buf_put - decreases refcount of the buffer + * @dmabuf: [in] buffer to reduce refcount of + * + * Uses file's refcounting done implicitly by fput() + */ +void dma_buf_put(struct dma_buf *dmabuf) +{ + if (WARN_ON(!dmabuf || !dmabuf->file)) + return; + + fput(dmabuf->file); +} +EXPORT_SYMBOL_GPL(dma_buf_put);
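An exporter wires its allocator into the framework by filling the three mandatory dma_buf_ops and exporting the buffer; a minimal sketch with invented foo_* names (callback signatures assumed from the dma-buf header that accompanies this file):

	static struct dma_buf_ops foo_dmabuf_ops = {
		.map_dma_buf	= foo_map_dma_buf,	/* hands back an sg_table */
		.unmap_dma_buf	= foo_unmap_dma_buf,
		.release	= foo_release,		/* last file reference gone */
	};

	struct dma_buf *dmabuf;
	int fd;

	dmabuf = dma_buf_export(foo_buffer, &foo_dmabuf_ops,
				foo_buffer_size, O_CLOEXEC);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf);	/* pass this fd to the importing side */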
+ +/** + * dma_buf_attach - Add the device to dma_buf's attachments list; optionally, + * calls attach() of dma_buf_ops to allow device-specific attach functionality + * @dmabuf: [in] buffer to attach device to. + * @dev: [in] device to be attached. + * + * Returns struct dma_buf_attachment * for this attachment; may return negative + * error codes. + * + */ +struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, + struct device *dev) +{ + struct dma_buf_attachment *attach; + int ret; + + if (WARN_ON(!dmabuf || !dev || !dmabuf->ops)) + return ERR_PTR(-EINVAL); + + attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL); + if (attach == NULL) + goto err_alloc; + + mutex_lock(&dmabuf->lock); + + attach->dev = dev; + attach->dmabuf = dmabuf; + if (dmabuf->ops->attach) { + ret = dmabuf->ops->attach(dmabuf, dev, attach); + if (ret) + goto err_attach; + } + list_add(&attach->node, &dmabuf->attachments); + + mutex_unlock(&dmabuf->lock); + return attach; + +err_alloc: + return ERR_PTR(-ENOMEM); +err_attach: + kfree(attach); + mutex_unlock(&dmabuf->lock); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(dma_buf_attach); + +/** + * dma_buf_detach - Remove the given attachment from dmabuf's attachments list; + * optionally calls detach() of dma_buf_ops for device-specific detach + * @dmabuf: [in] buffer to detach from. + * @attach: [in] attachment to be detached; is freed after this call. + * + */ +void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) +{ + if (WARN_ON(!dmabuf || !attach || !dmabuf->ops)) + return; + + mutex_lock(&dmabuf->lock); + list_del(&attach->node); + if (dmabuf->ops->detach) + dmabuf->ops->detach(dmabuf, attach); + + mutex_unlock(&dmabuf->lock); + kfree(attach); +} +EXPORT_SYMBOL_GPL(dma_buf_detach); + +/** + * dma_buf_map_attachment - Returns the scatterlist table of the attachment; + * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the + * dma_buf_ops. + * @attach: [in] attachment whose scatterlist is to be returned + * @direction: [in] direction of DMA transfer + * + * Returns sg_table containing the scatterlist to be returned; may return NULL + * or ERR_PTR. + * + */ +struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, + enum dma_data_direction direction) +{ + struct sg_table *sg_table = ERR_PTR(-EINVAL); + + might_sleep(); + + if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops)) + return ERR_PTR(-EINVAL); + + mutex_lock(&attach->dmabuf->lock); + if (attach->dmabuf->ops->map_dma_buf) + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); + mutex_unlock(&attach->dmabuf->lock); + + return sg_table; +} +EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
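The importing side is the mirror image; a hedged sketch of a driver consuming the fd produced above (error handling trimmed for brevity):

	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	attach = dma_buf_attach(dmabuf, dev);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

	/* ... program the device with the addresses in sgt ... */

	dma_buf_unmap_attachment(attach, sgt);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);			/* drops the file reference */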
+ +/** + * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might + * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of + * dma_buf_ops. + * @attach: [in] attachment to unmap buffer from + * @sg_table: [in] scatterlist info of the buffer to unmap + * + */ +void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, + struct sg_table *sg_table) +{ + if (WARN_ON(!attach || !attach->dmabuf || !sg_table + || !attach->dmabuf->ops)) + return; + + mutex_lock(&attach->dmabuf->lock); + if (attach->dmabuf->ops->unmap_dma_buf) + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table); + mutex_unlock(&attach->dmabuf->lock); + +} +EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 06ed6b4e7df..26ab358dac6 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev, int loading = simple_strtol(buf, NULL, 10); int i; + mutex_lock(&fw_lock); + + if (!fw_priv->fw) + goto out; + switch (loading) { case 1: - mutex_lock(&fw_lock); - if (!fw_priv->fw) { - mutex_unlock(&fw_lock); - break; - } firmware_free_data(fw_priv->fw); memset(fw_priv->fw, 0, sizeof(struct firmware)); /* If the pages are not owned by 'struct firmware' */ @@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev, fw_priv->page_array_size = 0; fw_priv->nr_pages = 0; set_bit(FW_STATUS_LOADING, &fw_priv->status); - mutex_unlock(&fw_lock); break; case 0: if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { @@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev, fw_load_abort(fw_priv); break; } - +out: + mutex_unlock(&fw_lock); return count; } @@ -534,6 +534,8 @@ static int _request_firmware(const struct firmware **firmware_p, return 0; } + read_lock_usermodehelper(); + if (WARN_ON(usermodehelper_is_disabled())) { dev_err(device, "firmware: %s will not be loaded\n", name); retval = -EBUSY; @@ -572,6 +574,8 @@ static int _request_firmware(const struct firmware **firmware_p, fw_destroy_instance(fw_priv); out: + read_unlock_usermodehelper(); + if (retval) { release_firmware(firmware); *firmware_p = NULL; diff --git a/drivers/base/init.c b/drivers/base/init.c index c8a934e7942..c16f0b808a1 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -31,7 +31,6 @@ void __init driver_init(void) * core core pieces. */ platform_bus_init(); - system_bus_init(); cpu_dev_init(); memory_dev_init(); } diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 8272d92d22c..ed5de58c340 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -1,5 +1,5 @@ /* - * drivers/base/memory.c - basic Memory class support + * Memory subsystem support * * Written by Matt Tolentino <matthew.e.tolentino@intel.com> * Dave Hansen <haveblue@us.ibm.com> @@ -10,7 +10,6 @@ * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
*/ -#include <linux/sysdev.h> #include <linux/module.h> #include <linux/init.h> #include <linux/topology.h> @@ -38,26 +37,9 @@ static inline int base_memory_block_id(int section_nr) return section_nr / sections_per_block; } -static struct sysdev_class memory_sysdev_class = { +static struct bus_type memory_subsys = { .name = MEMORY_CLASS_NAME, -}; - -static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj) -{ - return MEMORY_CLASS_NAME; -} - -static int memory_uevent(struct kset *kset, struct kobject *obj, - struct kobj_uevent_env *env) -{ - int retval = 0; - - return retval; -} - -static const struct kset_uevent_ops memory_uevent_ops = { - .name = memory_uevent_name, - .uevent = memory_uevent, + .dev_name = MEMORY_CLASS_NAME, }; static BLOCKING_NOTIFIER_HEAD(memory_chain); @@ -96,21 +78,21 @@ int register_memory(struct memory_block *memory) { int error; - memory->sysdev.cls = &memory_sysdev_class; - memory->sysdev.id = memory->start_section_nr / sections_per_block; + memory->dev.bus = &memory_subsys; + memory->dev.id = memory->start_section_nr / sections_per_block; - error = sysdev_register(&memory->sysdev); + error = device_register(&memory->dev); return error; } static void unregister_memory(struct memory_block *memory) { - BUG_ON(memory->sysdev.cls != &memory_sysdev_class); + BUG_ON(memory->dev.bus != &memory_subsys); /* drop the ref. we got in remove_memory_block() */ - kobject_put(&memory->sysdev.kobj); - sysdev_unregister(&memory->sysdev); + kobject_put(&memory->dev.kobj); + device_unregister(&memory->dev); } unsigned long __weak memory_block_size_bytes(void) @@ -138,22 +120,22 @@ static unsigned long get_memory_block_size(void) * uses. */ -static ssize_t show_mem_start_phys_index(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_mem_start_phys_index(struct device *dev, + struct device_attribute *attr, char *buf) { struct memory_block *mem = - container_of(dev, struct memory_block, sysdev); + container_of(dev, struct memory_block, dev); unsigned long phys_index; phys_index = mem->start_section_nr / sections_per_block; return sprintf(buf, "%08lx\n", phys_index); } -static ssize_t show_mem_end_phys_index(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_mem_end_phys_index(struct device *dev, + struct device_attribute *attr, char *buf) { struct memory_block *mem = - container_of(dev, struct memory_block, sysdev); + container_of(dev, struct memory_block, dev); unsigned long phys_index; phys_index = mem->end_section_nr / sections_per_block; @@ -163,13 +145,13 @@ static ssize_t show_mem_end_phys_index(struct sys_device *dev, /* * Show whether the section of memory is likely to be hot-removable */ -static ssize_t show_mem_removable(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_mem_removable(struct device *dev, + struct device_attribute *attr, char *buf) { unsigned long i, pfn; int ret = 1; struct memory_block *mem = - container_of(dev, struct memory_block, sysdev); + container_of(dev, struct memory_block, dev); for (i = 0; i < sections_per_block; i++) { pfn = section_nr_to_pfn(mem->start_section_nr + i); @@ -182,11 +164,11 @@ static ssize_t show_mem_removable(struct sys_device *dev, /* * online, offline, going offline, etc. 
*/ -static ssize_t show_mem_state(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_mem_state(struct device *dev, + struct device_attribute *attr, char *buf) { struct memory_block *mem = - container_of(dev, struct memory_block, sysdev); + container_of(dev, struct memory_block, dev); ssize_t len = 0; /* @@ -313,24 +295,35 @@ static int memory_block_change_state(struct memory_block *mem, ret = memory_block_action(mem->start_section_nr, to_state); - if (ret) + if (ret) { mem->state = from_state_req; - else - mem->state = to_state; + goto out; + } + mem->state = to_state; + switch (mem->state) { + case MEM_OFFLINE: + kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE); + break; + case MEM_ONLINE: + kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE); + break; + default: + break; + } out: mutex_unlock(&mem->state_mutex); return ret; } static ssize_t -store_mem_state(struct sys_device *dev, - struct sysdev_attribute *attr, const char *buf, size_t count) +store_mem_state(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { struct memory_block *mem; int ret = -EINVAL; - mem = container_of(dev, struct memory_block, sysdev); + mem = container_of(dev, struct memory_block, dev); if (!strncmp(buf, "online", min((int)count, 6))) ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); @@ -351,41 +344,41 @@ store_mem_state(struct sys_device *dev, * s.t. if I offline all of these sections I can then * remove the physical device? */ -static ssize_t show_phys_device(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_phys_device(struct device *dev, + struct device_attribute *attr, char *buf) { struct memory_block *mem = - container_of(dev, struct memory_block, sysdev); + container_of(dev, struct memory_block, dev); return sprintf(buf, "%d\n", mem->phys_device); } -static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); -static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL); -static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state); -static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL); -static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); +static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); +static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL); +static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); +static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); +static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); #define mem_create_simple_file(mem, attr_name) \ - sysdev_create_file(&mem->sysdev, &attr_##attr_name) + device_create_file(&mem->dev, &dev_attr_##attr_name) #define mem_remove_simple_file(mem, attr_name) \ - sysdev_remove_file(&mem->sysdev, &attr_##attr_name) + device_remove_file(&mem->dev, &dev_attr_##attr_name) /* * Block size attribute stuff */ static ssize_t -print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, +print_block_size(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lx\n", get_memory_block_size()); } -static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); +static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
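Subsystem-wide files that used to hang off the sysdev class kobject become plain device attributes on the subsystem's root device, as block_size_init() just below shows; the same pattern serves any subsystem-wide file. A sketch with a hypothetical attribute:

	static DEVICE_ATTR(foo_info, 0444, show_foo_info, NULL);	/* invented */

	static int foo_info_init(void)
	{
		/* appears as /sys/devices/system/memory/foo_info */
		return device_create_file(memory_subsys.dev_root,
					  &dev_attr_foo_info);
	}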
block_size_init(void) */ #ifdef CONFIG_ARCH_MEMORY_PROBE static ssize_t -memory_probe_store(struct class *class, struct class_attribute *attr, +memory_probe_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u64 phys_addr; @@ -423,12 +416,11 @@ memory_probe_store(struct class *class, struct class_attribute *attr, out: return ret; } -static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store); +static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store); static int memory_probe_init(void) { - return sysfs_create_file(&memory_sysdev_class.kset.kobj, - &class_attr_probe.attr); + return device_create_file(memory_subsys.dev_root, &dev_attr_probe); } #else static inline int memory_probe_init(void) @@ -444,8 +436,8 @@ static inline int memory_probe_init(void) /* Soft offline a page */ static ssize_t -store_soft_offline_page(struct class *class, - struct class_attribute *attr, +store_soft_offline_page(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { int ret; @@ -463,8 +455,8 @@ store_soft_offline_page(struct class *class, /* Forcibly offline a page, including killing processes. */ static ssize_t -store_hard_offline_page(struct class *class, - struct class_attribute *attr, +store_hard_offline_page(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) { int ret; @@ -478,18 +470,18 @@ store_hard_offline_page(struct class *class, return ret ? ret : count; } -static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page); -static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page); +static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page); +static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page); static __init int memory_fail_init(void) { int err; - err = sysfs_create_file(&memory_sysdev_class.kset.kobj, - &class_attr_soft_offline_page.attr); + err = device_create_file(memory_subsys.dev_root, + &dev_attr_soft_offline_page); if (!err) - err = sysfs_create_file(&memory_sysdev_class.kset.kobj, - &class_attr_hard_offline_page.attr); + err = device_create_file(memory_subsys.dev_root, + &dev_attr_hard_offline_page); return err; } #else @@ -509,31 +501,23 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn) return 0; } +/* + * A reference for the returned object is held and the reference for the + * hinted object is released. + */ struct memory_block *find_memory_block_hinted(struct mem_section *section, struct memory_block *hint) { - struct kobject *kobj; - struct sys_device *sysdev; - struct memory_block *mem; - char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1]; int block_id = base_memory_block_id(__section_nr(section)); + struct device *hintdev = hint ? &hint->dev : NULL; + struct device *dev; - kobj = hint ? 
&hint->sysdev.kobj : NULL; - - /* - * This only works because we know that section == sysdev->id - * slightly redundant with sysdev_register() - */ - sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id); - - kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj); - if (!kobj) + dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev); + if (hint) + put_device(&hint->dev); + if (!dev) return NULL; - - sysdev = container_of(kobj, struct sys_device, kobj); - mem = container_of(sysdev, struct memory_block, sysdev); - - return mem; + return container_of(dev, struct memory_block, dev); } /* @@ -542,7 +526,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section, * this gets to be a real problem, we can always use a radix * tree or something here. * - * This could be made generic for all sysdev classes. + * This could be made generic for all device subsystems. */ struct memory_block *find_memory_block(struct mem_section *section) { @@ -598,7 +582,7 @@ static int add_memory_section(int nid, struct mem_section *section, mem = find_memory_block(section); if (mem) { mem->section_count++; - kobject_put(&mem->sysdev.kobj); + kobject_put(&mem->dev.kobj); } else ret = init_memory_block(&mem, section, state); @@ -631,7 +615,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, unregister_memory(mem); kfree(mem); } else - kobject_put(&mem->sysdev.kobj); + kobject_put(&mem->dev.kobj); mutex_unlock(&mem_sysfs_mutex); return 0; @@ -664,8 +648,7 @@ int __init memory_dev_init(void) int err; unsigned long block_sz; - memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops; - ret = sysdev_class_register(&memory_sysdev_class); + ret = subsys_system_register(&memory_subsys, NULL); if (ret) goto out; diff --git a/drivers/base/node.c b/drivers/base/node.c index 793f796c4da..44f427a6611 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -1,8 +1,7 @@ /* - * drivers/base/node.c - basic Node class support + * Basic Node interface support */ -#include <linux/sysdev.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mm.h> @@ -19,18 +18,16 @@ #include <linux/swap.h> #include <linux/slab.h> -static struct sysdev_class_attribute *node_state_attrs[]; - -static struct sysdev_class node_class = { +static struct bus_type node_subsys = { .name = "node", - .attrs = node_state_attrs, + .dev_name = "node", }; -static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf) +static ssize_t node_read_cpumap(struct device *dev, int type, char *buf) { struct node *node_dev = to_node(dev); - const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id); + const struct cpumask *mask = cpumask_of_node(node_dev->dev.id); int len; /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. 
*/ @@ -44,23 +41,23 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf) return len; } -static inline ssize_t node_read_cpumask(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static inline ssize_t node_read_cpumask(struct device *dev, + struct device_attribute *attr, char *buf) { return node_read_cpumap(dev, 0, buf); } -static inline ssize_t node_read_cpulist(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static inline ssize_t node_read_cpulist(struct device *dev, + struct device_attribute *attr, char *buf) { return node_read_cpumap(dev, 1, buf); } -static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL); -static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL); +static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL); +static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL); #define K(x) ((x) << (PAGE_SHIFT - 10)) -static ssize_t node_read_meminfo(struct sys_device * dev, - struct sysdev_attribute *attr, char * buf) +static ssize_t node_read_meminfo(struct device *dev, + struct device_attribute *attr, char *buf) { int n; int nid = dev->id; @@ -127,12 +124,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_WRITEBACK)), nid, K(node_page_state(nid, NR_FILE_PAGES)), nid, K(node_page_state(nid, NR_FILE_MAPPED)), - nid, K(node_page_state(nid, NR_ANON_PAGES) #ifdef CONFIG_TRANSPARENT_HUGEPAGE + nid, K(node_page_state(nid, NR_ANON_PAGES) + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR + HPAGE_PMD_NR), +#else + nid, K(node_page_state(nid, NR_ANON_PAGES)), #endif - ), nid, K(node_page_state(nid, NR_SHMEM)), nid, node_page_state(nid, NR_KERNEL_STACK) * THREAD_SIZE / 1024, @@ -143,22 +141,23 @@ static ssize_t node_read_meminfo(struct sys_device * dev, nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), - nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE + nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) , nid, K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * - HPAGE_PMD_NR) + HPAGE_PMD_NR)); +#else + nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); #endif - ); n += hugetlb_report_node_meminfo(nid, buf + n); return n; } #undef K -static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL); +static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL); -static ssize_t node_read_numastat(struct sys_device * dev, - struct sysdev_attribute *attr, char * buf) +static ssize_t node_read_numastat(struct device *dev, + struct device_attribute *attr, char *buf) { return sprintf(buf, "numa_hit %lu\n" @@ -174,10 +173,10 @@ static ssize_t node_read_numastat(struct sys_device * dev, node_page_state(dev->id, NUMA_LOCAL), node_page_state(dev->id, NUMA_OTHER)); } -static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); +static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); -static ssize_t node_read_vmstat(struct sys_device *dev, - struct sysdev_attribute *attr, char *buf) +static ssize_t node_read_vmstat(struct device *dev, + struct device_attribute *attr, char *buf) { int nid = dev->id; int i; @@ -189,10 +188,10 @@ static ssize_t node_read_vmstat(struct sys_device *dev, return n; } -static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL); +static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL); -static ssize_t node_read_distance(struct sys_device * dev, - struct sysdev_attribute 
*attr, char * buf) +static ssize_t node_read_distance(struct device *dev, + struct device_attribute *attr, char * buf) { int nid = dev->id; int len = 0; @@ -210,7 +209,7 @@ static ssize_t node_read_distance(struct sys_device * dev, len += sprintf(buf + len, "\n"); return len; } -static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); +static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL); #ifdef CONFIG_HUGETLBFS /* @@ -228,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node; static inline bool hugetlb_register_node(struct node *node) { if (__hugetlb_register_node && - node_state(node->sysdev.id, N_HIGH_MEMORY)) { + node_state(node->dev.id, N_HIGH_MEMORY)) { __hugetlb_register_node(node); return true; } @@ -264,17 +263,17 @@ int register_node(struct node *node, int num, struct node *parent) { int error; - node->sysdev.id = num; - node->sysdev.cls = &node_class; - error = sysdev_register(&node->sysdev); + node->dev.id = num; + node->dev.bus = &node_subsys; + error = device_register(&node->dev); if (!error){ - sysdev_create_file(&node->sysdev, &attr_cpumap); - sysdev_create_file(&node->sysdev, &attr_cpulist); - sysdev_create_file(&node->sysdev, &attr_meminfo); - sysdev_create_file(&node->sysdev, &attr_numastat); - sysdev_create_file(&node->sysdev, &attr_distance); - sysdev_create_file(&node->sysdev, &attr_vmstat); + device_create_file(&node->dev, &dev_attr_cpumap); + device_create_file(&node->dev, &dev_attr_cpulist); + device_create_file(&node->dev, &dev_attr_meminfo); + device_create_file(&node->dev, &dev_attr_numastat); + device_create_file(&node->dev, &dev_attr_distance); + device_create_file(&node->dev, &dev_attr_vmstat); scan_unevictable_register_node(node); @@ -294,17 +293,17 @@ int register_node(struct node *node, int num, struct node *parent) */ void unregister_node(struct node *node) { - sysdev_remove_file(&node->sysdev, &attr_cpumap); - sysdev_remove_file(&node->sysdev, &attr_cpulist); - sysdev_remove_file(&node->sysdev, &attr_meminfo); - sysdev_remove_file(&node->sysdev, &attr_numastat); - sysdev_remove_file(&node->sysdev, &attr_distance); - sysdev_remove_file(&node->sysdev, &attr_vmstat); + device_remove_file(&node->dev, &dev_attr_cpumap); + device_remove_file(&node->dev, &dev_attr_cpulist); + device_remove_file(&node->dev, &dev_attr_meminfo); + device_remove_file(&node->dev, &dev_attr_numastat); + device_remove_file(&node->dev, &dev_attr_distance); + device_remove_file(&node->dev, &dev_attr_vmstat); scan_unevictable_unregister_node(node); hugetlb_unregister_node(node); /* no-op, if memoryless node */ - sysdev_unregister(&node->sysdev); + device_unregister(&node->dev); } struct node node_devices[MAX_NUMNODES]; @@ -315,41 +314,41 @@ struct node node_devices[MAX_NUMNODES]; int register_cpu_under_node(unsigned int cpu, unsigned int nid) { int ret; - struct sys_device *obj; + struct device *obj; if (!node_online(nid)) return 0; - obj = get_cpu_sysdev(cpu); + obj = get_cpu_device(cpu); if (!obj) return 0; - ret = sysfs_create_link(&node_devices[nid].sysdev.kobj, + ret = sysfs_create_link(&node_devices[nid].dev.kobj, &obj->kobj, kobject_name(&obj->kobj)); if (ret) return ret; return sysfs_create_link(&obj->kobj, - &node_devices[nid].sysdev.kobj, - kobject_name(&node_devices[nid].sysdev.kobj)); + &node_devices[nid].dev.kobj, + kobject_name(&node_devices[nid].dev.kobj)); } int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) { - struct sys_device *obj; + struct device *obj; if (!node_online(nid)) return 0; - obj = get_cpu_sysdev(cpu); + 
obj = get_cpu_device(cpu); if (!obj) return 0; - sysfs_remove_link(&node_devices[nid].sysdev.kobj, + sysfs_remove_link(&node_devices[nid].dev.kobj, kobject_name(&obj->kobj)); sysfs_remove_link(&obj->kobj, - kobject_name(&node_devices[nid].sysdev.kobj)); + kobject_name(&node_devices[nid].dev.kobj)); return 0; } @@ -391,15 +390,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) continue; if (page_nid != nid) continue; - ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, - &mem_blk->sysdev.kobj, - kobject_name(&mem_blk->sysdev.kobj)); + ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj, + &mem_blk->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); if (ret) return ret; - return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj, - &node_devices[nid].sysdev.kobj, - kobject_name(&node_devices[nid].sysdev.kobj)); + return sysfs_create_link_nowarn(&mem_blk->dev.kobj, + &node_devices[nid].dev.kobj, + kobject_name(&node_devices[nid].dev.kobj)); } /* mem section does not span the specified node */ return 0; @@ -432,10 +431,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, continue; if (node_test_and_set(nid, *unlinked_nodes)) continue; - sysfs_remove_link(&node_devices[nid].sysdev.kobj, - kobject_name(&mem_blk->sysdev.kobj)); - sysfs_remove_link(&mem_blk->sysdev.kobj, - kobject_name(&node_devices[nid].sysdev.kobj)); + sysfs_remove_link(&node_devices[nid].dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + sysfs_remove_link(&mem_blk->dev.kobj, + kobject_name(&node_devices[nid].dev.kobj)); } NODEMASK_FREE(unlinked_nodes); return 0; @@ -466,7 +465,7 @@ static int link_mem_sections(int nid) } if (mem_blk) - kobject_put(&mem_blk->sysdev.kobj); + kobject_put(&mem_blk->dev.kobj); return err; } @@ -594,19 +593,19 @@ static ssize_t print_nodes_state(enum node_states state, char *buf) } struct node_attr { - struct sysdev_class_attribute attr; + struct device_attribute attr; enum node_states state; }; -static ssize_t show_node_state(struct sysdev_class *class, - struct sysdev_class_attribute *attr, char *buf) +static ssize_t show_node_state(struct device *dev, + struct device_attribute *attr, char *buf) { struct node_attr *na = container_of(attr, struct node_attr, attr); return print_nodes_state(na->state, buf); } #define _NODE_ATTR(name, state) \ - { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state } + { __ATTR(name, 0444, show_node_state, NULL), state } static struct node_attr node_state_attr[] = { _NODE_ATTR(possible, N_POSSIBLE), @@ -618,17 +617,26 @@ static struct node_attr node_state_attr[] = { #endif }; -static struct sysdev_class_attribute *node_state_attrs[] = { - &node_state_attr[0].attr, - &node_state_attr[1].attr, - &node_state_attr[2].attr, - &node_state_attr[3].attr, +static struct attribute *node_state_attrs[] = { + &node_state_attr[0].attr.attr, + &node_state_attr[1].attr.attr, + &node_state_attr[2].attr.attr, + &node_state_attr[3].attr.attr, #ifdef CONFIG_HIGHMEM - &node_state_attr[4].attr, + &node_state_attr[4].attr.attr, #endif NULL }; +static struct attribute_group memory_root_attr_group = { + .attrs = node_state_attrs, +}; + +static const struct attribute_group *cpu_root_attr_groups[] = { + &memory_root_attr_group, + NULL, +}; + #define NODE_CALLBACK_PRI 2 /* lower than SLAB */ static int __init register_node_type(void) { @@ -637,7 +645,7 @@ static int __init register_node_type(void) BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES); BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES); - ret = 
sysdev_class_register(&node_class); + ret = subsys_system_register(&node_subsys, cpu_root_attr_groups); if (!ret) { hotplug_memory_notifier(node_memory_callback, NODE_CALLBACK_PRI); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 7a24895543e..f0c605e99ad 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -383,7 +383,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister); * Returns &struct platform_device pointer on success, or ERR_PTR() on error. */ struct platform_device *platform_device_register_full( - struct platform_device_info *pdevinfo) + const struct platform_device_info *pdevinfo) { int ret = -ENOMEM; struct platform_device *pdev; @@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev) return ret; } -int platform_pm_prepare(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (drv && drv->pm && drv->pm->prepare) - ret = drv->pm->prepare(dev); - - return ret; -} - -void platform_pm_complete(struct device *dev) -{ - struct device_driver *drv = dev->driver; - - if (drv && drv->pm && drv->pm->complete) - drv->pm->complete(dev); -} - #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND @@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev) return ret; } -int platform_pm_suspend_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->suspend_noirq) - ret = drv->pm->suspend_noirq(dev); - } - - return ret; -} - int platform_pm_resume(struct device *dev) { struct device_driver *drv = dev->driver; @@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev) return ret; } -int platform_pm_resume_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->resume_noirq) - ret = drv->pm->resume_noirq(dev); - } - - return ret; -} - #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS @@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev) return ret; } -int platform_pm_freeze_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->freeze_noirq) - ret = drv->pm->freeze_noirq(dev); - } - - return ret; -} - int platform_pm_thaw(struct device *dev) { struct device_driver *drv = dev->driver; @@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev) return ret; } -int platform_pm_thaw_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->thaw_noirq) - ret = drv->pm->thaw_noirq(dev); - } - - return ret; -} - int platform_pm_poweroff(struct device *dev) { struct device_driver *drv = dev->driver; @@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev) return ret; } -int platform_pm_poweroff_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->poweroff_noirq) - ret = drv->pm->poweroff_noirq(dev); - } - - return ret; -} - int platform_pm_restore(struct device *dev) { struct device_driver *drv = dev->driver; @@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev) return ret; } -int platform_pm_restore_noirq(struct device *dev) -{ - struct device_driver *drv = dev->driver; - int ret = 0; - - if (!drv) - return 0; - - if (drv->pm) { - if (drv->pm->restore_noirq) - ret = drv->pm->restore_noirq(dev); - } - - return ret; -} - 
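All of the platform_pm_*_noirq(), platform_pm_prepare() and platform_pm_complete() wrappers deleted here did nothing but forward to the driver's dev_pm_ops; with the PM core now falling back to driver callbacks on its own (see the main.c rework below), the indirection is gone. What a platform driver looks like after this change, as a hedged sketch with invented names:

#include <linux/platform_device.h>
#include <linux/pm.h>

static int foo_suspend(struct device *dev)
{
	/* quiesce the hardware */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* reprogram the hardware */
	return 0;
}

/* Expands to a dev_pm_ops with system sleep callbacks only; the PM core
 * finds and calls these directly now that the bus-level wrappers are gone. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct platform_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
};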
#endif /* CONFIG_HIBERNATE_CALLBACKS */ static const struct dev_pm_ops platform_dev_pm_ops = { diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 81676dd1790..2e58ebb1f6c 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile @@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o obj-$(CONFIG_PM_RUNTIME) += runtime.o obj-$(CONFIG_PM_TRACE_RTC) += trace.o obj-$(CONFIG_PM_OPP) += opp.o -obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o +obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o obj-$(CONFIG_HAVE_CLK) += clock_ops.o ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 5f0f85d5c57..428e55e012d 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -229,7 +229,8 @@ int pm_clk_suspend(struct device *dev) list_for_each_entry_reverse(ce, &psd->clock_list, node) { if (ce->status < PCE_STATUS_ERROR) { - clk_disable(ce->clk); + if (ce->status == PCE_STATUS_ENABLED) + clk_disable(ce->clk); ce->status = PCE_STATUS_ACQUIRED; } } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 6790cf7eba5..92e6a904806 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -15,13 +15,44 @@ #include <linux/err.h> #include <linux/sched.h> #include <linux/suspend.h> +#include <linux/export.h> + +#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \ +({ \ + type (*__routine)(struct device *__d); \ + type __ret = (type)0; \ + \ + __routine = genpd->dev_ops.callback; \ + if (__routine) { \ + __ret = __routine(dev); \ + } else { \ + __routine = dev_gpd_data(dev)->ops.callback; \ + if (__routine) \ + __ret = __routine(dev); \ + } \ + __ret; \ +}) + +#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \ +({ \ + ktime_t __start = ktime_get(); \ + type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \ + s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \ + struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \ + if (__elapsed > __gpd_data->td.field) { \ + __gpd_data->td.field = __elapsed; \ + dev_warn(dev, name " latency exceeded, new value %lld ns\n", \ + __elapsed); \ + } \ + __retval; \ +}) static LIST_HEAD(gpd_list); static DEFINE_MUTEX(gpd_list_lock); #ifdef CONFIG_PM -static struct generic_pm_domain *dev_to_genpd(struct device *dev) +struct generic_pm_domain *dev_to_genpd(struct device *dev) { if (IS_ERR_OR_NULL(dev->pm_domain)) return ERR_PTR(-EINVAL); @@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev) return pd_to_genpd(dev->pm_domain); } +static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev, + stop_latency_ns, "stop"); +} + +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, + start_latency_ns, "start"); +} + +static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, + save_state_latency_ns, "state save"); +} + +static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, + restore_state_latency_ns, + "state restore"); +} + static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) { bool ret = false; @@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct 
generic_pm_domain *genpd) } if (genpd->power_on) { + ktime_t time_start = ktime_get(); + s64 elapsed_ns; + ret = genpd->power_on(genpd); if (ret) goto err; + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > genpd->power_on_latency_ns) { + genpd->power_on_latency_ns = elapsed_ns; + if (genpd->name) + pr_warning("%s: Power-on latency exceeded, " + "new value %lld ns\n", genpd->name, + elapsed_ns); + } } genpd_set_active(genpd); @@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; - struct device_driver *drv = dev->driver; int ret = 0; if (gpd_data->need_restore) @@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd, mutex_unlock(&genpd->lock); - if (drv && drv->pm && drv->pm->runtime_suspend) { - if (genpd->start_device) - genpd->start_device(dev); - - ret = drv->pm->runtime_suspend(dev); - - if (genpd->stop_device) - genpd->stop_device(dev); - } + genpd_start_dev(genpd, dev); + ret = genpd_save_dev(genpd, dev); + genpd_stop_dev(genpd, dev); mutex_lock(&genpd->lock); @@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd, { struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); struct device *dev = pdd->dev; - struct device_driver *drv = dev->driver; if (!gpd_data->need_restore) return; mutex_unlock(&genpd->lock); - if (drv && drv->pm && drv->pm->runtime_resume) { - if (genpd->start_device) - genpd->start_device(dev); - - drv->pm->runtime_resume(dev); - - if (genpd->stop_device) - genpd->stop_device(dev); - } + genpd_start_dev(genpd, dev); + genpd_restore_dev(genpd, dev); + genpd_stop_dev(genpd, dev); mutex_lock(&genpd->lock); @@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) } if (genpd->power_off) { + ktime_t time_start; + s64 elapsed_ns; + if (atomic_read(&genpd->sd_count) > 0) { ret = -EBUSY; goto out; } + time_start = ktime_get(); + /* * If sd_count > 0 at this point, one of the subdomains hasn't * managed to call pm_genpd_poweron() for the master yet after @@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_set_active(genpd); goto out; } + + elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start)); + if (elapsed_ns > genpd->power_off_latency_ns) { + genpd->power_off_latency_ns = elapsed_ns; + if (genpd->name) + pr_warning("%s: Power-off latency exceeded, " + "new value %lld ns\n", genpd->name, + elapsed_ns); + } } genpd->status = GPD_STATE_POWER_OFF; + genpd->power_off_time = ktime_get(); + + /* Update PM QoS information for devices in the domain. */ + list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { + struct gpd_timing_data *td = &to_gpd_data(pdd)->td; + + pm_runtime_update_max_time_suspended(pdd->dev, + td->start_latency_ns + + td->restore_state_latency_ns + + genpd->power_on_latency_ns); + } list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; + bool (*stop_ok)(struct device *__dev); + int ret; dev_dbg(dev, "%s()\n", __func__); @@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev) might_sleep_if(!genpd->dev_irq_safe); - if (genpd->stop_device) { - int ret = genpd->stop_device(dev); - if (ret) - return ret; - } + stop_ok = genpd->gov ? 
genpd->gov->stop_ok : NULL; + if (stop_ok && !stop_ok(dev)) + return -EBUSY; + + ret = genpd_stop_dev(genpd, dev); + if (ret) + return ret; + + pm_runtime_update_max_time_suspended(dev, + dev_gpd_data(dev)->td.start_latency_ns); /* * If power.irq_safe is set, this routine will be run with interrupts @@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev) mutex_unlock(&genpd->lock); out: - if (genpd->start_device) - genpd->start_device(dev); + genpd_start_dev(genpd, dev); return 0; } @@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {} #ifdef CONFIG_PM_SLEEP +static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd, + struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev); +} + +static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, suspend, dev); +} + +static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev); +} + +static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev); +} + +static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, resume, dev); +} + +static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, freeze, dev); +} + +static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev); +} + +static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev); +} + +static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev) +{ + return GENPD_DEV_CALLBACK(genpd, int, thaw, dev); +} + /** * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters. * @genpd: PM domain to power off, if possible. @@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd) if (!device_can_wakeup(dev)) return false; - active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev); + active_wakeup = genpd_dev_active_wakeup(genpd, dev); return device_may_wakeup(dev) ? active_wakeup : !active_wakeup; } @@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev) /* * The PM domain must be in the GPD_STATE_ACTIVE state at this point, * so pm_genpd_poweron() will return immediately, but if the device - * is suspended (e.g. it's been stopped by .stop_device()), we need + * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need * to make it operational. */ pm_runtime_resume(dev); @@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev); + return genpd->suspend_power_off ? 
0 : genpd_suspend_dev(genpd, dev); } /** @@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - ret = pm_generic_suspend_noirq(dev); + ret = genpd_suspend_late(genpd, dev); if (ret) return ret; - if (dev->power.wakeup_path - && genpd->active_wakeup && genpd->active_wakeup(dev)) + if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)) return 0; - if (genpd->stop_device) - genpd->stop_device(dev); + genpd_stop_dev(genpd, dev); /* * Since all of the "noirq" callbacks are executed sequentially, it is @@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev) */ pm_genpd_poweron(genpd); genpd->suspended_count--; - if (genpd->start_device) - genpd->start_device(dev); + genpd_start_dev(genpd, dev); - return pm_generic_resume_noirq(dev); + return genpd_resume_early(genpd, dev); } /** @@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_resume(dev); + return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev); } /** @@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev); + return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev); } /** @@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - ret = pm_generic_freeze_noirq(dev); + ret = genpd_freeze_late(genpd, dev); if (ret) return ret; - if (genpd->stop_device) - genpd->stop_device(dev); + genpd_stop_dev(genpd, dev); return 0; } @@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (genpd->suspend_power_off) return 0; - if (genpd->start_device) - genpd->start_device(dev); + genpd_start_dev(genpd, dev); - return pm_generic_thaw_noirq(dev); + return genpd_thaw_early(genpd, dev); } /** @@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev); -} - -/** - * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain. - * @dev: Device to suspend. - * - * Power off a device under the assumption that its pm_domain field points to - * the domain member of an object of type struct generic_pm_domain representing - * a PM domain consisting of I/O devices. - */ -static int pm_genpd_dev_poweroff(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev); -} - -/** - * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain. - * @dev: Device to suspend. - * - * Carry out a late powering off of a device under the assumption that its - * pm_domain field points to the domain member of an object of type - * struct generic_pm_domain representing a PM domain consisting of I/O devices. 
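The genpd_stop_dev()/genpd_start_dev()/genpd_save_dev()/genpd_restore_dev() wrappers used throughout these hunks all resolve their callback through GENPD_DEV_CALLBACK(). The lookup order is easier to see open-coded; a sketch for one hook (not kernel code, same semantics as the macro):

static int genpd_call_suspend(struct generic_pm_domain *genpd,
			      struct device *dev)
{
	/* Prefer the domain-wide callback... */
	int (*cb)(struct device *) = genpd->dev_ops.suspend;

	/* ...fall back to the per-device one... */
	if (!cb)
		cb = dev_gpd_data(dev)->ops.suspend;

	/* ...and treat a missing callback as success, i.e. (int)0. */
	return cb ? cb(dev) : 0;
}

Note that pm_genpd_init() installs the pm_genpd_default_* handlers into the domain-wide slots, and those defaults consult the per-device ops first themselves, so per-device callbacks win in practice either way. The timed variant of the macro additionally measures each invocation with ktime_get() and records a new worst case in the device's gpd_timing_data, which is what feeds the governor added later in this patch.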
- */ -static int pm_genpd_dev_poweroff_noirq(struct device *dev) -{ - struct generic_pm_domain *genpd; - int ret; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; - - if (genpd->suspend_power_off) - return 0; - - ret = pm_generic_poweroff_noirq(dev); - if (ret) - return ret; - - if (dev->power.wakeup_path - && genpd->active_wakeup && genpd->active_wakeup(dev)) - return 0; - - if (genpd->stop_device) - genpd->stop_device(dev); - - /* - * Since all of the "noirq" callbacks are executed sequentially, it is - * guaranteed that this function will never run twice in parallel for - * the same PM domain, so it is not necessary to use locking here. - */ - genpd->suspended_count++; - pm_genpd_sync_poweroff(genpd); - - return 0; + return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev); } /** @@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_poweron(genpd); genpd->suspended_count--; - if (genpd->start_device) - genpd->start_device(dev); - - return pm_generic_restore_noirq(dev); -} - -/** - * pm_genpd_restore - Restore a device belonging to an I/O power domain. - * @dev: Device to resume. - * - * Restore a device under the assumption that its pm_domain field points to the - * domain member of an object of type struct generic_pm_domain representing - * a power domain consisting of I/O devices. - */ -static int pm_genpd_restore(struct device *dev) -{ - struct generic_pm_domain *genpd; - - dev_dbg(dev, "%s()\n", __func__); - - genpd = dev_to_genpd(dev); - if (IS_ERR(genpd)) - return -EINVAL; + genpd_start_dev(genpd, dev); - return genpd->suspend_power_off ? 0 : pm_generic_restore(dev); + return genpd_resume_early(genpd, dev); } /** @@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev) #define pm_genpd_freeze_noirq NULL #define pm_genpd_thaw_noirq NULL #define pm_genpd_thaw NULL -#define pm_genpd_dev_poweroff_noirq NULL -#define pm_genpd_dev_poweroff NULL #define pm_genpd_restore_noirq NULL -#define pm_genpd_restore NULL #define pm_genpd_complete NULL #endif /* CONFIG_PM_SLEEP */ /** - * pm_genpd_add_device - Add a device to an I/O PM domain. + * __pm_genpd_add_device - Add a device to an I/O PM domain. * @genpd: PM domain to add the device to. * @dev: Device to be added. + * @td: Set of PM QoS timing parameters to attach to the device. */ -int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) +int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, + struct gpd_timing_data *td) { struct generic_pm_domain_data *gpd_data; struct pm_domain_data *pdd; @@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) gpd_data->base.dev = dev; gpd_data->need_restore = false; list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); + if (td) + gpd_data->td = *td; out: genpd_release_lock(genpd); @@ -1280,6 +1320,204 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, } /** + * pm_genpd_add_callbacks - Add PM domain callbacks to a given device. + * @dev: Device to add the callbacks to. + * @ops: Set of callbacks to add. + * @td: Timing data to add to the device along with the callbacks (optional). 
+ */ +int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops, + struct gpd_timing_data *td) +{ + struct pm_domain_data *pdd; + int ret = 0; + + if (!(dev && dev->power.subsys_data && ops)) + return -EINVAL; + + pm_runtime_disable(dev); + device_pm_lock(); + + pdd = dev->power.subsys_data->domain_data; + if (pdd) { + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + + gpd_data->ops = *ops; + if (td) + gpd_data->td = *td; + } else { + ret = -EINVAL; + } + + device_pm_unlock(); + pm_runtime_enable(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks); + +/** + * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device. + * @dev: Device to remove the callbacks from. + * @clear_td: If set, clear the device's timing data too. + */ +int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td) +{ + struct pm_domain_data *pdd; + int ret = 0; + + if (!(dev && dev->power.subsys_data)) + return -EINVAL; + + pm_runtime_disable(dev); + device_pm_lock(); + + pdd = dev->power.subsys_data->domain_data; + if (pdd) { + struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); + + gpd_data->ops = (struct gpd_dev_ops){ 0 }; + if (clear_td) + gpd_data->td = (struct gpd_timing_data){ 0 }; + } else { + ret = -EINVAL; + } + + device_pm_unlock(); + pm_runtime_enable(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks); + +/* Default device callbacks for generic PM domains. */ + +/** + * pm_genpd_default_save_state - Default "save device state" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_save_state(struct device *dev) +{ + int (*cb)(struct device *__dev); + struct device_driver *drv = dev->driver; + + cb = dev_gpd_data(dev)->ops.save_state; + if (cb) + return cb(dev); + + if (drv && drv->pm && drv->pm->runtime_suspend) + return drv->pm->runtime_suspend(dev); + + return 0; +} + +/** + * pm_genpd_default_restore_state - Default PM domains "restore device state". + * @dev: Device to handle. + */ +static int pm_genpd_default_restore_state(struct device *dev) +{ + int (*cb)(struct device *__dev); + struct device_driver *drv = dev->driver; + + cb = dev_gpd_data(dev)->ops.restore_state; + if (cb) + return cb(dev); + + if (drv && drv->pm && drv->pm->runtime_resume) + return drv->pm->runtime_resume(dev); + + return 0; +} + +/** + * pm_genpd_default_suspend - Default "device suspend" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_suspend(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend; + + return cb ? cb(dev) : pm_generic_suspend(dev); +} + +/** + * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_suspend_late(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; + + return cb ? cb(dev) : pm_generic_suspend_noirq(dev); +} + +/** + * pm_genpd_default_resume_early - Default "early device resume" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_resume_early(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; + + return cb ? cb(dev) : pm_generic_resume_noirq(dev); +} + +/** + * pm_genpd_default_resume - Default "device resume" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_resume(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume; + + return cb ?
cb(dev) : pm_generic_resume(dev); +} + +/** + * pm_genpd_default_freeze - Default "device freeze" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_freeze(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze; + + return cb ? cb(dev) : pm_generic_freeze(dev); +} + +/** + * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_freeze_late(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; + + return cb ? cb(dev) : pm_generic_freeze_noirq(dev); +} + +/** + * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_thaw_early(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; + + return cb ? cb(dev) : pm_generic_thaw_noirq(dev); +} + +/** + * pm_genpd_default_thaw - Default "device thaw" for PM domains. + * @dev: Device to handle. + */ +static int pm_genpd_default_thaw(struct device *dev) +{ + int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw; + + return cb ? cb(dev) : pm_generic_thaw(dev); +} + +/** * pm_genpd_init - Initialize a generic I/O PM domain object. * @genpd: PM domain object to initialize. * @gov: PM domain governor to associate with the domain (may be NULL). @@ -1305,6 +1543,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->resume_count = 0; genpd->device_count = 0; genpd->suspended_count = 0; + genpd->max_off_time_ns = -1; genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; @@ -1317,11 +1556,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; genpd->domain.ops.thaw = pm_genpd_thaw; - genpd->domain.ops.poweroff = pm_genpd_dev_poweroff; - genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq; + genpd->domain.ops.poweroff = pm_genpd_suspend; + genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; - genpd->domain.ops.restore = pm_genpd_restore; + genpd->domain.ops.restore = pm_genpd_resume; genpd->domain.ops.complete = pm_genpd_complete; + genpd->dev_ops.save_state = pm_genpd_default_save_state; + genpd->dev_ops.restore_state = pm_genpd_default_restore_state; + genpd->dev_ops.suspend = pm_genpd_default_suspend; + genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late; + genpd->dev_ops.resume_early = pm_genpd_default_resume_early; + genpd->dev_ops.resume = pm_genpd_default_resume; + genpd->dev_ops.freeze = pm_genpd_default_freeze; + genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late; + genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early; + genpd->dev_ops.thaw = pm_genpd_default_thaw; mutex_lock(&gpd_list_lock); list_add(&genpd->gpd_list_node, &gpd_list); mutex_unlock(&gpd_list_lock); diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c new file mode 100644 index 00000000000..51527ee92d1 --- /dev/null +++ b/drivers/base/power/domain_governor.c @@ -0,0 +1,156 @@ +/* + * drivers/base/power/domain_governor.c - Governors for device PM domains. + * + * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. + * + * This file is released under the GPLv2.
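pm_genpd_add_callbacks() above is how a platform overrides the pm_genpd_default_* operations for one device; the device must already have been added to a domain, since its domain_data is required. A hypothetical user, with all foo_* names invented:

#include <linux/pm_domain.h>

static int foo_save_state(struct device *dev)
{
	/* stash controller registers somewhere */
	return 0;
}

static int foo_restore_state(struct device *dev)
{
	/* write them back */
	return 0;
}

static struct gpd_dev_ops foo_gpd_ops = {
	.save_state	= foo_save_state,
	.restore_state	= foo_restore_state,
};

/* Measured worst-case latencies (made-up numbers, in nanoseconds); the
 * timed-callback machinery will raise them further if they are ever
 * exceeded at run time. */
static struct gpd_timing_data foo_td = {
	.save_state_latency_ns		= 20000,
	.restore_state_latency_ns	= 40000,
};

static int foo_attach_callbacks(struct device *dev)
{
	return pm_genpd_add_callbacks(dev, &foo_gpd_ops, &foo_td);
}

Any hook left NULL in foo_gpd_ops falls back to the pm_genpd_default_* behavior installed by pm_genpd_init(), which checks the per-device ops first and then the generic or driver callbacks.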
+ */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/pm_domain.h> +#include <linux/pm_qos.h> +#include <linux/hrtimer.h> + +/** + * default_stop_ok - Default PM domain governor routine for stopping devices. + * @dev: Device to check. + */ +bool default_stop_ok(struct device *dev) +{ + struct gpd_timing_data *td = &dev_gpd_data(dev)->td; + + dev_dbg(dev, "%s()\n", __func__); + + if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0) + return true; + + return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns + && td->break_even_ns < dev->power.max_time_suspended_ns; +} + +/** + * default_power_down_ok - Default generic PM domain power off governor routine. + * @pd: PM domain to check. + * + * This routine must be executed under the PM domain's lock. + */ +static bool default_power_down_ok(struct dev_pm_domain *pd) +{ + struct generic_pm_domain *genpd = pd_to_genpd(pd); + struct gpd_link *link; + struct pm_domain_data *pdd; + s64 min_dev_off_time_ns; + s64 off_on_time_ns; + ktime_t time_now = ktime_get(); + + off_on_time_ns = genpd->power_off_latency_ns + + genpd->power_on_latency_ns; + /* + * It doesn't make sense to remove power from the domain if saving + * the state of all devices in it and the power off/power on operations + * take too much time. + * + * All devices in this domain have been stopped already at this point. + */ + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + if (pdd->dev->driver) + off_on_time_ns += + to_gpd_data(pdd)->td.save_state_latency_ns; + } + + /* + * Check if subdomains can be off for enough time. + * + * All subdomains have been powered off already at this point. + */ + list_for_each_entry(link, &genpd->master_links, master_node) { + struct generic_pm_domain *sd = link->slave; + s64 sd_max_off_ns = sd->max_off_time_ns; + + if (sd_max_off_ns < 0) + continue; + + sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now, + sd->power_off_time)); + /* + * Check if the subdomain is allowed to be off long enough for + * the current domain to turn off and on (that's how much time + * it will have to wait worst case). + */ + if (sd_max_off_ns <= off_on_time_ns) + return false; + } + + /* + * Check if the devices in the domain can be off enough time. + */ + min_dev_off_time_ns = -1; + list_for_each_entry(pdd, &genpd->dev_list, list_node) { + struct gpd_timing_data *td; + struct device *dev = pdd->dev; + s64 dev_off_time_ns; + + if (!dev->driver || dev->power.max_time_suspended_ns < 0) + continue; + + td = &to_gpd_data(pdd)->td; + dev_off_time_ns = dev->power.max_time_suspended_ns - + (td->start_latency_ns + td->restore_state_latency_ns + + ktime_to_ns(ktime_sub(time_now, + dev->power.suspend_time))); + if (dev_off_time_ns <= off_on_time_ns) + return false; + + if (min_dev_off_time_ns > dev_off_time_ns + || min_dev_off_time_ns < 0) + min_dev_off_time_ns = dev_off_time_ns; + } + + if (min_dev_off_time_ns < 0) { + /* + * There are no latency constraints, so the domain can spend + * arbitrary time in the "off" state. + */ + genpd->max_off_time_ns = -1; + return true; + } + + /* + * The difference between the computed minimum delta and the time needed + * to turn the domain on is the maximum theoretical time this domain can + * spend in the "off" state. 
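Concretely, with invented numbers, the default_stop_ok() criterion at the top of this new file reduces to arithmetic like this (plain C, outside the kernel; the kernel types are s64):

#include <stdbool.h>
#include <stdint.h>

static bool stop_ok_example(void)
{
	int64_t stop_latency_ns = 30000;	/* 30 us to stop the device */
	int64_t start_latency_ns = 50000;	/* 50 us to restart it */
	int64_t break_even_ns = 100000;		/* stopping pays off after 100 us */
	int64_t max_time_suspended_ns = 500000;	/* QoS cap on suspend time */

	/* A zero break-even time or a negative QoS limit means "no
	 * constraint", and the real routine returns true early then. */
	return stop_latency_ns + start_latency_ns < break_even_ns /* 80 < 100 */
		&& break_even_ns < max_time_suspended_ns;	  /* 100 < 500 */
}

Both conditions hold, so stopping this device is judged worthwhile: the 80 us round trip is under the 100 us break-even point, which in turn fits inside the 500 us constraint.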
+ */ + min_dev_off_time_ns -= genpd->power_on_latency_ns; + + /* + * If the difference between the computed minimum delta and the time + * needed to turn the domain off and back on is smaller than the + * domain's power break even time, removing power from the domain is not + * worth it. + */ + if (genpd->break_even_ns > + min_dev_off_time_ns - genpd->power_off_latency_ns) + return false; + + genpd->max_off_time_ns = min_dev_off_time_ns; + return true; +} + +struct dev_power_governor simple_qos_governor = { + .stop_ok = default_stop_ok, + .power_down_ok = default_power_down_ok, +}; + +static bool always_on_power_down_ok(struct dev_pm_domain *domain) +{ + return false; +} + +/** + * pm_genpd_gov_always_on - A governor implementing an always-on policy + */ +struct dev_power_governor pm_domain_always_on_gov = { + .power_down_ok = always_on_power_down_ok, + .stop_ok = default_stop_ok, +}; diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 265a0ee3b49..10bdd793f0b 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c @@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev) * @event: PM transition of the system under way. * @bool: Whether or not this is the "noirq" stage. * - * If the device has not been suspended at run time, execute the - * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and - * return its error code. Otherwise, return zero. + * Execute the PM callback corresponding to @event provided by the driver of + * @dev, if defined, and return its error code. Return 0 if the callback is + * not present. */ static int __pm_generic_call(struct device *dev, int event, bool noirq) { const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int (*callback)(struct device *); - if (!pm || pm_runtime_suspended(dev)) + if (!pm) return 0; switch (event) { @@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq) case PM_EVENT_HIBERNATE: callback = noirq ? pm->poweroff_noirq : pm->poweroff; break; + case PM_EVENT_RESUME: + callback = noirq ? pm->resume_noirq : pm->resume; + break; case PM_EVENT_THAW: callback = noirq ? pm->thaw_noirq : pm->thaw; break; + case PM_EVENT_RESTORE: + callback = noirq ? pm->restore_noirq : pm->restore; + break; default: callback = NULL; break; @@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev) EXPORT_SYMBOL_GPL(pm_generic_thaw); /** - * __pm_generic_resume - Generic resume/restore callback for subsystems. - * @dev: Device to handle. - * @event: PM transition of the system under way. - * @bool: Whether or not this is the "noirq" stage. - * - * Execute the resume/resotre callback provided by the @dev's driver, if - * defined. If it returns 0, change the device's runtime PM status to 'active'. - * Return the callback's error code. - */ -static int __pm_generic_resume(struct device *dev, int event, bool noirq) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - int (*callback)(struct device *); - int ret; - - if (!pm) - return 0; - - switch (event) { - case PM_EVENT_RESUME: - callback = noirq ? pm->resume_noirq : pm->resume; - break; - case PM_EVENT_RESTORE: - callback = noirq ?
pm->restore_noirq : pm->restore; - break; - default: - callback = NULL; - break; - } - - if (!callback) - return 0; - - ret = callback(dev); - if (!ret && !noirq && pm_runtime_enabled(dev)) { - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - } - - return ret; -} - -/** * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems. * @dev: Device to resume. */ int pm_generic_resume_noirq(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESUME, true); + return __pm_generic_call(dev, PM_EVENT_RESUME, true); } EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); @@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); */ int pm_generic_resume(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESUME, false); + return __pm_generic_call(dev, PM_EVENT_RESUME, false); } EXPORT_SYMBOL_GPL(pm_generic_resume); @@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume); */ int pm_generic_restore_noirq(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESTORE, true); + return __pm_generic_call(dev, PM_EVENT_RESTORE, true); } EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); @@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); */ int pm_generic_restore(struct device *dev) { - return __pm_generic_resume(dev, PM_EVENT_RESTORE, false); + return __pm_generic_call(dev, PM_EVENT_RESTORE, false); } EXPORT_SYMBOL_GPL(pm_generic_restore); @@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev) pm_runtime_idle(dev); } #endif /* CONFIG_PM_SLEEP */ - -struct dev_pm_ops generic_subsys_pm_ops = { -#ifdef CONFIG_PM_SLEEP - .prepare = pm_generic_prepare, - .suspend = pm_generic_suspend, - .suspend_noirq = pm_generic_suspend_noirq, - .resume = pm_generic_resume, - .resume_noirq = pm_generic_resume_noirq, - .freeze = pm_generic_freeze, - .freeze_noirq = pm_generic_freeze_noirq, - .thaw = pm_generic_thaw, - .thaw_noirq = pm_generic_thaw_noirq, - .poweroff = pm_generic_poweroff, - .poweroff_noirq = pm_generic_poweroff_noirq, - .restore = pm_generic_restore, - .restore_noirq = pm_generic_restore_noirq, - .complete = pm_generic_complete, -#endif -#ifdef CONFIG_PM_RUNTIME - .runtime_suspend = pm_generic_runtime_suspend, - .runtime_resume = pm_generic_runtime_resume, - .runtime_idle = pm_generic_runtime_idle, -#endif -}; -EXPORT_SYMBOL_GPL(generic_subsys_pm_ops); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 7fa098464da..e2cc3d2e0ec 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -32,6 +32,8 @@ #include "../base.h" #include "power.h" +typedef int (*pm_callback_t)(struct device *); + /* * The entries in the dpm_list list are in a depth first order, simply * because children are guaranteed to be discovered after parents, and @@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev) ktime_t calltime = ktime_set(0, 0); if (initcall_debug) { - pr_info("calling %s+ @ %i\n", - dev_name(dev), task_pid_nr(current)); + pr_info("calling %s+ @ %i, parent: %s\n", + dev_name(dev), task_pid_nr(current), + dev->parent ? dev_name(dev->parent) : "none"); calltime = ktime_get(); } @@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async) } /** - * pm_op - Execute the PM operation appropriate for given PM event. - * @dev: Device to handle. + * pm_op - Return the PM operation appropriate for given PM event. * @ops: PM operations to choose from. * @state: PM transition of the system being carried out. 
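The rewritten pm_op()/pm_noirq_op() no longer execute anything; they just pick a callback, and dpm_run_callback(), added below, centralizes the debug print, invocation, suspend_report_result() and initcall timing that every switch branch used to repeat. The resulting call pattern, condensed from device_resume() with the type and class layers left out (a sketch, not the full function):

static int example_resume_step(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;	/* typedef added by this patch */
	char *info = NULL;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "bus ";
		callback = pm_op(dev->bus->pm, state);
	}

	/* New with this series: fall back to the driver's own dev_pm_ops
	 * when no subsystem-level callback was found. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info); /* NULL cb => 0 */
}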
*/ -static int pm_op(struct device *dev, - const struct dev_pm_ops *ops, - pm_message_t state) +static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state) { - int error = 0; - ktime_t calltime; - - calltime = initcall_debug_start(dev); - switch (state.event) { #ifdef CONFIG_SUSPEND case PM_EVENT_SUSPEND: - if (ops->suspend) { - error = ops->suspend(dev); - suspend_report_result(ops->suspend, error); - } - break; + return ops->suspend; case PM_EVENT_RESUME: - if (ops->resume) { - error = ops->resume(dev); - suspend_report_result(ops->resume, error); - } - break; + return ops->resume; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: - if (ops->freeze) { - error = ops->freeze(dev); - suspend_report_result(ops->freeze, error); - } - break; + return ops->freeze; case PM_EVENT_HIBERNATE: - if (ops->poweroff) { - error = ops->poweroff(dev); - suspend_report_result(ops->poweroff, error); - } - break; + return ops->poweroff; case PM_EVENT_THAW: case PM_EVENT_RECOVER: - if (ops->thaw) { - error = ops->thaw(dev); - suspend_report_result(ops->thaw, error); - } + return ops->thaw; break; case PM_EVENT_RESTORE: - if (ops->restore) { - error = ops->restore(dev); - suspend_report_result(ops->restore, error); - } - break; + return ops->restore; #endif /* CONFIG_HIBERNATE_CALLBACKS */ - default: - error = -EINVAL; } - initcall_debug_report(dev, calltime, error); - - return error; + return NULL; } /** - * pm_noirq_op - Execute the PM operation appropriate for given PM event. - * @dev: Device to handle. + * pm_noirq_op - Return the PM operation appropriate for given PM event. * @ops: PM operations to choose from. * @state: PM transition of the system being carried out. * * The driver of @dev will not receive interrupts while this function is being * executed. */ -static int pm_noirq_op(struct device *dev, - const struct dev_pm_ops *ops, - pm_message_t state) +static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state) { - int error = 0; - ktime_t calltime = ktime_set(0, 0), delta, rettime; - - if (initcall_debug) { - pr_info("calling %s+ @ %i, parent: %s\n", - dev_name(dev), task_pid_nr(current), - dev->parent ? 
dev_name(dev->parent) : "none"); - calltime = ktime_get(); - } - switch (state.event) { #ifdef CONFIG_SUSPEND case PM_EVENT_SUSPEND: - if (ops->suspend_noirq) { - error = ops->suspend_noirq(dev); - suspend_report_result(ops->suspend_noirq, error); - } - break; + return ops->suspend_noirq; case PM_EVENT_RESUME: - if (ops->resume_noirq) { - error = ops->resume_noirq(dev); - suspend_report_result(ops->resume_noirq, error); - } - break; + return ops->resume_noirq; #endif /* CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS case PM_EVENT_FREEZE: case PM_EVENT_QUIESCE: - if (ops->freeze_noirq) { - error = ops->freeze_noirq(dev); - suspend_report_result(ops->freeze_noirq, error); - } - break; + return ops->freeze_noirq; case PM_EVENT_HIBERNATE: - if (ops->poweroff_noirq) { - error = ops->poweroff_noirq(dev); - suspend_report_result(ops->poweroff_noirq, error); - } - break; + return ops->poweroff_noirq; case PM_EVENT_THAW: case PM_EVENT_RECOVER: - if (ops->thaw_noirq) { - error = ops->thaw_noirq(dev); - suspend_report_result(ops->thaw_noirq, error); - } - break; + return ops->thaw_noirq; case PM_EVENT_RESTORE: - if (ops->restore_noirq) { - error = ops->restore_noirq(dev); - suspend_report_result(ops->restore_noirq, error); - } - break; + return ops->restore_noirq; #endif /* CONFIG_HIBERNATE_CALLBACKS */ - default: - error = -EINVAL; - } - - if (initcall_debug) { - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - printk("initcall %s_i+ returned %d after %Ld usecs\n", - dev_name(dev), error, - (unsigned long long)ktime_to_ns(delta) >> 10); } - return error; + return NULL; } static char *pm_verb(int event) @@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); } +static int dpm_run_callback(pm_callback_t cb, struct device *dev, + pm_message_t state, char *info) +{ + ktime_t calltime; + int error; + + if (!cb) + return 0; + + calltime = initcall_debug_start(dev); + + pm_dev_dbg(dev, state, info); + error = cb(dev); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, error); + + return error; +} + /*------------------------- Resume routines -------------------------*/ /** @@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) */ static int device_resume_noirq(struct device *dev, pm_message_t state) { + pm_callback_t callback = NULL; + char *info = NULL; int error = 0; TRACE_DEVICE(dev); TRACE_RESUME(0); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "EARLY power domain "); - error = pm_noirq_op(dev, &dev->pm_domain->ops, state); + info = "EARLY power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "EARLY type "); - error = pm_noirq_op(dev, dev->type->pm, state); + info = "EARLY type "; + callback = pm_noirq_op(dev->type->pm, state); } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "EARLY class "); - error = pm_noirq_op(dev, dev->class->pm, state); + info = "EARLY class "; + callback = pm_noirq_op(dev->class->pm, state); } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "EARLY "); - error = pm_noirq_op(dev, dev->bus->pm, state); + info = "EARLY bus "; + callback = pm_noirq_op(dev->bus->pm, state); } + if (!callback && dev->driver && dev->driver->pm) { + info = "EARLY driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + TRACE_RESUME(error); return 
error; } @@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state) EXPORT_SYMBOL_GPL(dpm_resume_noirq); /** - * legacy_resume - Execute a legacy (bus or class) resume callback for device. - * @dev: Device to resume. - * @cb: Resume callback to execute. - */ -static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) -{ - int error; - ktime_t calltime; - - calltime = initcall_debug_start(dev); - - error = cb(dev); - suspend_report_result(cb, error); - - initcall_debug_report(dev, calltime, error); - - return error; -} - -/** * device_resume - Execute "resume" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. @@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) */ static int device_resume(struct device *dev, pm_message_t state, bool async) { + pm_callback_t callback = NULL; + char *info = NULL; int error = 0; bool put = false; @@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) put = true; if (dev->pm_domain) { - pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pm_domain->ops, state); - goto End; + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Driver; } if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "type "); - error = pm_op(dev, dev->type->pm, state); - goto End; + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Driver; } if (dev->class) { if (dev->class->pm) { - pm_dev_dbg(dev, state, "class "); - error = pm_op(dev, dev->class->pm, state); - goto End; + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Driver; } else if (dev->class->resume) { - pm_dev_dbg(dev, state, "legacy class "); - error = legacy_resume(dev, dev->class->resume); + info = "legacy class "; + callback = dev->class->resume; goto End; } } if (dev->bus) { if (dev->bus->pm) { - pm_dev_dbg(dev, state, ""); - error = pm_op(dev, dev->bus->pm, state); + info = "bus "; + callback = pm_op(dev->bus->pm, state); } else if (dev->bus->resume) { - pm_dev_dbg(dev, state, "legacy "); - error = legacy_resume(dev, dev->bus->resume); + info = "legacy bus "; + callback = dev->bus->resume; + goto End; } } + Driver: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + End: + error = dpm_run_callback(callback, dev, state, info); dev->power.is_suspended = false; Unlock: @@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state) */ static void device_complete(struct device *dev, pm_message_t state) { + void (*callback)(struct device *) = NULL; + char *info = NULL; + device_lock(dev); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "completing power domain "); - if (dev->pm_domain->ops.complete) - dev->pm_domain->ops.complete(dev); + info = "completing power domain "; + callback = dev->pm_domain->ops.complete; } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "completing type "); - if (dev->type->pm->complete) - dev->type->pm->complete(dev); + info = "completing type "; + callback = dev->type->pm->complete; } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "completing class "); - if (dev->class->pm->complete) - dev->class->pm->complete(dev); + info = "completing class "; + callback = dev->class->pm->complete; } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "completing "); - if (dev->bus->pm->complete) - dev->bus->pm->complete(dev); + info = "completing bus "; + 
callback = dev->bus->pm->complete; + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "completing driver "; + callback = dev->driver->pm->complete; + } + + if (callback) { + pm_dev_dbg(dev, state, info); + callback(dev); } device_unlock(dev); @@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state) */ static int device_suspend_noirq(struct device *dev, pm_message_t state) { - int error; + pm_callback_t callback = NULL; + char *info = NULL; if (dev->pm_domain) { - pm_dev_dbg(dev, state, "LATE power domain "); - error = pm_noirq_op(dev, &dev->pm_domain->ops, state); - if (error) - return error; + info = "LATE power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "LATE type "); - error = pm_noirq_op(dev, dev->type->pm, state); - if (error) - return error; + info = "LATE type "; + callback = pm_noirq_op(dev->type->pm, state); } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "LATE class "); - error = pm_noirq_op(dev, dev->class->pm, state); - if (error) - return error; + info = "LATE class "; + callback = pm_noirq_op(dev->class->pm, state); } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "LATE "); - error = pm_noirq_op(dev, dev->bus->pm, state); - if (error) - return error; + info = "LATE bus "; + callback = pm_noirq_op(dev->bus->pm, state); } - return 0; + if (!callback && dev->driver && dev->driver->pm) { + info = "LATE driver "; + callback = pm_noirq_op(dev->driver->pm, state); + } + + return dpm_run_callback(callback, dev, state, info); } /** @@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state, */ static int __device_suspend(struct device *dev, pm_message_t state, bool async) { + pm_callback_t callback = NULL; + char *info = NULL; int error = 0; dpm_wait_for_children(dev, async); @@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) device_lock(dev); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "power domain "); - error = pm_op(dev, &dev->pm_domain->ops, state); - goto End; + info = "power domain "; + callback = pm_op(&dev->pm_domain->ops, state); + goto Run; } if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "type "); - error = pm_op(dev, dev->type->pm, state); - goto End; + info = "type "; + callback = pm_op(dev->type->pm, state); + goto Run; } if (dev->class) { if (dev->class->pm) { - pm_dev_dbg(dev, state, "class "); - error = pm_op(dev, dev->class->pm, state); - goto End; + info = "class "; + callback = pm_op(dev->class->pm, state); + goto Run; } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); error = legacy_suspend(dev, state, dev->class->suspend); @@ -909,18 +858,28 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->bus) { if (dev->bus->pm) { - pm_dev_dbg(dev, state, ""); - error = pm_op(dev, dev->bus->pm, state); + info = "bus "; + callback = pm_op(dev->bus->pm, state); } else if (dev->bus->suspend) { - pm_dev_dbg(dev, state, "legacy "); + pm_dev_dbg(dev, state, "legacy bus "); error = legacy_suspend(dev, state, dev->bus->suspend); + goto End; } } + Run: + if (!callback && dev->driver && dev->driver->pm) { + info = "driver "; + callback = pm_op(dev->driver->pm, state); + } + + error = dpm_run_callback(callback, dev, state, info); + End: if (!error) { dev->power.is_suspended = true; - if (dev->power.wakeup_path && dev->parent) + if (dev->power.wakeup_path + && dev->parent 
&& !dev->parent->power.ignore_children) dev->parent->power.wakeup_path = true; } @@ -1021,6 +980,8 @@ int dpm_suspend(pm_message_t state) */ static int device_prepare(struct device *dev, pm_message_t state) { + int (*callback)(struct device *) = NULL; + char *info = NULL; int error = 0; device_lock(dev); @@ -1028,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state) dev->power.wakeup_path = device_may_wakeup(dev); if (dev->pm_domain) { - pm_dev_dbg(dev, state, "preparing power domain "); - if (dev->pm_domain->ops.prepare) - error = dev->pm_domain->ops.prepare(dev); - suspend_report_result(dev->pm_domain->ops.prepare, error); - if (error) - goto End; + info = "preparing power domain "; + callback = dev->pm_domain->ops.prepare; } else if (dev->type && dev->type->pm) { - pm_dev_dbg(dev, state, "preparing type "); - if (dev->type->pm->prepare) - error = dev->type->pm->prepare(dev); - suspend_report_result(dev->type->pm->prepare, error); - if (error) - goto End; + info = "preparing type "; + callback = dev->type->pm->prepare; } else if (dev->class && dev->class->pm) { - pm_dev_dbg(dev, state, "preparing class "); - if (dev->class->pm->prepare) - error = dev->class->pm->prepare(dev); - suspend_report_result(dev->class->pm->prepare, error); - if (error) - goto End; + info = "preparing class "; + callback = dev->class->pm->prepare; } else if (dev->bus && dev->bus->pm) { - pm_dev_dbg(dev, state, "preparing "); - if (dev->bus->pm->prepare) - error = dev->bus->pm->prepare(dev); - suspend_report_result(dev->bus->pm->prepare, error); + info = "preparing bus "; + callback = dev->bus->pm->prepare; + } + + if (!callback && dev->driver && dev->driver->pm) { + info = "preparing driver "; + callback = dev->driver->pm->prepare; + } + + if (callback) { + error = callback(dev); + suspend_report_result(callback, error); } - End: device_unlock(dev); return error; diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 434a6c01167..95706fa24c7 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -669,7 +669,7 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev) struct device_opp *dev_opp = find_device_opp(dev); if (IS_ERR(dev_opp)) - return ERR_PTR(PTR_ERR(dev_opp)); /* matching type */ + return ERR_CAST(dev_opp); /* matching type */ return &dev_opp->head; } diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 30a94eadc20..c5d35883746 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx); static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); /** - * dev_pm_qos_read_value - Get PM QoS constraint for a given device. + * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. + * @dev: Device to get the PM QoS constraint value for. + * + * This routine must be called with dev->power.lock held. + */ +s32 __dev_pm_qos_read_value(struct device *dev) +{ + struct pm_qos_constraints *c = dev->power.constraints; + + return c ? pm_qos_read_value(c) : 0; +} + +/** + * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked). * @dev: Device to get the PM QoS constraint value for. 
*/ s32 dev_pm_qos_read_value(struct device *dev) { - struct pm_qos_constraints *c; unsigned long flags; - s32 ret = 0; + s32 ret; spin_lock_irqsave(&dev->power.lock, flags); - - c = dev->power.constraints; - if (c) - ret = pm_qos_read_value(c); - + ret = __dev_pm_qos_read_value(dev); spin_unlock_irqrestore(&dev->power.lock, flags); return ret; @@ -212,11 +220,9 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, if (!dev || !req) /*guard against callers passing in null */ return -EINVAL; - if (dev_pm_qos_request_active(req)) { - WARN(1, KERN_ERR "dev_pm_qos_add_request() called for already " - "added request\n"); + if (WARN(dev_pm_qos_request_active(req), + "%s() called for already added request\n", __func__)) return -EINVAL; - } req->dev = dev; @@ -271,11 +277,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, if (!req) /*guard against callers passing in null */ return -EINVAL; - if (!dev_pm_qos_request_active(req)) { - WARN(1, KERN_ERR "dev_pm_qos_update_request() called for " - "unknown object\n"); + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) return -EINVAL; - } mutex_lock(&dev_pm_qos_mtx); @@ -312,11 +316,9 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) if (!req) /*guard against callers passing in null */ return -EINVAL; - if (!dev_pm_qos_request_active(req)) { - WARN(1, KERN_ERR "dev_pm_qos_remove_request() called for " - "unknown object\n"); + if (WARN(!dev_pm_qos_request_active(req), + "%s() called for unknown object\n", __func__)) return -EINVAL; - } mutex_lock(&dev_pm_qos_mtx); @@ -418,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); } EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); + +/** + * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor. + * @dev: Device whose ancestor to add the request for. + * @req: Pointer to the preallocated handle. + * @value: Constraint latency value. + */ +int dev_pm_qos_add_ancestor_request(struct device *dev, + struct dev_pm_qos_request *req, s32 value) +{ + struct device *ancestor = dev->parent; + int error = -ENODEV; + + while (ancestor && !ancestor->power.ignore_children) + ancestor = ancestor->parent; + + if (ancestor) + error = dev_pm_qos_add_request(ancestor, req, value); + + if (error) + req->dev = NULL; + + return error; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 8c78443bca8..541f821d4ea 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags) else callback = NULL; + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_idle; + if (callback) __rpm_callback(callback, dev); @@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) return retval != -EACCES ? retval : -EIO; } +struct rpm_qos_data { + ktime_t time_now; + s64 constraint_ns; +}; + +/** + * rpm_update_qos_constraint - Update a given PM QoS constraint data. + * @dev: Device whose timing data to use. + * @data: PM QoS constraint data to update. + * + * Use the suspend timing data of @dev to update PM QoS constraint data pointed + * to by @data. 
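The split between __dev_pm_qos_read_value() (caller already holds dev->power.lock) and dev_pm_qos_read_value() (takes the lock itself) lets the runtime PM core query constraints from inside its own locked sections, while dev_pm_qos_add_ancestor_request() attaches a request to the closest ancestor whose power.ignore_children flag is clear. A minimal sketch of the driver-side API; the request variable, function names and the 100 us latency value are hypothetical, not from the patch:

#include <linux/device.h>
#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_dev_pm_qos_req;	/* hypothetical */

static int my_driver_add_constraint(struct device *dev)
{
	/* Constrain the first ancestor that does not ignore its
	 * children (e.g. an I2C controller) to a 100 us latency;
	 * a negative return value indicates failure. */
	return dev_pm_qos_add_ancestor_request(dev, &my_dev_pm_qos_req, 100);
}

static void my_driver_drop_constraint(struct device *dev)
{
	/* On failure the helper clears req->dev, so this is safe. */
	if (my_dev_pm_qos_req.dev)
		dev_pm_qos_remove_request(&my_dev_pm_qos_req);
}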
+ */ +static int rpm_update_qos_constraint(struct device *dev, void *data) +{ + struct rpm_qos_data *qos = data; + unsigned long flags; + s64 delta_ns; + int ret = 0; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (dev->power.max_time_suspended_ns < 0) + goto out; + + delta_ns = dev->power.max_time_suspended_ns - + ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time)); + if (delta_ns <= 0) { + ret = -EBUSY; + goto out; + } + + if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0) + qos->constraint_ns = delta_ns; + + out: + spin_unlock_irqrestore(&dev->power.lock, flags); + + return ret; +} + /** * rpm_suspend - Carry out runtime suspend of given device. * @dev: Device to suspend. @@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) { int (*callback)(struct device *); struct device *parent = NULL; + struct rpm_qos_data qos; int retval; trace_rpm_suspend(dev, rpmflags); @@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } + qos.constraint_ns = __dev_pm_qos_read_value(dev); + if (qos.constraint_ns < 0) { + /* Negative constraint means "never suspend". */ + retval = -EPERM; + goto out; + } + qos.constraint_ns *= NSEC_PER_USEC; + qos.time_now = ktime_get(); + __update_runtime_status(dev, RPM_SUSPENDING); + if (!dev->power.ignore_children) { + if (dev->power.irq_safe) + spin_unlock(&dev->power.lock); + else + spin_unlock_irq(&dev->power.lock); + + retval = device_for_each_child(dev, &qos, + rpm_update_qos_constraint); + + if (dev->power.irq_safe) + spin_lock(&dev->power.lock); + else + spin_lock_irq(&dev->power.lock); + + if (retval) + goto fail; + } + + dev->power.suspend_time = qos.time_now; + dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1; + if (dev->pm_domain) callback = dev->pm_domain->ops.runtime_suspend; else if (dev->type && dev->type->pm) @@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags) else callback = NULL; + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_suspend; + retval = rpm_callback(callback, dev); - if (retval) { - __update_runtime_status(dev, RPM_ACTIVE); - dev->power.deferred_resume = false; - if (retval == -EAGAIN || retval == -EBUSY) { - dev->power.runtime_error = 0; + if (retval) + goto fail; - /* - * If the callback routine failed an autosuspend, and - * if the last_busy time has been updated so that there - * is a new autosuspend expiration time, automatically - * reschedule another autosuspend. - */ - if ((rpmflags & RPM_AUTO) && - pm_runtime_autosuspend_expiration(dev) != 0) - goto repeat; - } else { - pm_runtime_cancel_pending(dev); - } - wake_up_all(&dev->power.wait_queue); - goto out; - } no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags) trace_rpm_return_int(dev, _THIS_IP_, retval); return retval; + + fail: + __update_runtime_status(dev, RPM_ACTIVE); + dev->power.suspend_time = ktime_set(0, 0); + dev->power.max_time_suspended_ns = -1; + dev->power.deferred_resume = false; + if (retval == -EAGAIN || retval == -EBUSY) { + dev->power.runtime_error = 0; + + /* + * If the callback routine failed an autosuspend, and + * if the last_busy time has been updated so that there + * is a new autosuspend expiration time, automatically + * reschedule another autosuspend. 
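The consolidated fail: path preserves the autosuspend retry behaviour: if a driver's runtime_suspend callback returns -EBUSY or -EAGAIN and last_busy has moved the expiration forward, the suspend is rescheduled rather than abandoned. A sketch of the driver-side pattern this serves, with hypothetical names (the 50 ms delay is illustrative):

#include <linux/pm_runtime.h>

static void my_driver_enable_autosuspend(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 50);	/* ms, illustrative */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
}

static void my_driver_io_complete(struct device *dev)
{
	/* Refreshing last_busy is what makes the fail path above
	 * reschedule a refused autosuspend instead of giving up. */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}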
+ */ + if ((rpmflags & RPM_AUTO) && + pm_runtime_autosuspend_expiration(dev) != 0) + goto repeat; + } else { + pm_runtime_cancel_pending(dev); + } + wake_up_all(&dev->power.wait_queue); + goto out; } /** @@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags) if (dev->power.no_callbacks) goto no_callback; /* Assume success. */ + dev->power.suspend_time = ktime_set(0, 0); + dev->power.max_time_suspended_ns = -1; + __update_runtime_status(dev, RPM_RESUMING); if (dev->pm_domain) @@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags) else callback = NULL; + if (!callback && dev->driver && dev->driver->pm) + callback = dev->driver->pm->runtime_resume; + retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); @@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev) setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn, (unsigned long)dev); + dev->power.suspend_time = ktime_set(0, 0); + dev->power.max_time_suspended_ns = -1; + init_waitqueue_head(&dev->power.wait_queue); } @@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev) if (dev->power.irq_safe && dev->parent) pm_runtime_put_sync(dev->parent); } + +/** + * pm_runtime_update_max_time_suspended - Update device's suspend time data. + * @dev: Device to handle. + * @delta_ns: Value to subtract from the device's max_time_suspended_ns field. + * + * Update the device's power.max_time_suspended_ns field by subtracting + * @delta_ns from it. The resulting value of power.max_time_suspended_ns is + * never negative. + */ +void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->power.lock, flags); + + if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) { + if (dev->power.max_time_suspended_ns > delta_ns) + dev->power.max_time_suspended_ns -= delta_ns; + else + dev->power.max_time_suspended_ns = 0; + } + + spin_unlock_irqrestore(&dev->power.lock, flags); +} diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 2fc6a66f39a..0f6c7fb418e 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -13,3 +13,6 @@ config REGMAP_I2C config REGMAP_SPI tristate + +config REGMAP_IRQ + bool diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index 0573c8a9dac..defd57963c8 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -1,4 +1,6 @@ -obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o +obj-$(CONFIG_REGMAP) += regmap.o regcache.o +obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o +obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 348ff02eb93..1a02b7537c8 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -74,6 +74,7 @@ struct regmap { struct reg_default *reg_defaults; const void *reg_defaults_raw; void *cache; + bool cache_dirty; }; struct regcache_ops { @@ -105,7 +106,7 @@ static inline void regmap_debugfs_exit(struct regmap *map) { } #endif /* regcache core declarations */ -int regcache_init(struct regmap *map); +int regcache_init(struct regmap *map, const struct regmap_config *config); void regcache_exit(struct regmap *map); int regcache_read(struct regmap *map, unsigned int reg, unsigned int *value); @@ -118,10 
+119,7 @@ unsigned int regcache_get_val(const void *base, unsigned int idx, bool regcache_set_val(void *base, unsigned int idx, unsigned int val, unsigned int word_size); int regcache_lookup_reg(struct regmap *map, unsigned int reg); -int regcache_insert_reg(struct regmap *map, unsigned int reg, - unsigned int val); -extern struct regcache_ops regcache_indexed_ops; extern struct regcache_ops regcache_rbtree_ops; extern struct regcache_ops regcache_lzo_ops; diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c deleted file mode 100644 index 507731ad8ec..00000000000 --- a/drivers/base/regmap/regcache-indexed.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Register cache access API - indexed caching support - * - * Copyright 2011 Wolfson Microelectronics plc - * - * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include <linux/slab.h> - -#include "internal.h" - -static int regcache_indexed_read(struct regmap *map, unsigned int reg, - unsigned int *value) -{ - int ret; - - ret = regcache_lookup_reg(map, reg); - if (ret >= 0) - *value = map->reg_defaults[ret].def; - - return ret; -} - -static int regcache_indexed_write(struct regmap *map, unsigned int reg, - unsigned int value) -{ - int ret; - - ret = regcache_lookup_reg(map, reg); - if (ret < 0) - return regcache_insert_reg(map, reg, value); - map->reg_defaults[ret].def = value; - return 0; -} - -static int regcache_indexed_sync(struct regmap *map) -{ - unsigned int i; - int ret; - - for (i = 0; i < map->num_reg_defaults; i++) { - ret = _regmap_write(map, map->reg_defaults[i].reg, - map->reg_defaults[i].def); - if (ret < 0) - return ret; - dev_dbg(map->dev, "Synced register %#x, value %#x\n", - map->reg_defaults[i].reg, - map->reg_defaults[i].def); - } - return 0; -} - -struct regcache_ops regcache_indexed_ops = { - .type = REGCACHE_INDEXED, - .name = "indexed", - .read = regcache_indexed_read, - .write = regcache_indexed_write, - .sync = regcache_indexed_sync -}; diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c index 066aeece362..b7d16143ede 100644 --- a/drivers/base/regmap/regcache-lzo.c +++ b/drivers/base/regmap/regcache-lzo.c @@ -15,6 +15,8 @@ #include "internal.h" +static int regcache_lzo_exit(struct regmap *map); + struct regcache_lzo_ctx { void *wmem; void *dst; @@ -27,7 +29,7 @@ struct regcache_lzo_ctx { }; #define LZO_BLOCK_NUM 8 -static int regcache_lzo_block_count(void) +static int regcache_lzo_block_count(struct regmap *map) { return LZO_BLOCK_NUM; } @@ -106,19 +108,22 @@ static inline int regcache_lzo_get_blkindex(struct regmap *map, unsigned int reg) { return (reg * map->cache_word_size) / - DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()); + DIV_ROUND_UP(map->cache_size_raw, + regcache_lzo_block_count(map)); } static inline int regcache_lzo_get_blkpos(struct regmap *map, unsigned int reg) { - return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) / + return reg % (DIV_ROUND_UP(map->cache_size_raw, + regcache_lzo_block_count(map)) / map->cache_word_size); } static inline int regcache_lzo_get_blksize(struct regmap *map) { - return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()); + return DIV_ROUND_UP(map->cache_size_raw, + regcache_lzo_block_count(map)); } static int regcache_lzo_init(struct regmap *map) 
@@ -131,7 +136,7 @@ static int regcache_lzo_init(struct regmap *map) ret = 0; - blkcount = regcache_lzo_block_count(); + blkcount = regcache_lzo_block_count(map); map->cache = kzalloc(blkcount * sizeof *lzo_blocks, GFP_KERNEL); if (!map->cache) @@ -190,7 +195,7 @@ static int regcache_lzo_init(struct regmap *map) return 0; err: - regcache_exit(map); + regcache_lzo_exit(map); return ret; } @@ -203,7 +208,7 @@ static int regcache_lzo_exit(struct regmap *map) if (!lzo_blocks) return 0; - blkcount = regcache_lzo_block_count(); + blkcount = regcache_lzo_block_count(map); /* * the pointer to the bitmap used for syncing the cache * is shared amongst all lzo_blocks. Ensure it is freed @@ -351,7 +356,7 @@ static int regcache_lzo_sync(struct regmap *map) } struct regcache_ops regcache_lzo_ops = { - .type = REGCACHE_LZO, + .type = REGCACHE_COMPRESSED, .name = "lzo", .init = regcache_lzo_init, .exit = regcache_lzo_exit, diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index e31498499b0..32620c4f168 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c @@ -11,12 +11,15 @@ */ #include <linux/slab.h> +#include <linux/debugfs.h> #include <linux/rbtree.h> +#include <linux/seq_file.h> #include "internal.h" static int regcache_rbtree_write(struct regmap *map, unsigned int reg, unsigned int value); +static int regcache_rbtree_exit(struct regmap *map); struct regcache_rbtree_node { /* the actual rbtree node holding this block */ @@ -124,6 +127,60 @@ static int regcache_rbtree_insert(struct rb_root *root, return 1; } +#ifdef CONFIG_DEBUG_FS +static int rbtree_show(struct seq_file *s, void *ignored) +{ + struct regmap *map = s->private; + struct regcache_rbtree_ctx *rbtree_ctx = map->cache; + struct regcache_rbtree_node *n; + struct rb_node *node; + unsigned int base, top; + int nodes = 0; + int registers = 0; + + mutex_lock(&map->lock); + + for (node = rb_first(&rbtree_ctx->root); node != NULL; + node = rb_next(node)) { + n = container_of(node, struct regcache_rbtree_node, node); + + regcache_rbtree_get_base_top_reg(n, &base, &top); + seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1); + + nodes++; + registers += top - base + 1; + } + + seq_printf(s, "%d nodes, %d registers, average %d registers\n", + nodes, registers, registers / nodes); + + mutex_unlock(&map->lock); + + return 0; +} + +static int rbtree_open(struct inode *inode, struct file *file) +{ + return single_open(file, rbtree_show, inode->i_private); +} + +static const struct file_operations rbtree_fops = { + .open = rbtree_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void rbtree_debugfs_init(struct regmap *map) +{ + debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); +} +#else +static void rbtree_debugfs_init(struct regmap *map) +{ +} +#endif + static int regcache_rbtree_init(struct regmap *map) { struct regcache_rbtree_ctx *rbtree_ctx; @@ -146,10 +203,12 @@ static int regcache_rbtree_init(struct regmap *map) goto err; } + rbtree_debugfs_init(map); + return 0; err: - regcache_exit(map); + regcache_rbtree_exit(map); return ret; } diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 666f6f5011d..1ead66186b7 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -19,7 +19,6 @@ #include "internal.h" static const struct regcache_ops *cache_types[] = { - ®cache_indexed_ops, ®cache_rbtree_ops, ®cache_lzo_ops, }; @@ -61,8 +60,10 @@ static int 
regcache_hw_init(struct regmap *map) map->reg_defaults = kmalloc(count * sizeof(struct reg_default), GFP_KERNEL); - if (!map->reg_defaults) - return -ENOMEM; + if (!map->reg_defaults) { + ret = -ENOMEM; + goto err_free; + } /* fill the reg_defaults */ map->num_reg_defaults = count; @@ -77,9 +78,15 @@ static int regcache_hw_init(struct regmap *map) } return 0; + +err_free: + if (map->cache_free) + kfree(map->reg_defaults_raw); + + return ret; } -int regcache_init(struct regmap *map) +int regcache_init(struct regmap *map, const struct regmap_config *config) { int ret; int i; @@ -100,6 +107,12 @@ int regcache_init(struct regmap *map) return -EINVAL; } + map->num_reg_defaults = config->num_reg_defaults; + map->num_reg_defaults_raw = config->num_reg_defaults_raw; + map->reg_defaults_raw = config->reg_defaults_raw; + map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); + map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; + map->cache = NULL; map->cache_ops = cache_types[i]; @@ -112,10 +125,10 @@ int regcache_init(struct regmap *map) * won't vanish from under us. We'll need to make * a copy of it. */ - if (map->reg_defaults) { + if (config->reg_defaults) { if (!map->num_reg_defaults) return -EINVAL; - tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults * + tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * sizeof(struct reg_default), GFP_KERNEL); if (!tmp_buf) return -ENOMEM; @@ -136,9 +149,18 @@ int regcache_init(struct regmap *map) if (map->cache_ops->init) { dev_dbg(map->dev, "Initializing %s cache\n", map->cache_ops->name); - return map->cache_ops->init(map); + ret = map->cache_ops->init(map); + if (ret) + goto err_free; } return 0; + +err_free: + kfree(map->reg_defaults); + if (map->cache_free) + kfree(map->reg_defaults_raw); + + return ret; } void regcache_exit(struct regmap *map) @@ -171,16 +193,21 @@ void regcache_exit(struct regmap *map) int regcache_read(struct regmap *map, unsigned int reg, unsigned int *value) { + int ret; + if (map->cache_type == REGCACHE_NONE) return -ENOSYS; BUG_ON(!map->cache_ops); - if (!regmap_readable(map, reg)) - return -EIO; + if (!regmap_volatile(map, reg)) { + ret = map->cache_ops->read(map, reg, value); - if (!regmap_volatile(map, reg)) - return map->cache_ops->read(map, reg, value); + if (ret == 0) + trace_regmap_reg_read_cache(map->dev, reg, *value); + + return ret; + } return -EINVAL; } @@ -241,6 +268,8 @@ int regcache_sync(struct regmap *map) map->cache_ops->name); name = map->cache_ops->name; trace_regcache_sync(map->dev, name, "start"); + if (!map->cache_dirty) + goto out; if (map->cache_ops->sync) { ret = map->cache_ops->sync(map); } else { @@ -291,6 +320,23 @@ void regcache_cache_only(struct regmap *map, bool enable) EXPORT_SYMBOL_GPL(regcache_cache_only); /** + * regcache_mark_dirty: Mark the register cache as dirty + * + * @map: map to mark + * + * Mark the register cache as dirty, for example due to the device + * having been powered down for suspend. If the cache is not marked + * as dirty then the cache sync will be suppressed. 
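Combined with regcache_cache_only(), the new cache_dirty flag gives drivers a cheap way to skip the sync when hardware state was never lost. A minimal sketch of the intended suspend/resume pattern, assuming a hypothetical driver that keeps its regmap in drvdata:

#include <linux/device.h>
#include <linux/regmap.h>

static int my_suspend(struct device *dev)
{
	struct regmap *map = dev_get_drvdata(dev);	/* hypothetical layout */

	regcache_cache_only(map, true);
	regcache_mark_dirty(map);	/* the device is about to lose power */
	return 0;
}

static int my_resume(struct device *dev)
{
	struct regmap *map = dev_get_drvdata(dev);

	regcache_cache_only(map, false);
	return regcache_sync(map);	/* suppressed unless marked dirty */
}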
+ */ +void regcache_mark_dirty(struct regmap *map) +{ + mutex_lock(&map->lock); + map->cache_dirty = true; + mutex_unlock(&map->lock); +} +EXPORT_SYMBOL_GPL(regcache_mark_dirty); + +/** * regcache_cache_bypass: Put a register map into cache bypass mode * * @map: map to configure @@ -381,22 +427,3 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg) else return -ENOENT; } - -int regcache_insert_reg(struct regmap *map, unsigned int reg, - unsigned int val) -{ - void *tmp; - - tmp = krealloc(map->reg_defaults, - (map->num_reg_defaults + 1) * sizeof(struct reg_default), - GFP_KERNEL); - if (!tmp) - return -ENOMEM; - map->reg_defaults = tmp; - map->num_reg_defaults++; - map->reg_defaults[map->num_reg_defaults - 1].reg = reg; - map->reg_defaults[map->num_reg_defaults - 1].def = val; - sort(map->reg_defaults, map->num_reg_defaults, - sizeof(struct reg_default), regcache_default_cmp, NULL); - return 0; -} diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c new file mode 100644 index 00000000000..428836fc583 --- /dev/null +++ b/drivers/base/regmap/regmap-irq.c @@ -0,0 +1,302 @@ +/* + * regmap based irq_chip + * + * Copyright 2011 Wolfson Microelectronics plc + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/export.h> +#include <linux/regmap.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/slab.h> + +#include "internal.h" + +struct regmap_irq_chip_data { + struct mutex lock; + + struct regmap *map; + struct regmap_irq_chip *chip; + + int irq_base; + + void *status_reg_buf; + unsigned int *status_buf; + unsigned int *mask_buf; + unsigned int *mask_buf_def; +}; + +static inline const +struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data, + int irq) +{ + return &data->chip->irqs[irq - data->irq_base]; +} + +static void regmap_irq_lock(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + + mutex_lock(&d->lock); +} + +static void regmap_irq_sync_unlock(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + int i, ret; + + /* + * If there's been a change in the mask write it back to the + * hardware. We rely on the use of the regmap core cache to + * suppress pointless writes. 
+ */ + for (i = 0; i < d->chip->num_regs; i++) { + ret = regmap_update_bits(d->map, d->chip->mask_base + i, + d->mask_buf_def[i], d->mask_buf[i]); + if (ret != 0) + dev_err(d->map->dev, "Failed to sync masks in %x\n", + d->chip->mask_base + i); + } + + mutex_unlock(&d->lock); +} + +static void regmap_irq_enable(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq); + + d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask; +} + +static void regmap_irq_disable(struct irq_data *data) +{ + struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); + const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq); + + d->mask_buf[irq_data->reg_offset] |= irq_data->mask; +} + +static struct irq_chip regmap_irq_chip = { + .name = "regmap", + .irq_bus_lock = regmap_irq_lock, + .irq_bus_sync_unlock = regmap_irq_sync_unlock, + .irq_disable = regmap_irq_disable, + .irq_enable = regmap_irq_enable, +}; + +static irqreturn_t regmap_irq_thread(int irq, void *d) +{ + struct regmap_irq_chip_data *data = d; + struct regmap_irq_chip *chip = data->chip; + struct regmap *map = data->map; + int ret, i; + u8 *buf8 = data->status_reg_buf; + u16 *buf16 = data->status_reg_buf; + u32 *buf32 = data->status_reg_buf; + bool handled = false; + + ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf, + chip->num_regs); + if (ret != 0) { + dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); + return IRQ_NONE; + } + + /* + * Ignore masked IRQs and ack if we need to; we ack early so + * there is no race between handling and acknowledging the + * interrupt. We assume that typically few of the interrupts + * will fire simultaneously so don't worry about overhead from + * doing a write per register. + */ + for (i = 0; i < data->chip->num_regs; i++) { + switch (map->format.val_bytes) { + case 1: + data->status_buf[i] = buf8[i]; + break; + case 2: + data->status_buf[i] = buf16[i]; + break; + case 4: + data->status_buf[i] = buf32[i]; + break; + default: + BUG(); + return IRQ_NONE; + } + + data->status_buf[i] &= ~data->mask_buf[i]; + + if (data->status_buf[i] && chip->ack_base) { + ret = regmap_write(map, chip->ack_base + i, + data->status_buf[i]); + if (ret != 0) + dev_err(map->dev, "Failed to ack 0x%x: %d\n", + chip->ack_base + i, ret); + } + } + + for (i = 0; i < chip->num_irqs; i++) { + if (data->status_buf[chip->irqs[i].reg_offset] & + chip->irqs[i].mask) { + handle_nested_irq(data->irq_base + i); + handled = true; + } + } + + if (handled) + return IRQ_HANDLED; + else + return IRQ_NONE; +} + +/** + * regmap_add_irq_chip(): Use standard regmap IRQ controller handling + * + * @map: The regmap for the device. + * @irq: The IRQ the device uses to signal interrupts + * @irq_flags: The IRQF_ flags to use for the primary interrupt. + * @irq_base: Allocate the IRQ descriptor range at this base if >= 0, otherwise pick any free range. + * @chip: Configuration for the interrupt controller. + * @data: Runtime data structure for the controller, allocated on success + * + * Returns 0 on success or an errno on failure. + * + * In order for this to be efficient, the chip really should use a + * register cache. The chip driver is responsible for restoring the + * register values used by the IRQ controller over suspend and resume.
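To show how the pieces fit together, here is a hedged sketch of a chip description and registration; every name, register address and mask below is made up for illustration:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

static const struct regmap_irq my_irqs[] = {
	{ .reg_offset = 0, .mask = 0x01 },	/* IRQ 0 in status register 0 */
	{ .reg_offset = 1, .mask = 0x80 },	/* IRQ 1 in status register 1 */
};

static struct regmap_irq_chip my_irq_chip = {
	.name		= "my-chip",
	.status_base	= 0x10,
	.mask_base	= 0x14,
	.ack_base	= 0x18,
	.num_regs	= 2,
	.irqs		= my_irqs,
	.num_irqs	= ARRAY_SIZE(my_irqs),
};

static struct regmap_irq_chip_data *my_irq_data;

static int my_add_irq_chip(struct regmap *map, int irq)
{
	/* -1: let irq_alloc_descs() pick any free descriptor range. */
	return regmap_add_irq_chip(map, irq, IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				   -1, &my_irq_chip, &my_irq_data);
}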
+ */ +int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, + int irq_base, struct regmap_irq_chip *chip, + struct regmap_irq_chip_data **data) +{ + struct regmap_irq_chip_data *d; + int cur_irq, i; + int ret = -ENOMEM; + + irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0); + if (irq_base < 0) { + dev_warn(map->dev, "Failed to allocate IRQs: %d\n", + irq_base); + return irq_base; + } + + d = kzalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + GFP_KERNEL); + if (!d->status_buf) + goto err_alloc; + + d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs, + GFP_KERNEL); + if (!d->status_reg_buf) + goto err_alloc; + + d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs, + GFP_KERNEL); + if (!d->mask_buf) + goto err_alloc; + + d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs, + GFP_KERNEL); + if (!d->mask_buf_def) + goto err_alloc; + + d->map = map; + d->chip = chip; + d->irq_base = irq_base; + mutex_init(&d->lock); + + for (i = 0; i < chip->num_irqs; i++) + d->mask_buf_def[chip->irqs[i].reg_offset] + |= chip->irqs[i].mask; + + /* Mask all the interrupts by default */ + for (i = 0; i < chip->num_regs; i++) { + d->mask_buf[i] = d->mask_buf_def[i]; + ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + chip->mask_base + i, ret); + goto err_alloc; + } + } + + /* Register them with genirq */ + for (cur_irq = irq_base; + cur_irq < chip->num_irqs + irq_base; + cur_irq++) { + irq_set_chip_data(cur_irq, d); + irq_set_chip_and_handler(cur_irq, ®map_irq_chip, + handle_edge_irq); + irq_set_nested_thread(cur_irq, 1); + + /* ARM needs us to explicitly flag the IRQ as valid + * and will set them noprobe when we do so. */ +#ifdef CONFIG_ARM + set_irq_flags(cur_irq, IRQF_VALID); +#else + irq_set_noprobe(cur_irq); +#endif + } + + ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags, + chip->name, d); + if (ret != 0) { + dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret); + goto err_alloc; + } + + return 0; + +err_alloc: + kfree(d->mask_buf_def); + kfree(d->mask_buf); + kfree(d->status_reg_buf); + kfree(d->status_buf); + kfree(d); + return ret; +} +EXPORT_SYMBOL_GPL(regmap_add_irq_chip); + +/** + * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip + * + * @irq: Primary IRQ for the device + * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip() + */ +void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d) +{ + if (!d) + return; + + free_irq(irq, d); + kfree(d->mask_buf_def); + kfree(d->mask_buf); + kfree(d->status_reg_buf); + kfree(d->status_buf); + kfree(d); +} +EXPORT_SYMBOL_GPL(regmap_del_irq_chip); + +/** + * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip + * + * Useful for drivers to request their own IRQs. + * + * @data: regmap_irq controller to operate on. 
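Since the per-interrupt IRQs are registered as nested threads, a client driver would typically request one of them relative to the allocated base, roughly as below (the function and name string are hypothetical):

#include <linux/interrupt.h>
#include <linux/regmap.h>

static int my_request_sub_irq(struct regmap_irq_chip_data *data, int index,
			      irq_handler_t thread_fn, void *ctx)
{
	int irq = regmap_irq_chip_get_base(data) + index;

	/* Nested IRQs take a threaded handler with no primary handler. */
	return request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT,
				    "my-sub-irq", ctx);
}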
+ */ +int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data) +{ + return data->irq_base; +} +EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base); diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index bf441db1ee9..be10a4ff660 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -64,6 +64,18 @@ bool regmap_precious(struct regmap *map, unsigned int reg) return false; } +static bool regmap_volatile_range(struct regmap *map, unsigned int reg, + unsigned int num) +{ + unsigned int i; + + for (i = 0; i < num; i++) + if (!regmap_volatile(map, reg + i)) + return false; + + return true; +} + static void regmap_format_4_12_write(struct regmap *map, unsigned int reg, unsigned int val) { @@ -78,6 +90,16 @@ static void regmap_format_7_9_write(struct regmap *map, *out = cpu_to_be16((reg << 9) | val); } +static void regmap_format_10_14_write(struct regmap *map, + unsigned int reg, unsigned int val) +{ + u8 *out = map->work_buf; + + out[2] = val; + out[1] = (val >> 8) | (reg << 6); + out[0] = reg >> 2; +} + static void regmap_format_8(void *buf, unsigned int val) { u8 *b = buf; @@ -127,7 +149,7 @@ struct regmap *regmap_init(struct device *dev, int ret = -EINVAL; if (!bus || !config) - return NULL; + goto err; map = kzalloc(sizeof(*map), GFP_KERNEL); if (map == NULL) { @@ -147,12 +169,6 @@ struct regmap *regmap_init(struct device *dev, map->volatile_reg = config->volatile_reg; map->precious_reg = config->precious_reg; map->cache_type = config->cache_type; - map->reg_defaults = config->reg_defaults; - map->num_reg_defaults = config->num_reg_defaults; - map->num_reg_defaults_raw = config->num_reg_defaults_raw; - map->reg_defaults_raw = config->reg_defaults_raw; - map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw; - map->cache_word_size = config->val_bits / 8; if (config->read_flag_mask || config->write_flag_mask) { map->read_flag_mask = config->read_flag_mask; @@ -182,6 +198,16 @@ struct regmap *regmap_init(struct device *dev, } break; + case 10: + switch (config->val_bits) { + case 14: + map->format.format_write = regmap_format_10_14_write; + break; + default: + goto err_map; + } + break; + case 8: map->format.format_reg = regmap_format_8; break; @@ -215,14 +241,16 @@ struct regmap *regmap_init(struct device *dev, goto err_map; } - ret = regcache_init(map); - if (ret < 0) - goto err_map; - regmap_debugfs_init(map); + ret = regcache_init(map, config); + if (ret < 0) + goto err_free_workbuf; + return map; +err_free_workbuf: + kfree(map->work_buf); err_map: kfree(map); err: @@ -231,6 +259,39 @@ err: EXPORT_SYMBOL_GPL(regmap_init); /** + * regmap_reinit_cache(): Reinitialise the current register cache + * + * @map: Register map to operate on. + * @config: New configuration. Only the cache data will be used. + * + * Discard any existing register cache for the map and initialize a + * new cache. This can be used to restore the cache to defaults or to + * update the cache configuration to reflect runtime discovery of the + * hardware. 
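A plausible use of regmap_reinit_cache(), sketched with hypothetical helpers: my_config_for_id(), which picks a variant-specific regmap_config, and the ID register at 0x00 are both assumptions for illustration:

#include <linux/regmap.h>

static const struct regmap_config *my_config_for_id(unsigned int id);	/* hypothetical */

static int my_probe_cache(struct regmap *map)
{
	unsigned int id;
	int ret;

	/* Probe with a minimal config first, then rebuild the cache
	 * once the exact device variant is known. */
	ret = regmap_read(map, 0x00, &id);
	if (ret < 0)
		return ret;

	return regmap_reinit_cache(map, my_config_for_id(id));
}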
+ */ +int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) +{ + int ret; + + mutex_lock(&map->lock); + + regcache_exit(map); + + map->max_register = config->max_register; + map->writeable_reg = config->writeable_reg; + map->readable_reg = config->readable_reg; + map->volatile_reg = config->volatile_reg; + map->precious_reg = config->precious_reg; + map->cache_type = config->cache_type; + + ret = regcache_init(map, config); + + mutex_unlock(&map->lock); + + return ret; +} + +/** * regmap_exit(): Free a previously allocated register map */ void regmap_exit(struct regmap *map) @@ -306,8 +367,10 @@ int _regmap_write(struct regmap *map, unsigned int reg, ret = regcache_write(map, reg, val); if (ret != 0) return ret; - if (map->cache_only) + if (map->cache_only) { + map->cache_dirty = true; return 0; + } } trace_regmap_reg_write(map->dev, reg, val); @@ -375,9 +438,11 @@ EXPORT_SYMBOL_GPL(regmap_write); int regmap_raw_write(struct regmap *map, unsigned int reg, const void *val, size_t val_len) { + size_t val_count = val_len / map->format.val_bytes; int ret; - WARN_ON(map->cache_type != REGCACHE_NONE); + WARN_ON(!regmap_volatile_range(map, reg, val_count) && + map->cache_type != REGCACHE_NONE); mutex_lock(&map->lock); @@ -422,15 +487,15 @@ static int _regmap_read(struct regmap *map, unsigned int reg, { int ret; - if (!map->format.parse_val) - return -EINVAL; - if (!map->cache_bypass) { ret = regcache_read(map, reg, val); if (ret == 0) return 0; } + if (!map->format.parse_val) + return -EINVAL; + if (map->cache_only) return -EBUSY; @@ -481,15 +546,11 @@ EXPORT_SYMBOL_GPL(regmap_read); int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_len) { + size_t val_count = val_len / map->format.val_bytes; int ret; - int i; - bool vol = true; - for (i = 0; i < val_len / map->format.val_bytes; i++) - if (!regmap_volatile(map, reg + i)) - vol = false; - - WARN_ON(!vol && map->cache_type != REGCACHE_NONE); + WARN_ON(!regmap_volatile_range(map, reg, val_count) && + map->cache_type != REGCACHE_NONE); mutex_lock(&map->lock); @@ -517,16 +578,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, { int ret, i; size_t val_bytes = map->format.val_bytes; - bool vol = true; + bool vol = regmap_volatile_range(map, reg, val_count); if (!map->format.parse_val) return -EINVAL; - /* Is this a block of volatile registers? */ - for (i = 0; i < val_count; i++) - if (!regmap_volatile(map, reg + i)) - vol = false; - if (vol || map->cache_type == REGCACHE_NONE) { ret = regmap_raw_read(map, reg, val, val_bytes * val_count); if (ret != 0) @@ -546,40 +602,73 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, } EXPORT_SYMBOL_GPL(regmap_bulk_read); -/** - * remap_update_bits: Perform a read/modify/write cycle on the register map - * - * @map: Register map to update - * @reg: Register to update - * @mask: Bitmask to change - * @val: New value for bitmask - * - * Returns zero for success, a negative number on error. 
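The *change variant added just below lets callers distinguish "written" from "already at the requested value", which matters for things like event reporting. A short sketch under assumed register definitions (MY_CTRL and MY_MUTE are hypothetical):

#include <linux/regmap.h>

#define MY_CTRL		0x02	/* hypothetical control register */
#define MY_MUTE		0x01	/* hypothetical mute bit */

static int my_set_mute(struct regmap *map, bool mute)
{
	bool changed;
	int ret;

	ret = regmap_update_bits_check(map, MY_CTRL, MY_MUTE,
				       mute ? MY_MUTE : 0, &changed);
	if (ret < 0)
		return ret;

	/* 1 => hardware state actually changed, 0 => no-op */
	return changed;
}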
- */ -int regmap_update_bits(struct regmap *map, unsigned int reg, - unsigned int mask, unsigned int val) +static int _regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) { int ret; - unsigned int tmp; + unsigned int tmp, orig; mutex_lock(&map->lock); - ret = _regmap_read(map, reg, &tmp); + ret = _regmap_read(map, reg, &orig); if (ret != 0) goto out; - tmp &= ~mask; + tmp = orig & ~mask; tmp |= val & mask; - ret = _regmap_write(map, reg, tmp); + if (tmp != orig) { + ret = _regmap_write(map, reg, tmp); + *change = true; + } else { + *change = false; + } out: mutex_unlock(&map->lock); return ret; } + +/** + * regmap_update_bits: Perform a read/modify/write cycle on the register map + * + * @map: Register map to update + * @reg: Register to update + * @mask: Bitmask to change + * @val: New value for bitmask + * + * Returns zero for success, a negative number on error. + */ +int regmap_update_bits(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val) +{ + bool change; + return _regmap_update_bits(map, reg, mask, val, &change); +} EXPORT_SYMBOL_GPL(regmap_update_bits); +/** + * regmap_update_bits_check: Perform a read/modify/write cycle on the + * register map and report if updated + * + * @map: Register map to update + * @reg: Register to update + * @mask: Bitmask to change + * @val: New value for bitmask + * @change: Boolean indicating if a write was done + * + * Returns zero for success, a negative number on error. + */ +int regmap_update_bits_check(struct regmap *map, unsigned int reg, + unsigned int mask, unsigned int val, + bool *change) +{ + return _regmap_update_bits(map, reg, mask, val, change); +} +EXPORT_SYMBOL_GPL(regmap_update_bits_check); + static int __init regmap_initcall(void) { regmap_debugfs_initcall(); diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 9dff77bfe1e..409f5ce7882 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c @@ -126,7 +126,7 @@ void sysdev_class_remove_file(struct sysdev_class *c, } EXPORT_SYMBOL_GPL(sysdev_class_remove_file); -static struct kset *system_kset; +extern struct kset *system_kset; int sysdev_class_register(struct sysdev_class *cls) { @@ -331,14 +331,6 @@ void sysdev_unregister(struct sys_device *sysdev) EXPORT_SYMBOL_GPL(sysdev_register); EXPORT_SYMBOL_GPL(sysdev_unregister); -int __init system_bus_init(void) -{ - system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj); - if (!system_kset) - return -ENOMEM; - return 0; -} - #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) ssize_t sysdev_store_ulong(struct sys_device *sysdev, diff --git a/drivers/base/topology.c b/drivers/base/topology.c index f6f37a05a0c..ae989c57cd5 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c @@ -23,7 +23,6 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
* */ -#include <linux/sysdev.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/cpu.h> @@ -32,14 +31,14 @@ #include <linux/topology.h> #define define_one_ro_named(_name, _func) \ -static SYSDEV_ATTR(_name, 0444, _func, NULL) + static DEVICE_ATTR(_name, 0444, _func, NULL) #define define_one_ro(_name) \ -static SYSDEV_ATTR(_name, 0444, show_##_name, NULL) + static DEVICE_ATTR(_name, 0444, show_##_name, NULL) #define define_id_show_func(name) \ -static ssize_t show_##name(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ unsigned int cpu = dev->id; \ return sprintf(buf, "%d\n", topology_##name(cpu)); \ @@ -65,16 +64,16 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf) #ifdef arch_provides_topology_pointers #define define_siblings_show_map(name) \ -static ssize_t show_##name(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ unsigned int cpu = dev->id; \ return show_cpumap(0, topology_##name(cpu), buf); \ } #define define_siblings_show_list(name) \ -static ssize_t show_##name##_list(struct sys_device *dev, \ - struct sysdev_attribute *attr, \ +static ssize_t show_##name##_list(struct device *dev, \ + struct device_attribute *attr, \ char *buf) \ { \ unsigned int cpu = dev->id; \ @@ -83,15 +82,15 @@ static ssize_t show_##name##_list(struct sys_device *dev, \ #else #define define_siblings_show_map(name) \ -static ssize_t show_##name(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##name(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ return show_cpumap(0, topology_##name(dev->id), buf); \ } #define define_siblings_show_list(name) \ -static ssize_t show_##name##_list(struct sys_device *dev, \ - struct sysdev_attribute *attr, \ +static ssize_t show_##name##_list(struct device *dev, \ + struct device_attribute *attr, \ char *buf) \ { \ return show_cpumap(1, topology_##name(dev->id), buf); \ @@ -124,16 +123,16 @@ define_one_ro_named(book_siblings_list, show_book_cpumask_list); #endif static struct attribute *default_attrs[] = { - &attr_physical_package_id.attr, - &attr_core_id.attr, - &attr_thread_siblings.attr, - &attr_thread_siblings_list.attr, - &attr_core_siblings.attr, - &attr_core_siblings_list.attr, + &dev_attr_physical_package_id.attr, + &dev_attr_core_id.attr, + &dev_attr_thread_siblings.attr, + &dev_attr_thread_siblings_list.attr, + &dev_attr_core_siblings.attr, + &dev_attr_core_siblings_list.attr, #ifdef CONFIG_SCHED_BOOK - &attr_book_id.attr, - &attr_book_siblings.attr, - &attr_book_siblings_list.attr, + &dev_attr_book_id.attr, + &dev_attr_book_siblings.attr, + &dev_attr_book_siblings_list.attr, #endif NULL }; @@ -146,16 +145,16 @@ static struct attribute_group topology_attr_group = { /* Add/Remove cpu_topology interface for CPU device */ static int __cpuinit topology_add_dev(unsigned int cpu) { - struct sys_device *sys_dev = get_cpu_sysdev(cpu); + struct device *dev = get_cpu_device(cpu); - return sysfs_create_group(&sys_dev->kobj, &topology_attr_group); + return sysfs_create_group(&dev->kobj, &topology_attr_group); } static void __cpuinit topology_remove_dev(unsigned int cpu) { - struct sys_device *sys_dev = get_cpu_sysdev(cpu); + struct device *dev = get_cpu_device(cpu); - sysfs_remove_group(&sys_dev->kobj, &topology_attr_group); + 
sysfs_remove_group(&dev->kobj, &topology_attr_group); } static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,