Diffstat (limited to 'drivers/base')
 drivers/base/Kconfig             |   3
 drivers/base/Makefile            |   1
 drivers/base/base.h              |   6
 drivers/base/bus.c               |   6
 drivers/base/core.c              |   7
 drivers/base/cpu.c               |  11
 drivers/base/dd.c                | 148
 drivers/base/driver.c            |  65
 drivers/base/memory.c            |   2
 drivers/base/platform.c          |   2
 drivers/base/power/domain.c      | 253
 drivers/base/power/generic_ops.c | 157
 drivers/base/power/main.c        | 247
 drivers/base/power/power.h       |   4
 drivers/base/power/qos.c         |  61
 drivers/base/power/sysfs.c       |  47
 drivers/base/power/wakeup.c      |  85
 drivers/base/soc.c               | 183
 18 files changed, 1069 insertions(+), 219 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 7be9f79018e..9aa618acfe9 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -176,6 +176,9 @@ config GENERIC_CPU_DEVICES
bool
default n
+config SOC_BUS
+ bool
+
source "drivers/base/regmap/Kconfig"
config DMA_SHARED_BUFFER
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 610f9997a40..b6d1b9c4200 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MODULES) += module.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
+obj-$(CONFIG_SOC_BUS) += soc.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b858dfd9a37..6ee17bb391a 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -59,6 +59,10 @@ struct driver_private {
* @knode_parent - node in sibling list
* @knode_driver - node in driver list
* @knode_bus - node in bus list
+ * @deferred_probe - entry in deferred_probe_list which is used to retry the
+ * binding of drivers which were unable to get all the resources needed by
+ * the device, typically because the device depends on another driver getting
+ * probed first.
* @driver_data - private pointer for driver specific info. Will turn into a
* list soon.
* @device - pointer back to the struct class that this structure is
@@ -71,6 +75,7 @@ struct device_private {
struct klist_node knode_parent;
struct klist_node knode_driver;
struct klist_node knode_bus;
+ struct list_head deferred_probe;
void *driver_data;
struct device *device;
};
@@ -105,6 +110,7 @@ extern void bus_remove_driver(struct device_driver *drv);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
+extern void driver_deferred_probe_del(struct device *dev);
static inline int driver_match_device(struct device_driver *drv,
struct device *dev)
{
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 40fb12288ce..26a06b801b5 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1194,13 +1194,15 @@ EXPORT_SYMBOL_GPL(subsys_interface_register);
void subsys_interface_unregister(struct subsys_interface *sif)
{
- struct bus_type *subsys = sif->subsys;
+ struct bus_type *subsys;
struct subsys_dev_iter iter;
struct device *dev;
- if (!sif)
+ if (!sif || !sif->subsys)
return;
+ subsys = sif->subsys;
+
mutex_lock(&subsys->p->mutex);
list_del_init(&sif->node);
if (sif->remove_dev) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 74dda4f697f..e28ce9898af 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -18,6 +18,8 @@
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/genhd.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
@@ -267,6 +269,9 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (dev->driver)
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+ /* Add common DT information about the device */
+ of_device_uevent(dev, env);
+
/* have the bus specific function add its stuff */
if (dev->bus && dev->bus->uevent) {
retval = dev->bus->uevent(dev, env);
@@ -921,6 +926,7 @@ int device_private_init(struct device *dev)
dev->p->device = dev;
klist_init(&dev->p->klist_children, klist_children_get,
klist_children_put);
+ INIT_LIST_HEAD(&dev->p->deferred_probe);
return 0;
}
@@ -1188,6 +1194,7 @@ void device_del(struct device *dev)
device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
bus_remove_device(dev);
+ driver_deferred_probe_del(dev);
/*
* Some platform devices are driven without driver attached
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4dabf5077c4..adf937bf409 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
#include <linux/percpu.h>
#include "base.h"
@@ -244,6 +245,9 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
cpu->dev.release = cpu_device_release;
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = arch_cpu_uevent;
+#endif
error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
@@ -268,6 +272,10 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#endif
+
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
&dev_attr_probe.attr,
@@ -278,6 +286,9 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+ &dev_attr_modalias.attr,
+#endif
NULL
};
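/*
 * Illustrative sketch (not part of this patch): an architecture that selects
 * CONFIG_ARCH_HAS_CPU_AUTOPROBE is expected to supply the two hooks wired up
 * above.  The modalias format below is a made-up example, not any real
 * architecture's format.
 */
#include <linux/cpu.h>
#include <linux/gfp.h>

ssize_t arch_print_cpu_modalias(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	/* Emit a one-line modalias describing the CPU's features. */
	return sprintf(buf, "cpu:type:demo:feature:,0001\n");
}

int arch_cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	char *buf = (char *)get_zeroed_page(GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	arch_print_cpu_modalias(dev, NULL, buf);
	add_uevent_var(env, "MODALIAS=%s", buf);
	free_page((unsigned long)buf);
	return 0;
}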
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 142e3d600f1..1b1cbb571d3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -28,6 +28,141 @@
#include "base.h"
#include "power/power.h"
+/*
+ * Deferred Probe infrastructure.
+ *
+ * Sometimes driver probe order matters, but the kernel doesn't always have
+ * dependency information which means some drivers will get probed before a
+ * resource it depends on is available. For example, an SDHCI driver may
+ * first need a GPIO line from an i2c GPIO controller before it can be
+ * initialized. If a required resource is not available yet, a driver can
+ * request probing to be deferred by returning -EPROBE_DEFER from its probe
+ * hook.
+ *
+ * Deferred probe maintains two lists of devices, a pending list and an active
+ * list. A driver returning -EPROBE_DEFER causes the device to be added to the
+ * pending list. A successful driver probe will trigger moving all devices
+ * from the pending to the active list so that the workqueue will eventually
+ * retry them.
+ *
+ * The deferred_probe_mutex must be held any time the deferred_probe_*_list
+ * or the (struct device*)->p->deferred_probe pointers are manipulated.
+ */
+static DEFINE_MUTEX(deferred_probe_mutex);
+static LIST_HEAD(deferred_probe_pending_list);
+static LIST_HEAD(deferred_probe_active_list);
+static struct workqueue_struct *deferred_wq;
+
+/**
+ * deferred_probe_work_func() - Retry probing devices in the active list.
+ */
+static void deferred_probe_work_func(struct work_struct *work)
+{
+ struct device *dev;
+ struct device_private *private;
+ /*
+ * This block processes every device in the deferred 'active' list.
+ * Each device is removed from the active list and passed to
+ * bus_probe_device() to re-attempt the probe. The loop continues
+ * until every device in the active list is removed and retried.
+ *
+ * Note: Once the device is removed from the list and the mutex is
+ * released, it is possible for the device to get freed by another thread
+ * and cause an illegal pointer dereference. This code uses
+ * get/put_device() to ensure the device structure cannot disappear
+ * from under our feet.
+ */
+ mutex_lock(&deferred_probe_mutex);
+ while (!list_empty(&deferred_probe_active_list)) {
+ private = list_first_entry(&deferred_probe_active_list,
+ typeof(*dev->p), deferred_probe);
+ dev = private->device;
+ list_del_init(&private->deferred_probe);
+
+ get_device(dev);
+
+ /*
+ * Drop the mutex while probing each device; the probe path may
+ * manipulate the deferred list.
+ */
+ mutex_unlock(&deferred_probe_mutex);
+ dev_dbg(dev, "Retrying from deferred list\n");
+ bus_probe_device(dev);
+ mutex_lock(&deferred_probe_mutex);
+
+ put_device(dev);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
+
+static void driver_deferred_probe_add(struct device *dev)
+{
+ mutex_lock(&deferred_probe_mutex);
+ if (list_empty(&dev->p->deferred_probe)) {
+ dev_dbg(dev, "Added to deferred list\n");
+ list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+
+void driver_deferred_probe_del(struct device *dev)
+{
+ mutex_lock(&deferred_probe_mutex);
+ if (!list_empty(&dev->p->deferred_probe)) {
+ dev_dbg(dev, "Removed from deferred list\n");
+ list_del_init(&dev->p->deferred_probe);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+
+static bool driver_deferred_probe_enable = false;
+/**
+ * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
+ *
+ * This function moves all devices from the pending list to the active
+ * list and schedules the deferred probe workqueue to process them. It
+ * should be called anytime a driver is successfully bound to a device.
+ */
+static void driver_deferred_probe_trigger(void)
+{
+ if (!driver_deferred_probe_enable)
+ return;
+
+ /*
+ * A successful probe means that all the devices in the pending list
+ * should be triggered to be reprobed. Move all the deferred devices
+ * into the active list so they can be retried by the workqueue.
+ */
+ mutex_lock(&deferred_probe_mutex);
+ list_splice_tail_init(&deferred_probe_pending_list,
+ &deferred_probe_active_list);
+ mutex_unlock(&deferred_probe_mutex);
+
+ /*
+ * Kick the re-probe thread. It may already be scheduled, but it is
+ * safe to kick it again.
+ */
+ queue_work(deferred_wq, &deferred_probe_work);
+}
+
+/**
+ * deferred_probe_initcall() - Enable probing of deferred devices
+ *
+ * We don't want to get in the way when the bulk of drivers are getting probed.
+ * Instead, this initcall makes sure that deferred probing is delayed until
+ * late_initcall time.
+ */
+static int deferred_probe_initcall(void)
+{
+ deferred_wq = create_singlethread_workqueue("deferwq");
+ if (WARN_ON(!deferred_wq))
+ return -ENOMEM;
+
+ driver_deferred_probe_enable = true;
+ driver_deferred_probe_trigger();
+ return 0;
+}
+late_initcall(deferred_probe_initcall);
static void driver_bound(struct device *dev)
{
@@ -42,6 +177,13 @@ static void driver_bound(struct device *dev)
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ /*
+ * Make sure the device is no longer in one of the deferred lists and
+ * kick off retrying all pending devices
+ */
+ driver_deferred_probe_del(dev);
+ driver_deferred_probe_trigger();
+
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
@@ -142,7 +284,11 @@ probe_failed:
driver_sysfs_remove(dev);
dev->driver = NULL;
- if (ret != -ENODEV && ret != -ENXIO) {
+ if (ret == -EPROBE_DEFER) {
+ /* Driver requested deferred probing */
+ dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
+ driver_deferred_probe_add(dev);
+ } else if (ret != -ENODEV && ret != -ENXIO) {
/* driver matched but the probe failed */
printk(KERN_WARNING
"%s: probe of %s failed with error %d\n",
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b631f7c5945..3ec3896c83a 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -123,64 +123,6 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-/**
- * driver_add_kobj - add a kobject below the specified driver
- * @drv: requesting device driver
- * @kobj: kobject to add below this driver
- * @fmt: format string that names the kobject
- *
- * You really don't want to do this, this is only here due to one looney
- * iseries driver, go poke those developers if you are annoyed about
- * this...
- */
-int driver_add_kobj(struct device_driver *drv, struct kobject *kobj,
- const char *fmt, ...)
-{
- va_list args;
- char *name;
- int ret;
-
- va_start(args, fmt);
- name = kvasprintf(GFP_KERNEL, fmt, args);
- va_end(args);
-
- if (!name)
- return -ENOMEM;
-
- ret = kobject_add(kobj, &drv->p->kobj, "%s", name);
- kfree(name);
- return ret;
-}
-EXPORT_SYMBOL_GPL(driver_add_kobj);
-
-/**
- * get_driver - increment driver reference count.
- * @drv: driver.
- */
-struct device_driver *get_driver(struct device_driver *drv)
-{
- if (drv) {
- struct driver_private *priv;
- struct kobject *kobj;
-
- kobj = kobject_get(&drv->p->kobj);
- priv = to_driver(kobj);
- return priv->driver;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(get_driver);
-
-/**
- * put_driver - decrement driver's refcount.
- * @drv: driver.
- */
-void put_driver(struct device_driver *drv)
-{
- kobject_put(&drv->p->kobj);
-}
-EXPORT_SYMBOL_GPL(put_driver);
-
static int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups)
{
@@ -234,7 +176,6 @@ int driver_register(struct device_driver *drv)
other = driver_find(drv->name, drv->bus);
if (other) {
- put_driver(other);
printk(KERN_ERR "Error: Driver '%s' is already registered, "
"aborting...\n", drv->name);
return -EBUSY;
@@ -275,7 +216,9 @@ EXPORT_SYMBOL_GPL(driver_unregister);
* Call kset_find_obj() to iterate over list of drivers on
* a bus to find driver by name. Return driver if found.
*
- * Note that kset_find_obj increments driver's reference count.
+ * This routine provides no locking to prevent the driver it returns
+ * from being unregistered or unloaded while the caller is using it.
+ * The caller is responsible for preventing this.
*/
struct device_driver *driver_find(const char *name, struct bus_type *bus)
{
@@ -283,6 +226,8 @@ struct device_driver *driver_find(const char *name, struct bus_type *bus)
struct driver_private *priv;
if (k) {
+ /* Drop reference added by kset_find_obj() */
+ kobject_put(k);
priv = to_driver(k);
return priv->driver;
}
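/*
 * Illustrative sketch (not part of this patch): with get_driver()/put_driver()
 * removed, driver_find() returns an unreferenced pointer, so the caller must
 * keep the driver from being unregistered for as long as the pointer is used.
 * my_bus_type is a hypothetical bus.
 */
#include <linux/device.h>

extern struct bus_type my_bus_type;	/* hypothetical */

static bool demo_driver_is_registered(void)
{
	struct device_driver *drv = driver_find("demo", &my_bus_type);

	/*
	 * No reference is taken on drv; it is only safe to dereference while
	 * the caller guarantees the driver stays registered.
	 */
	return drv != NULL;
}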
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9e60dbe9fd9..7dda4f790f0 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -466,7 +466,7 @@ store_hard_offline_page(struct device *dev,
if (strict_strtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = __memory_failure(pfn, 0, 0);
+ ret = memory_failure(pfn, 0, 0);
return ret ? ret : count;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f0c605e99ad..a1a72250258 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -621,7 +621,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
int rc;
/* Some devices have extra OF data and an OF-style MODALIAS */
- rc = of_device_uevent(dev,env);
+ rc = of_device_uevent_modalias(dev,env);
if (rc != -ENODEV)
return rc;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 978bbf7ac6a..73ce9fbe983 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -366,7 +366,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node)
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
- || pdd->dev->power.irq_safe))
+ || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
not_suspended++;
if (not_suspended > genpd->in_progress)
@@ -503,6 +503,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
+ if (dev_gpd_data(dev)->always_on)
+ return -EBUSY;
+
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
return -EBUSY;
@@ -764,8 +767,10 @@ static int pm_genpd_prepare(struct device *dev)
genpd_acquire_lock(genpd);
- if (genpd->prepared_count++ == 0)
+ if (genpd->prepared_count++ == 0) {
+ genpd->suspended_count = 0;
genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+ }
genpd_release_lock(genpd);
@@ -820,17 +825,16 @@ static int pm_genpd_suspend(struct device *dev)
}
/**
- * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
* @dev: Device to suspend.
*
* Carry out a late suspend of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int pm_genpd_suspend_late(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -838,14 +842,28 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
- return 0;
+ return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
+}
- ret = genpd_suspend_late(genpd, dev);
- if (ret)
- return ret;
+/**
+ * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
genpd_stop_dev(genpd, dev);
@@ -862,13 +880,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
}
/**
- * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Restore power to the device's PM domain, if necessary, and start the device.
*/
static int pm_genpd_resume_noirq(struct device *dev)
{
@@ -880,7 +895,8 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
+ if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
/*
@@ -890,13 +906,34 @@ static int pm_genpd_resume_noirq(struct device *dev)
*/
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- genpd_start_dev(genpd, dev);
- return genpd_resume_early(genpd, dev);
+ return genpd_start_dev(genpd, dev);
}
/**
- * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_early(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
+}
+
+/**
+ * pm_genpd_resume - Resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Resume a device under the assumption that its pm_domain field points to the
@@ -917,7 +954,7 @@ static int pm_genpd_resume(struct device *dev)
}
/**
- * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * pm_genpd_freeze - Freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Freeze a device under the assumption that its pm_domain field points to the
@@ -938,7 +975,29 @@ static int pm_genpd_freeze(struct device *dev)
}
/**
- * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_late(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
@@ -949,7 +1008,6 @@ static int pm_genpd_freeze(struct device *dev)
static int pm_genpd_freeze_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -957,20 +1015,33 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
- return 0;
+ return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+ 0 : genpd_stop_dev(genpd, dev);
+}
- ret = genpd_freeze_late(genpd, dev);
- if (ret)
- return ret;
+/**
+ * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * @dev: Device to thaw.
+ *
+ * Start the device, unless power has been removed from the domain already
+ * before the system transition.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
- genpd_stop_dev(genpd, dev);
+ dev_dbg(dev, "%s()\n", __func__);
- return 0;
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+ 0 : genpd_start_dev(genpd, dev);
}
/**
- * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Carry out an early thaw of a device under the assumption that its
@@ -978,7 +1049,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int pm_genpd_thaw_early(struct device *dev)
{
struct generic_pm_domain *genpd;
@@ -988,12 +1059,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
- return 0;
-
- genpd_start_dev(genpd, dev);
-
- return genpd_thaw_early(genpd, dev);
+ return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}
/**
@@ -1018,13 +1084,11 @@ static int pm_genpd_thaw(struct device *dev)
}
/**
- * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
- * Carry out an early restore of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Make sure the domain will be in the same power state as before the
+ * hibernation the system is resuming from and start the device if necessary.
*/
static int pm_genpd_restore_noirq(struct device *dev)
{
@@ -1040,23 +1104,35 @@ static int pm_genpd_restore_noirq(struct device *dev)
* Since all of the "noirq" callbacks are executed sequentially, it is
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
+ *
+ * At this point suspended_count == 0 means we are being run for the
+ * first time for the given domain in the present cycle.
*/
- genpd->status = GPD_STATE_POWER_OFF;
- if (genpd->suspend_power_off) {
+ if (genpd->suspended_count++ == 0) {
/*
- * The boot kernel might put the domain into the power on state,
- * so make sure it really is powered off.
+ * The boot kernel might put the domain into arbitrary state,
+ * so make it appear as powered off to pm_genpd_poweron(), so
+ * that it tries to power it on in case it was really off.
*/
- if (genpd->power_off)
- genpd->power_off(genpd);
- return 0;
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (genpd->suspend_power_off) {
+ /*
+ * If the domain was off before the hibernation, make
+ * sure it will be off going forward.
+ */
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+
+ return 0;
+ }
}
+ if (genpd->suspend_power_off)
+ return 0;
+
pm_genpd_poweron(genpd);
- genpd->suspended_count--;
- genpd_start_dev(genpd, dev);
- return genpd_resume_early(genpd, dev);
+ return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
}
/**
@@ -1099,11 +1175,15 @@ static void pm_genpd_complete(struct device *dev)
#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
+#define pm_genpd_suspend_late NULL
#define pm_genpd_suspend_noirq NULL
+#define pm_genpd_resume_early NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
+#define pm_genpd_freeze_late NULL
#define pm_genpd_freeze_noirq NULL
+#define pm_genpd_thaw_early NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_restore_noirq NULL
@@ -1171,6 +1251,38 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
}
/**
+ * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
+ * @genpd_node: Device tree node pointer representing the PM domain to which
+ * the device is added.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
+ struct gpd_timing_data *td)
+{
+ struct generic_pm_domain *genpd = NULL, *gpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (gpd->of_node == genpd_node) {
+ genpd = gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ if (!genpd)
+ return -EINVAL;
+
+ return __pm_genpd_add_device(genpd, dev, td);
+}
+
+/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @genpd: PM domain to remove the device from.
* @dev: Device to be removed.
@@ -1216,6 +1328,26 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
+ * @dev: Device to set/unset the flag for.
+ * @val: The new value of the device's "always on" flag.
+ */
+void pm_genpd_dev_always_on(struct device *dev, bool val)
+{
+ struct pm_subsys_data *psd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ psd = dev_to_psd(dev);
+ if (psd && psd->domain_data)
+ to_gpd_data(psd->domain_data)->always_on = val;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
+
+/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
@@ -1450,7 +1582,7 @@ static int pm_genpd_default_suspend_late(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
- return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+ return cb ? cb(dev) : pm_generic_suspend_late(dev);
}
/**
@@ -1461,7 +1593,7 @@ static int pm_genpd_default_resume_early(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
- return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+ return cb ? cb(dev) : pm_generic_resume_early(dev);
}
/**
@@ -1494,7 +1626,7 @@ static int pm_genpd_default_freeze_late(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
- return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+ return cb ? cb(dev) : pm_generic_freeze_late(dev);
}
/**
@@ -1505,7 +1637,7 @@ static int pm_genpd_default_thaw_early(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
- return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+ return cb ? cb(dev) : pm_generic_thaw_early(dev);
}
/**
@@ -1557,23 +1689,28 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->poweroff_task = NULL;
genpd->resume_count = 0;
genpd->device_count = 0;
- genpd->suspended_count = 0;
genpd->max_off_time_ns = -1;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
+ genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+ genpd->domain.ops.resume_early = pm_genpd_resume_early;
genpd->domain.ops.resume = pm_genpd_resume;
genpd->domain.ops.freeze = pm_genpd_freeze;
+ genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+ genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
genpd->domain.ops.thaw = pm_genpd_thaw;
genpd->domain.ops.poweroff = pm_genpd_suspend;
+ genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+ genpd->domain.ops.restore_early = pm_genpd_resume_early;
genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
genpd->dev_ops.save_state = pm_genpd_default_save_state;
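/*
 * Illustrative sketch (not part of this patch): attaching a device to a
 * generic PM domain by device tree node and marking it "always on".  Passing
 * NULL timing data relies on the domain's defaults; np and pdev are assumed
 * to come from the caller.
 */
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int demo_attach_to_domain(struct platform_device *pdev,
				 struct device_node *np)
{
	int ret = __pm_genpd_of_add_device(np, &pdev->dev, NULL);

	if (ret)
		return ret;

	/*
	 * Keep the device powered: runtime suspend of the domain and the
	 * system sleep "noirq" phases will now skip stopping it.
	 */
	pm_genpd_dev_always_on(&pdev->dev, true);
	return 0;
}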
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 10bdd793f0b..d03d290f31c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev)
}
/**
- * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the PM callback corresponding to @event provided by the driver of
- * @dev, if defined, and return its error code. Return 0 if the callback is
- * not present.
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
*/
-static int __pm_generic_call(struct device *dev, int event, bool noirq)
+int pm_generic_suspend_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*callback)(struct device *);
-
- if (!pm)
- return 0;
-
- switch (event) {
- case PM_EVENT_SUSPEND:
- callback = noirq ? pm->suspend_noirq : pm->suspend;
- break;
- case PM_EVENT_FREEZE:
- callback = noirq ? pm->freeze_noirq : pm->freeze;
- break;
- case PM_EVENT_HIBERNATE:
- callback = noirq ? pm->poweroff_noirq : pm->poweroff;
- break;
- case PM_EVENT_RESUME:
- callback = noirq ? pm->resume_noirq : pm->resume;
- break;
- case PM_EVENT_THAW:
- callback = noirq ? pm->thaw_noirq : pm->thaw;
- break;
- case PM_EVENT_RESTORE:
- callback = noirq ? pm->restore_noirq : pm->restore;
- break;
- default:
- callback = NULL;
- break;
- }
- return callback ? callback(dev) : 0;
+ return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
/**
- * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
* @dev: Device to suspend.
*/
-int pm_generic_suspend_noirq(struct device *dev)
+int pm_generic_suspend_late(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
-EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
/**
* pm_generic_suspend - Generic suspend callback for subsystems.
@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
*/
int pm_generic_suspend(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
+ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
+
+/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
/**
+ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
+
+/**
* pm_generic_poweroff - Generic poweroff callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
+ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESUME, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
/**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESUME, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
/**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
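/*
 * Illustrative sketch (not part of this patch): a driver filling in the new
 * "late" and "early" system sleep callbacks next to the ordinary ones.  The
 * demo_* handlers are hypothetical stubs.
 */
#include <linux/pm.h>

static int demo_suspend(struct device *dev)      { return 0; }
static int demo_suspend_late(struct device *dev) { return 0; }
static int demo_resume_early(struct device *dev) { return 0; }
static int demo_resume(struct device *dev)       { return 0; }

static const struct dev_pm_ops demo_pm_ops = {
	.suspend	= demo_suspend,		/* ordinary suspend phase */
	.suspend_late	= demo_suspend_late,	/* after all .suspend, IRQs still on */
	.resume_early	= demo_resume_early,	/* before any .resume, IRQs still on */
	.resume		= demo_resume,
};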
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e2cc3d2e0ec..b462c0e341c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
+LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
}
/**
+ * pm_late_early_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ */
+static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
+ pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend_late;
+ case PM_EVENT_RESUME:
+ return ops->resume_early;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze_late;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff_late;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw_early;
+ case PM_EVENT_RESTORE:
+ return ops->restore_early;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+/**
* pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_RESUME(0);
if (dev->pm_domain) {
- info = "EARLY power domain ";
+ info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- info = "EARLY type ";
+ info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- info = "EARLY class ";
+ info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- info = "EARLY bus ";
+ info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
- info = "EARLY driver ";
+ info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
/**
- * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
- * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
+ * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
* enable device drivers to receive interrupts.
*/
-void dpm_resume_noirq(pm_message_t state)
+static void dpm_resume_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
int error;
get_device(dev);
- list_move_tail(&dev->power.entry, &dpm_suspended_list);
+ list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
error = device_resume_noirq(dev, state);
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
+
+ mutex_lock(&dpm_list_mtx);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ dpm_show_time(starttime, state, "noirq");
+ resume_device_irqs();
+}
+
+/**
+ * device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_resume_early(struct device *dev, pm_message_t state)
+{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "early driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
+ TRACE_RESUME(error);
+ return error;
+}
+
+/**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static void dpm_resume_early(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_late_early_list)) {
+ struct device *dev = to_device(dpm_late_early_list.next);
+ int error;
+
+ get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_suspended_list);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_resume_early(dev, state);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " early", error);
}
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "early");
- resume_device_irqs();
}
-EXPORT_SYMBOL_GPL(dpm_resume_noirq);
+
+/**
+ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_start(pm_message_t state)
+{
+ dpm_resume_noirq(state);
+ dpm_resume_early(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
* device_resume - Execute "resume" callbacks for given device.
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
char *info = NULL;
if (dev->pm_domain) {
- info = "LATE power domain ";
+ info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- info = "LATE type ";
+ info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- info = "LATE class ";
+ info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- info = "LATE bus ";
+ info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
- info = "LATE driver ";
+ info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
}
/**
- * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
-int dpm_suspend_noirq(pm_message_t state)
+static int dpm_suspend_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ while (!list_empty(&dpm_late_early_list)) {
+ struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
- pm_dev_err(dev, state, " late", error);
+ pm_dev_err(dev, state, " noirq", error);
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
if (error)
dpm_resume_noirq(resume_event(state));
else
+ dpm_show_time(starttime, state, "noirq");
+ return error;
+}
+
+/**
+ * device_suspend_late - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_suspend_late(struct device *dev, pm_message_t state)
+{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "late driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+ return dpm_run_callback(callback, dev, state, info);
+}
+
+/**
+ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static int dpm_suspend_late(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend_late(dev, state);
+
+ mutex_lock(&dpm_list_mtx);
+ if (error) {
+ pm_dev_err(dev, state, " late", error);
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+ dpm_save_failed_dev(dev_name(dev));
+ put_device(dev);
+ break;
+ }
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_late_early_list);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ if (error)
+ dpm_resume_early(resume_event(state));
+ else
dpm_show_time(starttime, state, "late");
+
return error;
}
-EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
+
+/**
+ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_end(pm_message_t state)
+{
+ int error = dpm_suspend_late(state);
+
+ return error ? : dpm_suspend_noirq(state);
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
* legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
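/*
 * Illustrative sketch (not part of this patch): how a system suspend path is
 * expected to use the combined entry points exported above.  dpm_suspend_end()
 * runs the "late" then the "noirq" phase; dpm_resume_start() undoes them in
 * reverse order.  The platform sleep hook is elided.
 */
#include <linux/pm.h>

static int demo_enter_suspend(void)
{
	int error = dpm_suspend_end(PMSG_SUSPEND);

	if (error)
		return error;	/* nothing entered the sleep state */

	/* ... the platform enters its sleep state here ... */

	dpm_resume_start(PMSG_RESUME);
	return 0;
}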
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 9bf62323aaf..eeb4bff9505 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -71,6 +71,8 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add(struct device *dev);
+extern void pm_qos_sysfs_remove(struct device *dev);
#else /* CONFIG_PM */
@@ -79,5 +81,7 @@ static inline void dpm_sysfs_remove(struct device *dev) {}
static inline void rpm_sysfs_remove(struct device *dev) {}
static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
static inline void wakeup_sysfs_remove(struct device *dev) {}
+static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
+static inline void pm_qos_sysfs_remove(struct device *dev) {}
#endif
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index c5d35883746..71855570922 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
+#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
@@ -166,6 +167,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
+ /*
+ * If the device's PM QoS resume latency limit has been exposed to user
+ * space, it has to be hidden at this point.
+ */
+ dev_pm_qos_hide_latency_limit(dev);
+
mutex_lock(&dev_pm_qos_mtx);
dev->power.power_state = PMSG_INVALID;
@@ -445,3 +452,57 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
return error;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
+
+#ifdef CONFIG_PM_RUNTIME
+static void __dev_pm_qos_drop_user_request(struct device *dev)
+{
+ dev_pm_qos_remove_request(dev->power.pq_req);
+ dev->power.pq_req = NULL;
+}
+
+/**
+ * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
+ * @dev: Device whose PM QoS latency limit is to be exposed to user space.
+ * @value: Initial value of the latency limit.
+ */
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev) || value < 0)
+ return -EINVAL;
+
+ if (dev->power.pq_req)
+ return -EEXIST;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = dev_pm_qos_add_request(dev, req, value);
+ if (ret < 0)
+ return ret;
+
+ dev->power.pq_req = req;
+ ret = pm_qos_sysfs_add(dev);
+ if (ret)
+ __dev_pm_qos_drop_user_request(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+
+/**
+ * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
+ * @dev: Device whose PM QoS latency limit is to be hidden from user space.
+ */
+void dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ if (dev->power.pq_req) {
+ pm_qos_sysfs_remove(dev);
+ __dev_pm_qos_drop_user_request(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+#endif /* CONFIG_PM_RUNTIME */
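/*
 * Illustrative sketch (not part of this patch): a driver exposing its PM QoS
 * resume latency limit to user space on probe and hiding it on removal.  The
 * initial 100 us limit is an arbitrary example value; the knob appears as
 * the pm_qos_resume_latency_us attribute added in sysfs.c below.
 */
#include <linux/pm_qos.h>

static int demo_qos_probe(struct device *dev)
{
	/* Creates the writable latency-limit attribute for this device. */
	return dev_pm_qos_expose_latency_limit(dev, 100);
}

static void demo_qos_remove(struct device *dev)
{
	/* Also called implicitly by dev_pm_qos_constraints_destroy(). */
	dev_pm_qos_hide_latency_limit(dev);
}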
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index adf41be0ea6..95c12f6cb5b 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
+#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
@@ -217,6 +218,31 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
+static ssize_t pm_qos_latency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
+}
+
+static ssize_t pm_qos_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_request(dev->power.pq_req, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
+ pm_qos_latency_show, pm_qos_latency_store);
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
@@ -490,6 +516,17 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
+static struct attribute *pm_qos_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_resume_latency_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_attrs,
+};
+
int dpm_sysfs_add(struct device *dev)
{
int rc;
@@ -530,6 +567,16 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
+int pm_qos_sysfs_add(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
+}
+
+void pm_qos_sysfs_remove(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
+}
+
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index caf995fb774..2a3e581b8dc 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -53,6 +53,23 @@ static void pm_wakeup_timer_fn(unsigned long data);
static LIST_HEAD(wakeup_sources);
/**
+ * wakeup_source_prepare - Prepare a new wakeup source for initialization.
+ * @ws: Wakeup source to prepare.
+ * @name: Pointer to the name of the new wakeup source.
+ *
+ * Callers must ensure that the @name string won't be freed while @ws is still
+ * in use.
+ */
+void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
+{
+ if (ws) {
+ memset(ws, 0, sizeof(*ws));
+ ws->name = name;
+ }
+}
+EXPORT_SYMBOL_GPL(wakeup_source_prepare);
+
+/**
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
@@ -60,37 +77,44 @@ struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
- ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ ws = kmalloc(sizeof(*ws), GFP_KERNEL);
if (!ws)
return NULL;
- spin_lock_init(&ws->lock);
- if (name)
- ws->name = kstrdup(name, GFP_KERNEL);
-
+ wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
/**
+ * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
+ * @ws: Wakeup source to prepare for destruction.
+ *
+ * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
+ * be run in parallel with this function for the same wakeup source object.
+ */
+void wakeup_source_drop(struct wakeup_source *ws)
+{
+ if (!ws)
+ return;
+
+ del_timer_sync(&ws->timer);
+ __pm_relax(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_drop);
+
+/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
+ *
+ * Use only for wakeup source objects created with wakeup_source_create().
*/
void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
- spin_lock_irq(&ws->lock);
- while (ws->active) {
- spin_unlock_irq(&ws->lock);
-
- schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
-
- spin_lock_irq(&ws->lock);
- }
- spin_unlock_irq(&ws->lock);
-
+ wakeup_source_drop(ws);
kfree(ws->name);
kfree(ws);
}
@@ -105,6 +129,7 @@ void wakeup_source_add(struct wakeup_source *ws)
if (WARN_ON(!ws))
return;
+ spin_lock_init(&ws->lock);
setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
ws->active = false;
@@ -152,8 +177,10 @@ EXPORT_SYMBOL_GPL(wakeup_source_register);
*/
void wakeup_source_unregister(struct wakeup_source *ws)
{
- wakeup_source_remove(ws);
- wakeup_source_destroy(ws);
+ if (ws) {
+ wakeup_source_remove(ws);
+ wakeup_source_destroy(ws);
+ }
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
@@ -349,7 +376,6 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
ws->active = true;
ws->active_count++;
- ws->timer_expires = jiffies;
ws->last_time = ktime_get();
/* Increment the counter of events in progress. */
@@ -370,9 +396,14 @@ void __pm_stay_awake(struct wakeup_source *ws)
return;
spin_lock_irqsave(&ws->lock, flags);
+
ws->event_count++;
if (!ws->active)
wakeup_source_activate(ws);
+
+ del_timer(&ws->timer);
+ ws->timer_expires = 0;
+
spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);
@@ -438,6 +469,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
ws->max_time = duration;
del_timer(&ws->timer);
+ ws->timer_expires = 0;
/*
* Increment the counter of registered wakeup events and decrement the
@@ -492,11 +524,22 @@ EXPORT_SYMBOL_GPL(pm_relax);
* pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
* @data: Address of the wakeup source object associated with the event source.
*
- * Call __pm_relax() for the wakeup source whose address is stored in @data.
+ * Call wakeup_source_deactivate() for the wakeup source whose address is stored
+ * in @data if it is currently active, its timer has not been canceled, and
+ * the expiration time of the timer is not in the future.
*/
static void pm_wakeup_timer_fn(unsigned long data)
{
- __pm_relax((struct wakeup_source *)data);
+ struct wakeup_source *ws = (struct wakeup_source *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ if (ws->active && ws->timer_expires
+ && time_after_eq(jiffies, ws->timer_expires))
+ wakeup_source_deactivate(ws);
+
+ spin_unlock_irqrestore(&ws->lock, flags);
}
/**
@@ -534,7 +577,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
if (!expires)
expires = 1;
- if (time_after(expires, ws->timer_expires)) {
+ if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
mod_timer(&ws->timer, expires);
ws->timer_expires = expires;
}
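/*
 * Illustrative sketch (not part of this patch): using the new
 * wakeup_source_prepare() helper to initialize a statically allocated wakeup
 * source, then reporting a wakeup event with a 500 ms timeout from an
 * interrupt handler.  Names and the timeout value are made up.
 */
#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static struct wakeup_source demo_ws;

static void demo_wakeup_init(void)
{
	wakeup_source_prepare(&demo_ws, "demo");
	wakeup_source_add(&demo_ws);	/* initializes the lock and timer */
}

static irqreturn_t demo_wakeup_irq(int irq, void *data)
{
	/* Extends the timer only if it would expire later than before. */
	__pm_wakeup_event(&demo_ws, 500);
	return IRQ_HANDLED;
}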
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644
index 00000000000..05f150382da
--- /dev/null
+++ b/drivers/base/soc.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+#include <linux/err.h>
+
+static DEFINE_IDR(soc_ida);
+static DEFINE_SPINLOCK(soc_lock);
+
+static ssize_t soc_info_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+struct soc_device {
+ struct device dev;
+ struct soc_device_attribute *attr;
+ int soc_dev_num;
+};
+
+static struct bus_type soc_bus_type = {
+ .name = "soc",
+};
+
+static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
+
+struct device *soc_device_to_device(struct soc_device *soc_dev)
+{
+ return &soc_dev->dev;
+}
+
+static umode_t soc_attribute_mode(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ if ((attr == &dev_attr_machine.attr)
+ && (soc_dev->attr->machine != NULL))
+ return attr->mode;
+ if ((attr == &dev_attr_family.attr)
+ && (soc_dev->attr->family != NULL))
+ return attr->mode;
+ if ((attr == &dev_attr_revision.attr)
+ && (soc_dev->attr->revision != NULL))
+ return attr->mode;
+ if ((attr == &dev_attr_soc_id.attr)
+ && (soc_dev->attr->soc_id != NULL))
+ return attr->mode;
+
+ /* Unknown or unfilled attribute. */
+ return 0;
+}
+
+static ssize_t soc_info_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ if (attr == &dev_attr_machine)
+ return sprintf(buf, "%s\n", soc_dev->attr->machine);
+ if (attr == &dev_attr_family)
+ return sprintf(buf, "%s\n", soc_dev->attr->family);
+ if (attr == &dev_attr_revision)
+ return sprintf(buf, "%s\n", soc_dev->attr->revision);
+ if (attr == &dev_attr_soc_id)
+ return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
+
+ return -EINVAL;
+}
+
+static struct attribute *soc_attr[] = {
+ &dev_attr_machine.attr,
+ &dev_attr_family.attr,
+ &dev_attr_soc_id.attr,
+ &dev_attr_revision.attr,
+ NULL,
+};
+
+static const struct attribute_group soc_attr_group = {
+ .attrs = soc_attr,
+ .is_visible = soc_attribute_mode,
+};
+
+static const struct attribute_group *soc_attr_groups[] = {
+ &soc_attr_group,
+ NULL,
+};
+
+static void soc_release(struct device *dev)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ kfree(soc_dev);
+}
+
+struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
+{
+ struct soc_device *soc_dev;
+ int ret;
+
+ soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
+ if (!soc_dev) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ /* Fetch a unique (reclaimable) SOC ID. */
+ do {
+ if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ spin_lock(&soc_lock);
+ ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
+ spin_unlock(&soc_lock);
+
+ } while (ret == -EAGAIN);
+
+ if (ret)
+ goto out2;
+
+ soc_dev->attr = soc_dev_attr;
+ soc_dev->dev.bus = &soc_bus_type;
+ soc_dev->dev.groups = soc_attr_groups;
+ soc_dev->dev.release = soc_release;
+
+ dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
+
+ ret = device_register(&soc_dev->dev);
+ if (ret)
+ goto out3;
+
+ return soc_dev;
+
+out3:
+ ida_remove(&soc_ida, soc_dev->soc_dev_num);
+out2:
+ kfree(soc_dev);
+out1:
+ return ERR_PTR(ret);
+}
+
+/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
+void soc_device_unregister(struct soc_device *soc_dev)
+{
+ ida_remove(&soc_ida, soc_dev->soc_dev_num);
+
+ device_unregister(&soc_dev->dev);
+}
+
+static int __init soc_bus_register(void)
+{
+ spin_lock_init(&soc_lock);
+
+ return bus_register(&soc_bus_type);
+}
+core_initcall(soc_bus_register);
+
+static void __exit soc_bus_unregister(void)
+{
+ ida_destroy(&soc_ida);
+
+ bus_unregister(&soc_bus_type);
+}
+module_exit(soc_bus_unregister);
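/*
 * Illustrative sketch (not part of this patch): registering SoC information
 * on the new "soc" bus.  The attribute strings are made-up example values;
 * only the attributes that are non-NULL become visible in sysfs, courtesy of
 * soc_attribute_mode() above.
 */
#include <linux/err.h>
#include <linux/sys_soc.h>

static struct soc_device_attribute demo_soc_attr = {
	.machine  = "Demo Board",
	.family   = "Demo SoC Family",
	.soc_id   = "DEMO1234",
	.revision = "1.0",
};

static int __init demo_soc_init(void)
{
	struct soc_device *soc_dev = soc_device_register(&demo_soc_attr);

	if (IS_ERR(soc_dev))
		return PTR_ERR(soc_dev);

	/*
	 * Other devices can now be parented under the SoC node, e.g.
	 * my_dev->parent = soc_device_to_device(soc_dev);  (hypothetical)
	 */
	return 0;
}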