Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                      3
-rw-r--r--  drivers/base/Makefile                     1
-rw-r--r--  drivers/base/base.h                       6
-rw-r--r--  drivers/base/bus.c                        6
-rw-r--r--  drivers/base/core.c                       7
-rw-r--r--  drivers/base/cpu.c                       11
-rw-r--r--  drivers/base/dd.c                       148
-rw-r--r--  drivers/base/driver.c                    65
-rw-r--r--  drivers/base/memory.c                     2
-rw-r--r--  drivers/base/platform.c                   2
-rw-r--r--  drivers/base/power/domain.c             253
-rw-r--r--  drivers/base/power/generic_ops.c        157
-rw-r--r--  drivers/base/power/main.c               247
-rw-r--r--  drivers/base/power/power.h                4
-rw-r--r--  drivers/base/power/qos.c                 61
-rw-r--r--  drivers/base/power/sysfs.c               47
-rw-r--r--  drivers/base/power/wakeup.c              85
-rw-r--r--  drivers/base/regmap/internal.h           14
-rw-r--r--  drivers/base/regmap/regcache-lzo.c       16
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c    27
-rw-r--r--  drivers/base/regmap/regcache.c          102
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c     84
-rw-r--r--  drivers/base/regmap/regmap-i2c.c         17
-rw-r--r--  drivers/base/regmap/regmap-spi.c         17
-rw-r--r--  drivers/base/regmap/regmap.c            307
-rw-r--r--  drivers/base/soc.c                      183
26 files changed, 1593 insertions(+), 279 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 7be9f79018e..9aa618acfe9 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -176,6 +176,9 @@ config GENERIC_CPU_DEVICES
bool
default n
+config SOC_BUS
+ bool
+
source "drivers/base/regmap/Kconfig"
config DMA_SHARED_BUFFER
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 610f9997a40..b6d1b9c4200 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MODULES) += module.o
endif
obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
+obj-$(CONFIG_SOC_BUS) += soc.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b858dfd9a37..6ee17bb391a 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -59,6 +59,10 @@ struct driver_private {
* @knode_parent - node in sibling list
* @knode_driver - node in driver list
* @knode_bus - node in bus list
+ * @deferred_probe - entry in deferred_probe_list which is used to retry the
+ * binding of drivers which were unable to get all the resources needed by
+ * the device; typically because it depends on another driver getting
+ * probed first.
* @driver_data - private pointer for driver specific info. Will turn into a
* list soon.
* @device - pointer back to the struct device that this structure is
@@ -71,6 +75,7 @@ struct device_private {
struct klist_node knode_parent;
struct klist_node knode_driver;
struct klist_node knode_bus;
+ struct list_head deferred_probe;
void *driver_data;
struct device *device;
};
@@ -105,6 +110,7 @@ extern void bus_remove_driver(struct device_driver *drv);
extern void driver_detach(struct device_driver *drv);
extern int driver_probe_device(struct device_driver *drv, struct device *dev);
+extern void driver_deferred_probe_del(struct device *dev);
static inline int driver_match_device(struct device_driver *drv,
struct device *dev)
{
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 40fb12288ce..26a06b801b5 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1194,13 +1194,15 @@ EXPORT_SYMBOL_GPL(subsys_interface_register);
void subsys_interface_unregister(struct subsys_interface *sif)
{
- struct bus_type *subsys = sif->subsys;
+ struct bus_type *subsys;
struct subsys_dev_iter iter;
struct device *dev;
- if (!sif)
+ if (!sif || !sif->subsys)
return;
+ subsys = sif->subsys;
+
mutex_lock(&subsys->p->mutex);
list_del_init(&sif->node);
if (sif->remove_dev) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 74dda4f697f..e28ce9898af 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -18,6 +18,8 @@
#include <linux/string.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/genhd.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
@@ -267,6 +269,9 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (dev->driver)
add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+ /* Add common DT information about the device */
+ of_device_uevent(dev, env);
+
/* have the bus specific function add its stuff */
if (dev->bus && dev->bus->uevent) {
retval = dev->bus->uevent(dev, env);
@@ -921,6 +926,7 @@ int device_private_init(struct device *dev)
dev->p->device = dev;
klist_init(&dev->p->klist_children, klist_children_get,
klist_children_put);
+ INIT_LIST_HEAD(&dev->p->deferred_probe);
return 0;
}
@@ -1188,6 +1194,7 @@ void device_del(struct device *dev)
device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
bus_remove_device(dev);
+ driver_deferred_probe_del(dev);
/*
* Some platform devices are driven without driver attached
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4dabf5077c4..adf937bf409 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -11,6 +11,7 @@
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
#include <linux/percpu.h>
#include "base.h"
@@ -244,6 +245,9 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
cpu->dev.release = cpu_device_release;
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+ cpu->dev.bus->uevent = arch_cpu_uevent;
+#endif
error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
@@ -268,6 +272,10 @@ struct device *get_cpu_device(unsigned cpu)
}
EXPORT_SYMBOL_GPL(get_cpu_device);
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#endif
+
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
&dev_attr_probe.attr,
@@ -278,6 +286,9 @@ static struct attribute *cpu_root_attrs[] = {
&cpu_attrs[2].attr.attr,
&dev_attr_kernel_max.attr,
&dev_attr_offline.attr,
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+ &dev_attr_modalias.attr,
+#endif
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 142e3d600f1..1b1cbb571d3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -28,6 +28,141 @@
#include "base.h"
#include "power/power.h"
+/*
+ * Deferred Probe infrastructure.
+ *
+ * Sometimes driver probe order matters, but the kernel doesn't always have
+ * dependency information, which means some drivers will get probed before a
+ * resource they depend on is available. For example, an SDHCI driver may
+ * first need a GPIO line from an i2c GPIO controller before it can be
+ * initialized. If a required resource is not available yet, a driver can
+ * request probing to be deferred by returning -EPROBE_DEFER from its probe hook.
+ *
+ * Deferred probe maintains two lists of devices, a pending list and an active
+ * list. A driver returning -EPROBE_DEFER causes the device to be added to the
+ * pending list. A successful driver probe will trigger moving all devices
+ * from the pending to the active list so that the workqueue will eventually
+ * retry them.
+ *
+ * The deferred_probe_mutex must be held any time the deferred_probe_*_list
+ * or the (struct device*)->p->deferred_probe pointers are manipulated.
+ */
+static DEFINE_MUTEX(deferred_probe_mutex);
+static LIST_HEAD(deferred_probe_pending_list);
+static LIST_HEAD(deferred_probe_active_list);
+static struct workqueue_struct *deferred_wq;
+
+/**
+ * deferred_probe_work_func() - Retry probing devices in the active list.
+ */
+static void deferred_probe_work_func(struct work_struct *work)
+{
+ struct device *dev;
+ struct device_private *private;
+ /*
+ * This block processes every device in the deferred 'active' list.
+ * Each device is removed from the active list and passed to
+ * bus_probe_device() to re-attempt the probe. The loop continues
+ * until every device in the active list is removed and retried.
+ *
+ * Note: Once the device is removed from the list and the mutex is
+ * released, it is possible for the device to get freed by another thread
+ * and cause an illegal pointer dereference. This code uses
+ * get/put_device() to ensure the device structure cannot disappear
+ * from under our feet.
+ */
+ mutex_lock(&deferred_probe_mutex);
+ while (!list_empty(&deferred_probe_active_list)) {
+ private = list_first_entry(&deferred_probe_active_list,
+ typeof(*dev->p), deferred_probe);
+ dev = private->device;
+ list_del_init(&private->deferred_probe);
+
+ get_device(dev);
+
+ /*
+ * Drop the mutex while probing each device; the probe path may
+ * manipulate the deferred list
+ */
+ mutex_unlock(&deferred_probe_mutex);
+ dev_dbg(dev, "Retrying from deferred list\n");
+ bus_probe_device(dev);
+ mutex_lock(&deferred_probe_mutex);
+
+ put_device(dev);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
+
+static void driver_deferred_probe_add(struct device *dev)
+{
+ mutex_lock(&deferred_probe_mutex);
+ if (list_empty(&dev->p->deferred_probe)) {
+ dev_dbg(dev, "Added to deferred list\n");
+ list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+
+void driver_deferred_probe_del(struct device *dev)
+{
+ mutex_lock(&deferred_probe_mutex);
+ if (!list_empty(&dev->p->deferred_probe)) {
+ dev_dbg(dev, "Removed from deferred list\n");
+ list_del_init(&dev->p->deferred_probe);
+ }
+ mutex_unlock(&deferred_probe_mutex);
+}
+
+static bool driver_deferred_probe_enable = false;
+/**
+ * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
+ *
+ * This function moves all devices from the pending list to the active
+ * list and schedules the deferred probe workqueue to process them. It
+ * should be called any time a driver is successfully bound to a device.
+ */
+static void driver_deferred_probe_trigger(void)
+{
+ if (!driver_deferred_probe_enable)
+ return;
+
+ /*
+ * A successful probe means that all the devices in the pending list
+ * should be triggered to be reprobed. Move all the deferred devices
+ * into the active list so they can be retried by the workqueue
+ */
+ mutex_lock(&deferred_probe_mutex);
+ list_splice_tail_init(&deferred_probe_pending_list,
+ &deferred_probe_active_list);
+ mutex_unlock(&deferred_probe_mutex);
+
+ /*
+ * Kick the re-probe thread. It may already be scheduled, but it is
+ * safe to kick it again.
+ */
+ queue_work(deferred_wq, &deferred_probe_work);
+}
+
+/**
+ * deferred_probe_initcall() - Enable probing of deferred devices
+ *
+ * We don't want to get in the way when the bulk of drivers are getting probed.
+ * Instead, this initcall makes sure that deferred probing is delayed until
+ * late_initcall time.
+ */
+static int deferred_probe_initcall(void)
+{
+ deferred_wq = create_singlethread_workqueue("deferwq");
+ if (WARN_ON(!deferred_wq))
+ return -ENOMEM;
+
+ driver_deferred_probe_enable = true;
+ driver_deferred_probe_trigger();
+ return 0;
+}
+late_initcall(deferred_probe_initcall);
static void driver_bound(struct device *dev)
{
@@ -42,6 +177,13 @@ static void driver_bound(struct device *dev)
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
+ /*
+ * Make sure the device is no longer in one of the deferred lists and
+ * kick off retrying all pending devices
+ */
+ driver_deferred_probe_del(dev);
+ driver_deferred_probe_trigger();
+
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
@@ -142,7 +284,11 @@ probe_failed:
driver_sysfs_remove(dev);
dev->driver = NULL;
- if (ret != -ENODEV && ret != -ENXIO) {
+ if (ret == -EPROBE_DEFER) {
+ /* Driver requested deferred probing */
+ dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
+ driver_deferred_probe_add(dev);
+ } else if (ret != -ENODEV && ret != -ENXIO) {
/* driver matched but the probe failed */
printk(KERN_WARNING
"%s: probe of %s failed with error %d\n",
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b631f7c5945..3ec3896c83a 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -123,64 +123,6 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-/**
- * driver_add_kobj - add a kobject below the specified driver
- * @drv: requesting device driver
- * @kobj: kobject to add below this driver
- * @fmt: format string that names the kobject
- *
- * You really don't want to do this, this is only here due to one looney
- * iseries driver, go poke those developers if you are annoyed about
- * this...
- */
-int driver_add_kobj(struct device_driver *drv, struct kobject *kobj,
- const char *fmt, ...)
-{
- va_list args;
- char *name;
- int ret;
-
- va_start(args, fmt);
- name = kvasprintf(GFP_KERNEL, fmt, args);
- va_end(args);
-
- if (!name)
- return -ENOMEM;
-
- ret = kobject_add(kobj, &drv->p->kobj, "%s", name);
- kfree(name);
- return ret;
-}
-EXPORT_SYMBOL_GPL(driver_add_kobj);
-
-/**
- * get_driver - increment driver reference count.
- * @drv: driver.
- */
-struct device_driver *get_driver(struct device_driver *drv)
-{
- if (drv) {
- struct driver_private *priv;
- struct kobject *kobj;
-
- kobj = kobject_get(&drv->p->kobj);
- priv = to_driver(kobj);
- return priv->driver;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(get_driver);
-
-/**
- * put_driver - decrement driver's refcount.
- * @drv: driver.
- */
-void put_driver(struct device_driver *drv)
-{
- kobject_put(&drv->p->kobj);
-}
-EXPORT_SYMBOL_GPL(put_driver);
-
static int driver_add_groups(struct device_driver *drv,
const struct attribute_group **groups)
{
@@ -234,7 +176,6 @@ int driver_register(struct device_driver *drv)
other = driver_find(drv->name, drv->bus);
if (other) {
- put_driver(other);
printk(KERN_ERR "Error: Driver '%s' is already registered, "
"aborting...\n", drv->name);
return -EBUSY;
@@ -275,7 +216,9 @@ EXPORT_SYMBOL_GPL(driver_unregister);
* Call kset_find_obj() to iterate over list of drivers on
* a bus to find driver by name. Return driver if found.
*
- * Note that kset_find_obj increments driver's reference count.
+ * This routine provides no locking to prevent the driver it returns
+ * from being unregistered or unloaded while the caller is using it.
+ * The caller is responsible for preventing this.
*/
struct device_driver *driver_find(const char *name, struct bus_type *bus)
{
@@ -283,6 +226,8 @@ struct device_driver *driver_find(const char *name, struct bus_type *bus)
struct driver_private *priv;
if (k) {
+ /* Drop reference added by kset_find_obj() */
+ kobject_put(k);
priv = to_driver(k);
return priv->driver;
}
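Since driver_find() now drops the kobject reference itself, callers must no longer pair it with a put; instead they have to guarantee the driver stays registered while the pointer is in use. A minimal sketch of the new convention (the "foo" name is hypothetical):

#include <linux/device.h>
#include <linux/platform_device.h>

static bool foo_driver_registered(void)
{
	/* No reference is held on return: the caller must ensure the
	 * driver cannot be unregistered while drv is being used. */
	struct device_driver *drv = driver_find("foo", &platform_bus_type);

	return drv != NULL;
}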
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9e60dbe9fd9..7dda4f790f0 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -466,7 +466,7 @@ store_hard_offline_page(struct device *dev,
if (strict_strtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
- ret = __memory_failure(pfn, 0, 0);
+ ret = memory_failure(pfn, 0, 0);
return ret ? ret : count;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f0c605e99ad..a1a72250258 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -621,7 +621,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
int rc;
/* Some devices have extra OF data and an OF-style MODALIAS */
- rc = of_device_uevent(dev,env);
+ rc = of_device_uevent_modalias(dev,env);
if (rc != -ENODEV)
return rc;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 978bbf7ac6a..73ce9fbe983 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -366,7 +366,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node)
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
- || pdd->dev->power.irq_safe))
+ || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
not_suspended++;
if (not_suspended > genpd->in_progress)
@@ -503,6 +503,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
+ if (dev_gpd_data(dev)->always_on)
+ return -EBUSY;
+
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
return -EBUSY;
@@ -764,8 +767,10 @@ static int pm_genpd_prepare(struct device *dev)
genpd_acquire_lock(genpd);
- if (genpd->prepared_count++ == 0)
+ if (genpd->prepared_count++ == 0) {
+ genpd->suspended_count = 0;
genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+ }
genpd_release_lock(genpd);
@@ -820,17 +825,16 @@ static int pm_genpd_suspend(struct device *dev)
}
/**
- * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
* @dev: Device to suspend.
*
* Carry out a late suspend of a device under the assumption that its
* pm_domain field points to the domain member of an object of type
* struct generic_pm_domain representing a PM domain consisting of I/O devices.
*/
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int pm_genpd_suspend_late(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -838,14 +842,28 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
- return 0;
+ return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
+}
- ret = genpd_suspend_late(genpd, dev);
- if (ret)
- return ret;
+/**
+ * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
- if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
genpd_stop_dev(genpd, dev);
@@ -862,13 +880,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
}
/**
- * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
* @dev: Device to resume.
*
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Restore power to the device's PM domain, if necessary, and start the device.
*/
static int pm_genpd_resume_noirq(struct device *dev)
{
@@ -880,7 +895,8 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
+ if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
/*
@@ -890,13 +906,34 @@ static int pm_genpd_resume_noirq(struct device *dev)
*/
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- genpd_start_dev(genpd, dev);
- return genpd_resume_early(genpd, dev);
+ return genpd_start_dev(genpd, dev);
}
/**
- * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_early(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
+}
+
+/**
+ * pm_genpd_resume - Resume of device in an I/O PM domain.
* @dev: Device to resume.
*
* Resume a device under the assumption that its pm_domain field points to the
@@ -917,7 +954,7 @@ static int pm_genpd_resume(struct device *dev)
}
/**
- * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * pm_genpd_freeze - Freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Freeze a device under the assumption that its pm_domain field points to the
@@ -938,7 +975,29 @@ static int pm_genpd_freeze(struct device *dev)
}
/**
- * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_late(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
* @dev: Device to freeze.
*
* Carry out a late freeze of a device under the assumption that its
@@ -949,7 +1008,6 @@ static int pm_genpd_freeze(struct device *dev)
static int pm_genpd_freeze_noirq(struct device *dev)
{
struct generic_pm_domain *genpd;
- int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -957,20 +1015,33 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
- return 0;
+ return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+ 0 : genpd_stop_dev(genpd, dev);
+}
- ret = genpd_freeze_late(genpd, dev);
- if (ret)
- return ret;
+/**
+ * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * @dev: Device to thaw.
+ *
+ * Start the device, unless power has already been removed from the domain
+ * before the system transition.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
- genpd_stop_dev(genpd, dev);
+ dev_dbg(dev, "%s()\n", __func__);
- return 0;
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+ 0 : genpd_start_dev(genpd, dev);
}
/**
- * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
* @dev: Device to thaw.
*
* Carry out an early thaw of a device under the assumption that its
@@ -978,7 +1049,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
* struct generic_pm_domain representing a power domain consisting of I/O
* devices.
*/
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int pm_genpd_thaw_early(struct device *dev)
{
struct generic_pm_domain *genpd;
@@ -988,12 +1059,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off)
- return 0;
-
- genpd_start_dev(genpd, dev);
-
- return genpd_thaw_early(genpd, dev);
+ return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
}
/**
@@ -1018,13 +1084,11 @@ static int pm_genpd_thaw(struct device *dev)
}
/**
- * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
* @dev: Device to resume.
*
- * Carry out an early restore of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Make sure the domain will be in the same power state as before the
+ * hibernation the system is resuming from, and start the device if necessary.
*/
static int pm_genpd_restore_noirq(struct device *dev)
{
@@ -1040,23 +1104,35 @@ static int pm_genpd_restore_noirq(struct device *dev)
* Since all of the "noirq" callbacks are executed sequentially, it is
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
+ *
+ * At this point suspended_count == 0 means we are being run for the
+ * first time for the given domain in the present cycle.
*/
- genpd->status = GPD_STATE_POWER_OFF;
- if (genpd->suspend_power_off) {
+ if (genpd->suspended_count++ == 0) {
/*
- * The boot kernel might put the domain into the power on state,
- * so make sure it really is powered off.
+ * The boot kernel might put the domain into arbitrary state,
+ * so make it appear as powered off to pm_genpd_poweron(), so
+ * that it tries to power it on in case it was really off.
*/
- if (genpd->power_off)
- genpd->power_off(genpd);
- return 0;
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (genpd->suspend_power_off) {
+ /*
+ * If the domain was off before the hibernation, make
+ * sure it will be off going forward.
+ */
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+
+ return 0;
+ }
}
+ if (genpd->suspend_power_off)
+ return 0;
+
pm_genpd_poweron(genpd);
- genpd->suspended_count--;
- genpd_start_dev(genpd, dev);
- return genpd_resume_early(genpd, dev);
+ return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
}
/**
@@ -1099,11 +1175,15 @@ static void pm_genpd_complete(struct device *dev)
#define pm_genpd_prepare NULL
#define pm_genpd_suspend NULL
+#define pm_genpd_suspend_late NULL
#define pm_genpd_suspend_noirq NULL
+#define pm_genpd_resume_early NULL
#define pm_genpd_resume_noirq NULL
#define pm_genpd_resume NULL
#define pm_genpd_freeze NULL
+#define pm_genpd_freeze_late NULL
#define pm_genpd_freeze_noirq NULL
+#define pm_genpd_thaw_early NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
#define pm_genpd_restore_noirq NULL
@@ -1171,6 +1251,38 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
}
/**
+ * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
+ * @genpd_node: Device tree node pointer representing a PM domain to which
+ * the device is added.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
+ struct gpd_timing_data *td)
+{
+ struct generic_pm_domain *genpd = NULL, *gpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (gpd->of_node == genpd_node) {
+ genpd = gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ if (!genpd)
+ return -EINVAL;
+
+ return __pm_genpd_add_device(genpd, dev, td);
+}
+
+/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @genpd: PM domain to remove the device from.
* @dev: Device to be removed.
@@ -1216,6 +1328,26 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
+ * @dev: Device to set/unset the flag for.
+ * @val: The new value of the device's "always on" flag.
+ */
+void pm_genpd_dev_always_on(struct device *dev, bool val)
+{
+ struct pm_subsys_data *psd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ psd = dev_to_psd(dev);
+ if (psd && psd->domain_data)
+ to_gpd_data(psd->domain_data)->always_on = val;
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
+
+/**
* pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
* @genpd: Master PM domain to add the subdomain to.
* @subdomain: Subdomain to be added.
@@ -1450,7 +1582,7 @@ static int pm_genpd_default_suspend_late(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
- return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+ return cb ? cb(dev) : pm_generic_suspend_late(dev);
}
/**
@@ -1461,7 +1593,7 @@ static int pm_genpd_default_resume_early(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
- return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+ return cb ? cb(dev) : pm_generic_resume_early(dev);
}
/**
@@ -1494,7 +1626,7 @@ static int pm_genpd_default_freeze_late(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
- return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+ return cb ? cb(dev) : pm_generic_freeze_late(dev);
}
/**
@@ -1505,7 +1637,7 @@ static int pm_genpd_default_thaw_early(struct device *dev)
{
int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
- return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+ return cb ? cb(dev) : pm_generic_thaw_early(dev);
}
/**
@@ -1557,23 +1689,28 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->poweroff_task = NULL;
genpd->resume_count = 0;
genpd->device_count = 0;
- genpd->suspended_count = 0;
genpd->max_off_time_ns = -1;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
genpd->domain.ops.prepare = pm_genpd_prepare;
genpd->domain.ops.suspend = pm_genpd_suspend;
+ genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+ genpd->domain.ops.resume_early = pm_genpd_resume_early;
genpd->domain.ops.resume = pm_genpd_resume;
genpd->domain.ops.freeze = pm_genpd_freeze;
+ genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+ genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
genpd->domain.ops.thaw = pm_genpd_thaw;
genpd->domain.ops.poweroff = pm_genpd_suspend;
+ genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+ genpd->domain.ops.restore_early = pm_genpd_resume_early;
genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
genpd->dev_ops.save_state = pm_genpd_default_save_state;
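A sketch of how platform code might combine the two additions above, attaching a device to a DT-described domain and then marking it always-on — the function and its caller are hypothetical; passing NULL timing data mirrors what pm_genpd_add_device() does:

#include <linux/of.h>
#include <linux/pm_domain.h>

static int foo_attach_to_domain(struct device_node *domain_np,
				struct device *dev)
{
	int ret;

	/* Look up the generic PM domain by its device tree node. */
	ret = __pm_genpd_of_add_device(domain_np, dev, NULL);
	if (ret)
		return ret;

	/* Keep the device powered across runtime suspend and the
	 * suspend/freeze noirq phases. */
	pm_genpd_dev_always_on(dev, true);
	return 0;
}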
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 10bdd793f0b..d03d290f31c 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev)
}
/**
- * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the PM callback corresponding to @event provided by the driver of
- * @dev, if defined, and return its error code. Return 0 if the callback is
- * not present.
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
*/
-static int __pm_generic_call(struct device *dev, int event, bool noirq)
+int pm_generic_suspend_noirq(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*callback)(struct device *);
-
- if (!pm)
- return 0;
-
- switch (event) {
- case PM_EVENT_SUSPEND:
- callback = noirq ? pm->suspend_noirq : pm->suspend;
- break;
- case PM_EVENT_FREEZE:
- callback = noirq ? pm->freeze_noirq : pm->freeze;
- break;
- case PM_EVENT_HIBERNATE:
- callback = noirq ? pm->poweroff_noirq : pm->poweroff;
- break;
- case PM_EVENT_RESUME:
- callback = noirq ? pm->resume_noirq : pm->resume;
- break;
- case PM_EVENT_THAW:
- callback = noirq ? pm->thaw_noirq : pm->thaw;
- break;
- case PM_EVENT_RESTORE:
- callback = noirq ? pm->restore_noirq : pm->restore;
- break;
- default:
- callback = NULL;
- break;
- }
- return callback ? callback(dev) : 0;
+ return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
/**
- * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
* @dev: Device to suspend.
*/
-int pm_generic_suspend_noirq(struct device *dev)
+int pm_generic_suspend_late(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
}
-EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
/**
* pm_generic_suspend - Generic suspend callback for subsystems.
@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
*/
int pm_generic_suspend(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->suspend ? pm->suspend(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
*/
int pm_generic_freeze_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
/**
+ * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
+
+/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->freeze ? pm->freeze(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
*/
int pm_generic_poweroff_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
/**
+ * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_late(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
+
+/**
* pm_generic_poweroff - Generic poweroff callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
*/
int pm_generic_thaw_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
/**
+ * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
+
+/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->thaw ? pm->thaw(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
*/
int pm_generic_resume_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESUME, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
/**
+ * pm_generic_resume_early - Generic resume_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume_early ? pm->resume_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_early);
+
+/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESUME, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->resume ? pm->resume(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
/**
+ * pm_generic_restore_early - Generic restore_early callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_restore_early(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore_early ? pm->restore_early(dev) : 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_early);
+
+/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ return pm && pm->restore ? pm->restore(dev) : 0;
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
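With the helpers split out per phase, a driver can wire the new late/early callbacks straight into its dev_pm_ops; a hedged sketch (foo_suspend_late/foo_resume_early are hypothetical):

#include <linux/pm.h>

static int foo_suspend_late(struct device *dev)
{
	/* Runs after the regular suspend phase, with runtime PM disabled,
	 * but before interrupts are turned off for the noirq phase. */
	return 0;
}

static int foo_resume_early(struct device *dev)
{
	/* Counterpart of suspend_late: after noirq, before regular resume. */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend_late	= foo_suspend_late,
	.resume_early	= foo_resume_early,
};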
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e2cc3d2e0ec..b462c0e341c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
+LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
}
/**
+ * pm_late_early_op - Return the PM operation appropriate for given PM event.
+ * @ops: PM operations to choose from.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for the device while the returned callback runs.
+ */
+static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
+ pm_message_t state)
+{
+ switch (state.event) {
+#ifdef CONFIG_SUSPEND
+ case PM_EVENT_SUSPEND:
+ return ops->suspend_late;
+ case PM_EVENT_RESUME:
+ return ops->resume_early;
+#endif /* CONFIG_SUSPEND */
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_QUIESCE:
+ return ops->freeze_late;
+ case PM_EVENT_HIBERNATE:
+ return ops->poweroff_late;
+ case PM_EVENT_THAW:
+ case PM_EVENT_RECOVER:
+ return ops->thaw_early;
+ case PM_EVENT_RESTORE:
+ return ops->restore_early;
+#endif /* CONFIG_HIBERNATE_CALLBACKS */
+ }
+
+ return NULL;
+}
+
+/**
* pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_RESUME(0);
if (dev->pm_domain) {
- info = "EARLY power domain ";
+ info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- info = "EARLY type ";
+ info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- info = "EARLY class ";
+ info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- info = "EARLY bus ";
+ info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
- info = "EARLY driver ";
+ info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
/**
- * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
- * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
+ * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
* enable device drivers to receive interrupts.
*/
-void dpm_resume_noirq(pm_message_t state)
+static void dpm_resume_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
int error;
get_device(dev);
- list_move_tail(&dev->power.entry, &dpm_suspended_list);
+ list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
error = device_resume_noirq(dev, state);
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
suspend_stats.failed_resume_noirq++;
dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
+
+ mutex_lock(&dpm_list_mtx);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ dpm_show_time(starttime, state, "noirq");
+ resume_device_irqs();
+}
+
+/**
+ * device_resume_early - Execute an "early resume" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_resume_early(struct device *dev, pm_message_t state)
+{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
+ int error = 0;
+
+ TRACE_DEVICE(dev);
+ TRACE_RESUME(0);
+
+ if (dev->pm_domain) {
+ info = "early power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "early type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "early class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "early bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "early driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
+ TRACE_RESUME(error);
+ return error;
+}
+
+/**
+ * dpm_resume_early - Execute "early resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static void dpm_resume_early(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_late_early_list)) {
+ struct device *dev = to_device(dpm_late_early_list.next);
+ int error;
+
+ get_device(dev);
+ list_move_tail(&dev->power.entry, &dpm_suspended_list);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_resume_early(dev, state);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, " early", error);
}
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "early");
- resume_device_irqs();
}
-EXPORT_SYMBOL_GPL(dpm_resume_noirq);
+
+/**
+ * dpm_resume_start - Execute "noirq" and "early" device callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+void dpm_resume_start(pm_message_t state)
+{
+ dpm_resume_noirq(state);
+ dpm_resume_early(state);
+}
+EXPORT_SYMBOL_GPL(dpm_resume_start);
/**
* device_resume - Execute "resume" callbacks for given device.
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
char *info = NULL;
if (dev->pm_domain) {
- info = "LATE power domain ";
+ info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- info = "LATE type ";
+ info = "noirq type ";
callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- info = "LATE class ";
+ info = "noirq class ";
callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- info = "LATE bus ";
+ info = "noirq bus ";
callback = pm_noirq_op(dev->bus->pm, state);
}
if (!callback && dev->driver && dev->driver->pm) {
- info = "LATE driver ";
+ info = "noirq driver ";
callback = pm_noirq_op(dev->driver->pm, state);
}
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
}
/**
- * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
-int dpm_suspend_noirq(pm_message_t state)
+static int dpm_suspend_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_suspended_list)) {
- struct device *dev = to_device(dpm_suspended_list.prev);
+ while (!list_empty(&dpm_late_early_list)) {
+ struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
mutex_lock(&dpm_list_mtx);
if (error) {
- pm_dev_err(dev, state, " late", error);
+ pm_dev_err(dev, state, " noirq", error);
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
if (error)
dpm_resume_noirq(resume_event(state));
else
+ dpm_show_time(starttime, state, "noirq");
+ return error;
+}
+
+/**
+ * device_suspend_late - Execute a "late suspend" callback for given device.
+ * @dev: Device to handle.
+ * @state: PM transition of the system being carried out.
+ *
+ * Runtime PM is disabled for @dev while this function is being executed.
+ */
+static int device_suspend_late(struct device *dev, pm_message_t state)
+{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
+
+ if (dev->pm_domain) {
+ info = "late power domain ";
+ callback = pm_late_early_op(&dev->pm_domain->ops, state);
+ } else if (dev->type && dev->type->pm) {
+ info = "late type ";
+ callback = pm_late_early_op(dev->type->pm, state);
+ } else if (dev->class && dev->class->pm) {
+ info = "late class ";
+ callback = pm_late_early_op(dev->class->pm, state);
+ } else if (dev->bus && dev->bus->pm) {
+ info = "late bus ";
+ callback = pm_late_early_op(dev->bus->pm, state);
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "late driver ";
+ callback = pm_late_early_op(dev->driver->pm, state);
+ }
+
+ return dpm_run_callback(callback, dev, state, info);
+}
+
+/**
+ * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ */
+static int dpm_suspend_late(pm_message_t state)
+{
+ ktime_t starttime = ktime_get();
+ int error = 0;
+
+ mutex_lock(&dpm_list_mtx);
+ while (!list_empty(&dpm_suspended_list)) {
+ struct device *dev = to_device(dpm_suspended_list.prev);
+
+ get_device(dev);
+ mutex_unlock(&dpm_list_mtx);
+
+ error = device_suspend_late(dev, state);
+
+ mutex_lock(&dpm_list_mtx);
+ if (error) {
+ pm_dev_err(dev, state, " late", error);
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
+ dpm_save_failed_dev(dev_name(dev));
+ put_device(dev);
+ break;
+ }
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_late_early_list);
+ put_device(dev);
+ }
+ mutex_unlock(&dpm_list_mtx);
+ if (error)
+ dpm_resume_early(resume_event(state));
+ else
dpm_show_time(starttime, state, "late");
+
return error;
}
-EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
+
+/**
+ * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
+ * @state: PM transition of the system being carried out.
+ */
+int dpm_suspend_end(pm_message_t state)
+{
+ int error = dpm_suspend_late(state);
+
+ return error ? : dpm_suspend_noirq(state);
+}
+EXPORT_SYMBOL_GPL(dpm_suspend_end);
/**
* legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
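The two exported wrappers replace the old direct calls to dpm_suspend_noirq() and dpm_resume_noirq(); a hedged sketch of how a PM-core caller would use them (foo_enter_suspend() is hypothetical and error handling is trimmed):

#include <linux/pm.h>

static int foo_enter_suspend(void)
{
	/* suspend_late for all devices, then suspend_noirq */
	int error = dpm_suspend_end(PMSG_SUSPEND);

	if (error)
		return error;

	/* ... the platform would enter its sleep state here ... */

	/* resume_noirq for all devices, then resume_early */
	dpm_resume_start(PMSG_RESUME);
	return 0;
}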
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 9bf62323aaf..eeb4bff9505 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -71,6 +71,8 @@ extern void dpm_sysfs_remove(struct device *dev);
extern void rpm_sysfs_remove(struct device *dev);
extern int wakeup_sysfs_add(struct device *dev);
extern void wakeup_sysfs_remove(struct device *dev);
+extern int pm_qos_sysfs_add(struct device *dev);
+extern void pm_qos_sysfs_remove(struct device *dev);
#else /* CONFIG_PM */
@@ -79,5 +81,7 @@ static inline void dpm_sysfs_remove(struct device *dev) {}
static inline void rpm_sysfs_remove(struct device *dev) {}
static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
static inline void wakeup_sysfs_remove(struct device *dev) {}
+static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
+static inline void pm_qos_sysfs_remove(struct device *dev) {}
#endif
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index c5d35883746..71855570922 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
#include <linux/mutex.h>
#include <linux/export.h>
+#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
@@ -166,6 +167,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct dev_pm_qos_request *req, *tmp;
struct pm_qos_constraints *c;
+ /*
+ * If the device's PM QoS resume latency limit has been exposed to user
+ * space, it has to be hidden at this point.
+ */
+ dev_pm_qos_hide_latency_limit(dev);
+
mutex_lock(&dev_pm_qos_mtx);
dev->power.power_state = PMSG_INVALID;
@@ -445,3 +452,57 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
return error;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
+
+#ifdef CONFIG_PM_RUNTIME
+static void __dev_pm_qos_drop_user_request(struct device *dev)
+{
+ dev_pm_qos_remove_request(dev->power.pq_req);
+ dev->power.pq_req = 0;
+}
+
+/**
+ * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
+ * @dev: Device whose PM QoS latency limit is to be exposed to user space.
+ * @value: Initial value of the latency limit.
+ */
+int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
+{
+ struct dev_pm_qos_request *req;
+ int ret;
+
+ if (!device_is_registered(dev) || value < 0)
+ return -EINVAL;
+
+ if (dev->power.pq_req)
+ return -EEXIST;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ ret = dev_pm_qos_add_request(dev, req, value);
+ if (ret < 0)
+ return ret;
+
+ dev->power.pq_req = req;
+ ret = pm_qos_sysfs_add(dev);
+ if (ret)
+ __dev_pm_qos_drop_user_request(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
+
+/**
+ * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
+ * @dev: Device whose PM QoS latency limit is to be hidden from user space.
+ */
+void dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+ if (dev->power.pq_req) {
+ pm_qos_sysfs_remove(dev);
+ __dev_pm_qos_drop_user_request(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
+#endif /* CONFIG_PM_RUNTIME */
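A hedged sketch of a driver opting in (the 100 microsecond initial limit and the foo_* functions are hypothetical); on success this creates power/pm_qos_resume_latency_us under the device in sysfs:

#include <linux/pm_qos.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Let user space constrain this device's resume latency. */
	return dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
}

static int foo_remove(struct platform_device *pdev)
{
	/* Optional here: device_del() also hides the limit via
	 * dev_pm_qos_constraints_destroy(). */
	dev_pm_qos_hide_latency_limit(&pdev->dev);
	return 0;
}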
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index adf41be0ea6..95c12f6cb5b 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
+#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
@@ -217,6 +218,31 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
autosuspend_delay_ms_store);
+static ssize_t pm_qos_latency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
+}
+
+static ssize_t pm_qos_latency_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ s32 value;
+ int ret;
+
+ if (kstrtos32(buf, 0, &value))
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ ret = dev_pm_qos_update_request(dev->power.pq_req, value);
+ return ret < 0 ? ret : n;
+}
+
+static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
+ pm_qos_latency_show, pm_qos_latency_store);
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
@@ -490,6 +516,17 @@ static struct attribute_group pm_runtime_attr_group = {
.attrs = runtime_attrs,
};
+static struct attribute *pm_qos_attrs[] = {
+#ifdef CONFIG_PM_RUNTIME
+ &dev_attr_pm_qos_resume_latency_us.attr,
+#endif /* CONFIG_PM_RUNTIME */
+ NULL,
+};
+static struct attribute_group pm_qos_attr_group = {
+ .name = power_group_name,
+ .attrs = pm_qos_attrs,
+};
+
int dpm_sysfs_add(struct device *dev)
{
int rc;
@@ -530,6 +567,16 @@ void wakeup_sysfs_remove(struct device *dev)
sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}
+int pm_qos_sysfs_add(struct device *dev)
+{
+ return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
+}
+
+void pm_qos_sysfs_remove(struct device *dev)
+{
+ sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
+}
+
void rpm_sysfs_remove(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index caf995fb774..2a3e581b8dc 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -53,6 +53,23 @@ static void pm_wakeup_timer_fn(unsigned long data);
static LIST_HEAD(wakeup_sources);
/**
+ * wakeup_source_prepare - Prepare a new wakeup source for initialization.
+ * @ws: Wakeup source to prepare.
+ * @name: Pointer to the name of the new wakeup source.
+ *
+ * Callers must ensure that the @name string won't be freed when @ws is still in
+ * use.
+ */
+void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
+{
+ if (ws) {
+ memset(ws, 0, sizeof(*ws));
+ ws->name = name;
+ }
+}
+EXPORT_SYMBOL_GPL(wakeup_source_prepare);
+
+/**
* wakeup_source_create - Create a struct wakeup_source object.
* @name: Name of the new wakeup source.
*/
@@ -60,37 +77,44 @@ struct wakeup_source *wakeup_source_create(const char *name)
{
struct wakeup_source *ws;
- ws = kzalloc(sizeof(*ws), GFP_KERNEL);
+ ws = kmalloc(sizeof(*ws), GFP_KERNEL);
if (!ws)
return NULL;
- spin_lock_init(&ws->lock);
- if (name)
- ws->name = kstrdup(name, GFP_KERNEL);
-
+ wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
/**
+ * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
+ * @ws: Wakeup source to prepare for destruction.
+ *
+ * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
+ * be run in parallel with this function for the same wakeup source object.
+ */
+void wakeup_source_drop(struct wakeup_source *ws)
+{
+ if (!ws)
+ return;
+
+ del_timer_sync(&ws->timer);
+ __pm_relax(ws);
+}
+EXPORT_SYMBOL_GPL(wakeup_source_drop);
+
+/**
* wakeup_source_destroy - Destroy a struct wakeup_source object.
* @ws: Wakeup source to destroy.
+ *
+ * Use only for wakeup source objects created with wakeup_source_create().
*/
void wakeup_source_destroy(struct wakeup_source *ws)
{
if (!ws)
return;
- spin_lock_irq(&ws->lock);
- while (ws->active) {
- spin_unlock_irq(&ws->lock);
-
- schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
-
- spin_lock_irq(&ws->lock);
- }
- spin_unlock_irq(&ws->lock);
-
+ wakeup_source_drop(ws);
kfree(ws->name);
kfree(ws);
}
@@ -105,6 +129,7 @@ void wakeup_source_add(struct wakeup_source *ws)
if (WARN_ON(!ws))
return;
+ spin_lock_init(&ws->lock);
setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
ws->active = false;
@@ -152,8 +177,10 @@ EXPORT_SYMBOL_GPL(wakeup_source_register);
*/
void wakeup_source_unregister(struct wakeup_source *ws)
{
- wakeup_source_remove(ws);
- wakeup_source_destroy(ws);
+ if (ws) {
+ wakeup_source_remove(ws);
+ wakeup_source_destroy(ws);
+ }
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
@@ -349,7 +376,6 @@ static void wakeup_source_activate(struct wakeup_source *ws)
{
ws->active = true;
ws->active_count++;
- ws->timer_expires = jiffies;
ws->last_time = ktime_get();
/* Increment the counter of events in progress. */
@@ -370,9 +396,14 @@ void __pm_stay_awake(struct wakeup_source *ws)
return;
spin_lock_irqsave(&ws->lock, flags);
+
ws->event_count++;
if (!ws->active)
wakeup_source_activate(ws);
+
+ del_timer(&ws->timer);
+ ws->timer_expires = 0;
+
spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);
@@ -438,6 +469,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
ws->max_time = duration;
del_timer(&ws->timer);
+ ws->timer_expires = 0;
/*
* Increment the counter of registered wakeup events and decrement the
@@ -492,11 +524,22 @@ EXPORT_SYMBOL_GPL(pm_relax);
* pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
* @data: Address of the wakeup source object associated with the event source.
*
- * Call __pm_relax() for the wakeup source whose address is stored in @data.
+ * Call wakeup_source_deactivate() for the wakeup source whose address is stored
+ * in @data, provided that it is currently active, its timer has not been
+ * canceled, and the timer's expiration time is not in the future.
*/
static void pm_wakeup_timer_fn(unsigned long data)
{
- __pm_relax((struct wakeup_source *)data);
+ struct wakeup_source *ws = (struct wakeup_source *)data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ws->lock, flags);
+
+ if (ws->active && ws->timer_expires
+ && time_after_eq(jiffies, ws->timer_expires))
+ wakeup_source_deactivate(ws);
+
+ spin_unlock_irqrestore(&ws->lock, flags);
}
/**
@@ -534,7 +577,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
if (!expires)
expires = 1;
- if (time_after(expires, ws->timer_expires)) {
+ if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
mod_timer(&ws->timer, expires);
ws->timer_expires = expires;
}
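
With the timer_expires bookkeeping above, __pm_stay_awake() now cancels any armed timeout (the source then stays active until an explicit __pm_relax()), while __pm_wakeup_event() only ever extends a pending timeout, never shortens it. A sketch of the intended call pattern from an interrupt handler; the handler and the 100 ms budget are illustrative:

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct wakeup_source *ws = data;

	/*
	 * Report a wakeup event and give the rest of the system 100 ms
	 * to process it; unless the timeout is extended by another event,
	 * pm_wakeup_timer_fn() deactivates the source when it expires.
	 */
	__pm_wakeup_event(ws, 100);

	return IRQ_HANDLED;
}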
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 1a02b7537c8..fcafc5b2e65 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -22,6 +22,7 @@ struct regcache_ops;
struct regmap_format {
size_t buf_size;
size_t reg_bytes;
+ size_t pad_bytes;
size_t val_bytes;
void (*format_write)(struct regmap *map,
unsigned int reg, unsigned int val);
@@ -65,16 +66,19 @@ struct regmap {
unsigned int num_reg_defaults_raw;
/* if set, only the cache is modified not the HW */
- unsigned int cache_only:1;
+ u32 cache_only;
/* if set, only the HW is modified not the cache */
- unsigned int cache_bypass:1;
+ u32 cache_bypass;
/* if set, remember to free reg_defaults_raw */
- unsigned int cache_free:1;
+ bool cache_free;
struct reg_default *reg_defaults;
const void *reg_defaults_raw;
void *cache;
- bool cache_dirty;
+ u32 cache_dirty;
+
+ struct reg_default *patch;
+ int patch_regs;
};
struct regcache_ops {
@@ -84,7 +88,7 @@ struct regcache_ops {
int (*exit)(struct regmap *map);
int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
- int (*sync)(struct regmap *map);
+ int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
};
bool regmap_writeable(struct regmap *map, unsigned int reg);
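
The flag fields above widen from bitfields and bool to u32 so that their addresses can be passed straight to debugfs_create_bool(), which at this point takes a u32 * (see the regmap-debugfs.c hunk later in this patch). A small sketch of that interface; my_state and the "enabled" file are illustrative:

#include <linux/debugfs.h>
#include <linux/types.h>

struct my_state {
	/* u32 rather than bool: debugfs_create_bool() expects a u32 * */
	u32 enabled;
};

static void my_debugfs_init(struct dentry *parent, struct my_state *st)
{
	debugfs_create_bool("enabled", 0400, parent, &st->enabled);
}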
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index b7d16143ede..77dc5327228 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -331,7 +331,8 @@ out:
return ret;
}
-static int regcache_lzo_sync(struct regmap *map)
+static int regcache_lzo_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
{
struct regcache_lzo_ctx **lzo_blocks;
unsigned int val;
@@ -339,10 +340,21 @@ static int regcache_lzo_sync(struct regmap *map)
int ret;
lzo_blocks = map->cache;
- for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) {
+ i = min;
+ for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
+ lzo_blocks[0]->sync_bmp_nbits) {
+ if (i > max)
+ continue;
+
ret = regcache_read(map, i, &val);
if (ret)
return ret;
+
+ /* Is this the hardware default? If so skip. */
+ ret = regcache_lookup_reg(map, i);
+			if (ret >= 0 && val == map->reg_defaults[ret].def)
+ continue;
+
map->cache_bypass = 1;
ret = _regmap_write(map, i, val);
map->cache_bypass = 0;
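
for_each_set_bit_from() is the variant of for_each_set_bit() that starts scanning at the iterator's current value instead of bit 0, which is what lets the LZO sync begin at min. A toy sketch of the iteration; the bitmap and range are made up (the patch uses continue past max, which is equivalent to break here since bits come back in increasing order):

#include <linux/bitops.h>
#include <linux/kernel.h>

static void walk_range(unsigned long *bmp, unsigned int nbits,
		       unsigned int min, unsigned int max)
{
	unsigned int i = min;	/* iteration starts here, not at 0 */

	for_each_set_bit_from(i, bmp, nbits) {
		if (i > max)
			break;
		pr_info("bit %u is set\n", i);
	}
}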
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 32620c4f168..8d51916a283 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -357,7 +357,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
return 0;
}
-static int regcache_rbtree_sync(struct regmap *map)
+static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
+ unsigned int max)
{
struct regcache_rbtree_ctx *rbtree_ctx;
struct rb_node *node;
@@ -365,19 +366,37 @@ static int regcache_rbtree_sync(struct regmap *map)
unsigned int regtmp;
unsigned int val;
int ret;
- int i;
+ int i, base, end;
rbtree_ctx = map->cache;
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
- for (i = 0; i < rbnode->blklen; i++) {
+
+		/* The tree is ordered by base_reg, so stop once a block
+		 * starts past max; blocks entirely below min are skipped.
+		 */
+		if (rbnode->base_reg > max)
+			break;
+		if (rbnode->base_reg + rbnode->blklen < min)
+			continue;
+
+ if (min > rbnode->base_reg)
+ base = min - rbnode->base_reg;
+ else
+ base = 0;
+
+ if (max < rbnode->base_reg + rbnode->blklen)
+			end = max - rbnode->base_reg + 1;
+ else
+ end = rbnode->blklen;
+
+ for (i = base; i < end; i++) {
regtmp = rbnode->base_reg + i;
val = regcache_rbtree_get_register(rbnode, i,
map->cache_word_size);
/* Is this the hardware default? If so skip. */
-			ret = regcache_lookup_reg(map, i);
+			ret = regcache_lookup_reg(map, regtmp);
- if (ret > 0 && val == map->reg_defaults[ret].def)
+ if (ret >= 0 && val == map->reg_defaults[ret].def)
continue;
map->cache_bypass = 1;
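
The clamping above computes, for each rbtree block covering registers [base_reg, base_reg + blklen - 1], the block-relative half-open window [base, end) that intersects the requested [min, max] range. The same arithmetic as a standalone helper, with the corrected upper bound; names are illustrative:

#include <linux/types.h>

/*
 * Intersect the block [base_reg, base_reg + blklen - 1] with [min, max]
 * and return the block-relative half-open window [*base, *end).
 * Returns false when the ranges do not overlap at all.
 */
static bool clamp_block(unsigned int base_reg, unsigned int blklen,
			unsigned int min, unsigned int max,
			unsigned int *base, unsigned int *end)
{
	if (base_reg > max || base_reg + blklen <= min)
		return false;

	*base = (min > base_reg) ? min - base_reg : 0;
	*end = (max < base_reg + blklen) ? max - base_reg + 1 : blklen;
	return true;
}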
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d1daa5e9fad..938cb1d2ea2 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -35,12 +35,17 @@ static int regcache_hw_init(struct regmap *map)
return -EINVAL;
if (!map->reg_defaults_raw) {
+ u32 cache_bypass = map->cache_bypass;
dev_warn(map->dev, "No cache defaults, reading back from HW\n");
+
+		/* Bypass cache access until the data has been read from HW */
+ map->cache_bypass = 1;
tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
if (!tmp_buf)
return -EINVAL;
ret = regmap_bulk_read(map, 0, tmp_buf,
map->num_reg_defaults_raw);
+ map->cache_bypass = cache_bypass;
if (ret < 0) {
kfree(tmp_buf);
return ret;
@@ -211,7 +216,6 @@ int regcache_read(struct regmap *map,
return -EINVAL;
}
-EXPORT_SYMBOL_GPL(regcache_read);
/**
* regcache_write: Set the value of a given register in the cache.
@@ -238,7 +242,6 @@ int regcache_write(struct regmap *map,
return 0;
}
-EXPORT_SYMBOL_GPL(regcache_write);
/**
* regcache_sync: Sync the register cache with the hardware.
@@ -254,12 +257,11 @@ EXPORT_SYMBOL_GPL(regcache_write);
int regcache_sync(struct regmap *map)
{
int ret = 0;
- unsigned int val;
unsigned int i;
const char *name;
unsigned int bypass;
- BUG_ON(!map->cache_ops);
+ BUG_ON(!map->cache_ops || !map->cache_ops->sync);
mutex_lock(&map->lock);
/* Remember the initial bypass state */
@@ -268,26 +270,27 @@ int regcache_sync(struct regmap *map)
map->cache_ops->name);
name = map->cache_ops->name;
trace_regcache_sync(map->dev, name, "start");
+
if (!map->cache_dirty)
goto out;
- if (map->cache_ops->sync) {
- ret = map->cache_ops->sync(map);
- } else {
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = regcache_read(map, i, &val);
- if (ret < 0)
- goto out;
- map->cache_bypass = 1;
- ret = _regmap_write(map, i, val);
- map->cache_bypass = 0;
- if (ret < 0)
- goto out;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- }
+ /* Apply any patch first */
+ map->cache_bypass = 1;
+ for (i = 0; i < map->patch_regs; i++) {
+ ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to write %x = %x: %d\n",
+ map->patch[i].reg, map->patch[i].def, ret);
+ goto out;
+ }
}
+ map->cache_bypass = 0;
+
+ ret = map->cache_ops->sync(map, 0, map->max_register);
+
+ if (ret == 0)
+ map->cache_dirty = false;
+
out:
trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
@@ -299,6 +302,51 @@ out:
EXPORT_SYMBOL_GPL(regcache_sync);
/**
+ * regcache_sync_region: Sync part of the register cache with the hardware.
+ *
+ * @map: map to sync.
+ * @min: first register to sync
+ * @max: last register to sync
+ *
+ * Write all non-default register values in the specified region to
+ * the hardware.
+ *
+ * Return a negative value on failure, 0 on success.
+ */
+int regcache_sync_region(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ int ret = 0;
+ const char *name;
+ unsigned int bypass;
+
+ BUG_ON(!map->cache_ops || !map->cache_ops->sync);
+
+ mutex_lock(&map->lock);
+
+ /* Remember the initial bypass state */
+ bypass = map->cache_bypass;
+
+ name = map->cache_ops->name;
+ dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
+
+ trace_regcache_sync(map->dev, name, "start region");
+
+ if (!map->cache_dirty)
+ goto out;
+
+ ret = map->cache_ops->sync(map, min, max);
+
+out:
+ trace_regcache_sync(map->dev, name, "stop region");
+ /* Restore the bypass state */
+ map->cache_bypass = bypass;
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regcache_sync_region);
+
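
A typical consumer of regcache_sync_region() is a driver that powers down only one block of a chip and, on resume, wants to replay just the affected registers rather than the whole map. A hedged sketch; the my_codec structure and register numbers are illustrative:

#include <linux/device.h>
#include <linux/regmap.h>

struct my_codec {
	struct regmap *regmap;
};

static int my_codec_resume(struct device *dev)
{
	struct my_codec *codec = dev_get_drvdata(dev);
	int ret;

	/* Re-sync only the DAC block, e.g. registers 0x30..0x3f */
	ret = regcache_sync_region(codec->regmap, 0x30, 0x3f);
	if (ret != 0)
		dev_err(dev, "Failed to sync DAC registers: %d\n", ret);

	return ret;
}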
+/**
* regcache_cache_only: Put a register map into cache only mode
*
* @map: map to configure
@@ -315,6 +363,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
mutex_lock(&map->lock);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
+ trace_regmap_cache_only(map->dev, enable);
mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -352,6 +401,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
mutex_lock(&map->lock);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
+ trace_regmap_cache_bypass(map->dev, enable);
mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
@@ -374,10 +424,16 @@ bool regcache_set_val(void *base, unsigned int idx,
cache[idx] = val;
break;
}
+ case 4: {
+ u32 *cache = base;
+ if (cache[idx] == val)
+ return true;
+ cache[idx] = val;
+ break;
+ }
default:
BUG();
}
- /* unreachable */
return false;
}
@@ -396,6 +452,10 @@ unsigned int regcache_get_val(const void *base, unsigned int idx,
const u16 *cache = base;
return cache[idx];
}
+ case 4: {
+ const u32 *cache = base;
+ return cache[idx];
+ }
default:
BUG();
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 6f397476e27..372f81a2120 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -11,7 +11,6 @@
*/
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
@@ -33,6 +32,35 @@ static int regmap_open_file(struct inode *inode, struct file *file)
return 0;
}
+static ssize_t regmap_name_read_file(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct regmap *map = file->private_data;
+ int ret;
+ char *buf;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
+ if (ret < 0) {
+ kfree(buf);
+ return ret;
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations regmap_name_fops = {
+ .open = regmap_open_file,
+ .read = regmap_name_read_file,
+ .llseek = default_llseek,
+};
+
static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -103,9 +131,51 @@ out:
return ret;
}
+#undef REGMAP_ALLOW_WRITE_DEBUGFS
+#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
+/*
+ * Writing to registers from debugfs can be dangerous, especially with
+ * clients such as PMICs, so no real compile-time configuration option is
+ * provided for this feature; anyone who wants to use it will need to
+ * modify the source code directly.
+ */
+static ssize_t regmap_map_write_file(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ size_t buf_size;
+ char *start = buf;
+ unsigned long reg, value;
+ struct regmap *map = file->private_data;
+
+ buf_size = min(count, (sizeof(buf)-1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ buf[buf_size] = 0;
+
+ while (*start == ' ')
+ start++;
+ reg = simple_strtoul(start, &start, 16);
+ while (*start == ' ')
+ start++;
+ if (strict_strtoul(start, 16, &value))
+ return -EINVAL;
+
+ /* Userspace has been fiddling around behind the kernel's back */
+ add_taint(TAINT_USER);
+
+ regmap_write(map, reg, value);
+ return buf_size;
+}
+#else
+#define regmap_map_write_file NULL
+#endif
+
static const struct file_operations regmap_map_fops = {
.open = regmap_open_file,
.read = regmap_map_read_file,
+ .write = regmap_map_write_file,
.llseek = default_llseek,
};
@@ -186,12 +256,24 @@ void regmap_debugfs_init(struct regmap *map)
return;
}
+ debugfs_create_file("name", 0400, map->debugfs,
+ map, &regmap_name_fops);
+
if (map->max_register) {
debugfs_create_file("registers", 0400, map->debugfs,
map, &regmap_map_fops);
debugfs_create_file("access", 0400, map->debugfs,
map, &regmap_access_fops);
}
+
+ if (map->cache_type) {
+ debugfs_create_bool("cache_only", 0400, map->debugfs,
+ &map->cache_only);
+ debugfs_create_bool("cache_dirty", 0400, map->debugfs,
+ &map->cache_dirty);
+ debugfs_create_bool("cache_bypass", 0400, map->debugfs,
+ &map->cache_bypass);
+ }
}
void regmap_debugfs_exit(struct regmap *map)
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 38621ec87c0..9a3a8c56438 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -111,4 +111,21 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
}
EXPORT_SYMBOL_GPL(regmap_init_i2c);
+/**
+ * devm_regmap_init_i2c(): Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&i2c->dev, &regmap_i2c, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
+
MODULE_LICENSE("GPL");
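
With the devm_ variants a driver needs no regmap_exit() in its remove path or error unwinding; devres frees the map when the device is unbound. A sketch of a probe using the I2C variant (the SPI one below is identical apart from the handle type); the my_chip_* names are illustrative:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config my_chip_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x7f,
};

static int my_chip_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(i2c, &my_chip_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);	/* nothing to unwind */

	i2c_set_clientdata(i2c, map);
	return 0;
}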
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 2560658de34..7c0c35a39c3 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -70,4 +70,21 @@ struct regmap *regmap_init_spi(struct spi_device *spi,
}
EXPORT_SYMBOL_GPL(regmap_init_spi);
+/**
+ * devm_regmap_init_spi(): Initialise managed register map
+ *
+ * @spi: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The map will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_spi(struct spi_device *spi,
+ const struct regmap_config *config)
+{
+ return devm_regmap_init(&spi->dev, &regmap_spi, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
+
MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 65558034318..7a3f535e481 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -10,8 +10,9 @@
* published by the Free Software Foundation.
*/
+#include <linux/device.h>
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
@@ -36,6 +37,9 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
if (map->max_register && reg > map->max_register)
return false;
+ if (map->format.format_write)
+ return false;
+
if (map->readable_reg)
return map->readable_reg(map->dev, reg);
@@ -44,7 +48,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
bool regmap_volatile(struct regmap *map, unsigned int reg)
{
- if (map->max_register && reg > map->max_register)
+ if (!regmap_readable(map, reg))
return false;
if (map->volatile_reg)
@@ -55,7 +59,7 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
bool regmap_precious(struct regmap *map, unsigned int reg)
{
- if (map->max_register && reg > map->max_register)
+ if (!regmap_readable(map, reg))
return false;
if (map->precious_reg)
@@ -76,6 +80,14 @@ static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
return true;
}
+static void regmap_format_2_6_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ *out = (reg << 6) | val;
+}
+
static void regmap_format_4_12_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
@@ -114,6 +126,13 @@ static void regmap_format_16(void *buf, unsigned int val)
b[0] = cpu_to_be16(val);
}
+static void regmap_format_32(void *buf, unsigned int val)
+{
+ __be32 *b = buf;
+
+ b[0] = cpu_to_be32(val);
+}
+
static unsigned int regmap_parse_8(void *buf)
{
u8 *b = buf;
@@ -130,6 +149,15 @@ static unsigned int regmap_parse_16(void *buf)
return b[0];
}
+static unsigned int regmap_parse_32(void *buf)
+{
+ __be32 *b = buf;
+
+ b[0] = be32_to_cpu(b[0]);
+
+ return b[0];
+}
+
/**
* regmap_init(): Initialise register map
*
@@ -159,8 +187,10 @@ struct regmap *regmap_init(struct device *dev,
mutex_init(&map->lock);
map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
- map->format.reg_bytes = config->reg_bits / 8;
- map->format.val_bytes = config->val_bits / 8;
+ map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
+ map->format.pad_bytes = config->pad_bits / 8;
+ map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
+ map->format.buf_size += map->format.pad_bytes;
map->dev = dev;
map->bus = bus;
map->max_register = config->max_register;
@@ -178,6 +208,16 @@ struct regmap *regmap_init(struct device *dev,
}
switch (config->reg_bits) {
+ case 2:
+ switch (config->val_bits) {
+ case 6:
+ map->format.format_write = regmap_format_2_6_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
case 4:
switch (config->val_bits) {
case 12:
@@ -216,6 +256,10 @@ struct regmap *regmap_init(struct device *dev,
map->format.format_reg = regmap_format_16;
break;
+ case 32:
+ map->format.format_reg = regmap_format_32;
+ break;
+
default:
goto err_map;
}
@@ -229,13 +273,17 @@ struct regmap *regmap_init(struct device *dev,
map->format.format_val = regmap_format_16;
map->format.parse_val = regmap_parse_16;
break;
+ case 32:
+ map->format.format_val = regmap_format_32;
+ map->format.parse_val = regmap_parse_32;
+ break;
}
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
goto err_map;
- map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
+ map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
if (map->work_buf == NULL) {
ret = -ENOMEM;
goto err_map;
@@ -258,6 +306,45 @@ err:
}
EXPORT_SYMBOL_GPL(regmap_init);
+static void devm_regmap_release(struct device *dev, void *res)
+{
+ regmap_exit(*(struct regmap **)res);
+}
+
+/**
+ * devm_regmap_init(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @bus: Bus-specific callbacks to use with device
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. This function should generally not be called
+ * directly, it should be called by bus-specific init functions. The
+ * map will be automatically freed by the device management code.
+ */
+struct regmap *devm_regmap_init(struct device *dev,
+ const struct regmap_bus *bus,
+ const struct regmap_config *config)
+{
+ struct regmap **ptr, *regmap;
+
+ ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ regmap = regmap_init(dev, bus, config);
+ if (!IS_ERR(regmap)) {
+ *ptr = regmap;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return regmap;
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init);
+
/**
* regmap_reinit_cache(): Reinitialise the current register cache
*
@@ -276,6 +363,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
mutex_lock(&map->lock);
regcache_exit(map);
+ regmap_debugfs_exit(map);
map->max_register = config->max_register;
map->writeable_reg = config->writeable_reg;
@@ -284,6 +372,8 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
+ regmap_debugfs_init(map);
+
map->cache_bypass = false;
map->cache_only = false;
@@ -321,6 +411,26 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
if (!map->writeable_reg(map->dev, reg + i))
return -EINVAL;
+ if (!map->cache_bypass && map->format.parse_val) {
+ unsigned int ival;
+ int val_bytes = map->format.val_bytes;
+ for (i = 0; i < val_len / val_bytes; i++) {
+ memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
+ ival = map->format.parse_val(map->work_buf);
+ ret = regcache_write(map, reg + i, ival);
+ if (ret) {
+ dev_err(map->dev,
+ "Error in caching of register: %u ret: %d\n",
+ reg + i, ret);
+ return ret;
+ }
+ }
+ if (map->cache_only) {
+ map->cache_dirty = true;
+ return 0;
+ }
+ }
+
map->format.format_reg(map->work_buf, reg);
u8[0] |= map->write_flag_mask;
@@ -332,23 +442,28 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
* send the work_buf directly, otherwise try to do a gather
* write.
*/
- if (val == map->work_buf + map->format.reg_bytes)
+ if (val == (map->work_buf + map->format.pad_bytes +
+ map->format.reg_bytes))
ret = map->bus->write(map->dev, map->work_buf,
- map->format.reg_bytes + val_len);
+ map->format.reg_bytes +
+ map->format.pad_bytes +
+ val_len);
else if (map->bus->gather_write)
ret = map->bus->gather_write(map->dev, map->work_buf,
- map->format.reg_bytes,
+ map->format.reg_bytes +
+ map->format.pad_bytes,
val, val_len);
/* If that didn't work fall back on linearising by hand. */
if (ret == -ENOTSUPP) {
- len = map->format.reg_bytes + val_len;
- buf = kmalloc(len, GFP_KERNEL);
+ len = map->format.reg_bytes + map->format.pad_bytes + val_len;
+ buf = kzalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
memcpy(buf, map->work_buf, map->format.reg_bytes);
- memcpy(buf + map->format.reg_bytes, val, val_len);
+ memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
+ val, val_len);
ret = map->bus->write(map->dev, buf, len);
kfree(buf);
@@ -366,7 +481,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
int ret;
BUG_ON(!map->format.format_write && !map->format.format_val);
- if (!map->cache_bypass) {
+ if (!map->cache_bypass && map->format.format_write) {
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
@@ -390,10 +505,12 @@ int _regmap_write(struct regmap *map, unsigned int reg,
return ret;
} else {
- map->format.format_val(map->work_buf + map->format.reg_bytes,
- val);
+ map->format.format_val(map->work_buf + map->format.reg_bytes
+ + map->format.pad_bytes, val);
return _regmap_raw_write(map, reg,
- map->work_buf + map->format.reg_bytes,
+ map->work_buf +
+ map->format.reg_bytes +
+ map->format.pad_bytes,
map->format.val_bytes);
}
}
@@ -441,12 +558,8 @@ EXPORT_SYMBOL_GPL(regmap_write);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
- size_t val_count = val_len / map->format.val_bytes;
int ret;
- WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
- map->cache_type != REGCACHE_NONE);
-
mutex_lock(&map->lock);
ret = _regmap_raw_write(map, reg, val, val_len);
@@ -457,6 +570,56 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_raw_write);
+/**
+ * regmap_bulk_write(): Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @reg: First register to be written to
+ * @val: Block of data to be written, in native register size for device
+ * @val_count: Number of registers to write
+ *
+ * This function is intended to be used for writing a large block of
+ * data to the device, either in a single transfer or in multiple
+ * transfers.
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
+ size_t val_count)
+{
+ int ret = 0, i;
+ size_t val_bytes = map->format.val_bytes;
+ void *wval;
+
+ if (!map->format.parse_val)
+ return -EINVAL;
+
+ mutex_lock(&map->lock);
+
+	/* No formatting is required if val_bytes is 1 */
+ if (val_bytes == 1) {
+ wval = (void *)val;
+ } else {
+ wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
+ if (!wval) {
+ ret = -ENOMEM;
+ dev_err(map->dev, "Error in memory allocation\n");
+ goto out;
+ }
+ for (i = 0; i < val_count * val_bytes; i += val_bytes)
+ map->format.parse_val(wval + i);
+ }
+ ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
+
+ if (val_bytes != 1)
+ kfree(wval);
+
+out:
+ mutex_unlock(&map->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_bulk_write);
+
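
regmap_bulk_write() takes values in the device's native register width, in CPU byte order; the scratch copy is byte-swapped via parse_val() and pushed out through a single raw write where the bus allows. A usage sketch, assuming 16-bit registers; the register base and data are made up:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Program four consecutive 16-bit coefficient registers in one transfer */
static int my_load_coeffs(struct regmap *map)
{
	u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };

	return regmap_bulk_write(map, 0x40, coeffs, ARRAY_SIZE(coeffs));
}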
static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
unsigned int val_len)
{
@@ -476,7 +639,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
trace_regmap_hw_read_start(map->dev, reg,
val_len / map->format.val_bytes);
- ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
+ ret = map->bus->read(map->dev, map->work_buf,
+ map->format.reg_bytes + map->format.pad_bytes,
val, val_len);
trace_regmap_hw_read_done(map->dev, reg,
@@ -549,16 +713,32 @@ EXPORT_SYMBOL_GPL(regmap_read);
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t val_len)
{
- size_t val_count = val_len / map->format.val_bytes;
- int ret;
-
- WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
- map->cache_type != REGCACHE_NONE);
+ size_t val_bytes = map->format.val_bytes;
+ size_t val_count = val_len / val_bytes;
+ unsigned int v;
+ int ret, i;
mutex_lock(&map->lock);
- ret = _regmap_raw_read(map, reg, val, val_len);
+ if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
+ map->cache_type == REGCACHE_NONE) {
+ /* Physical block read if there's no cache involved */
+ ret = _regmap_raw_read(map, reg, val, val_len);
+ } else {
+ /* Otherwise go word by word for the cache; should be low
+ * cost as we expect to hit the cache.
+ */
+ for (i = 0; i < val_count; i++) {
+ ret = _regmap_read(map, reg + i, &v);
+ if (ret != 0)
+ goto out;
+
+ map->format.format_val(val + (i * val_bytes), v);
+ }
+ }
+
+ out:
mutex_unlock(&map->lock);
return ret;
@@ -672,6 +852,79 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+/**
+ * regmap_register_patch: Register and apply register updates to be applied
+ * on device initialisation
+ *
+ * @map: Register map to apply updates to.
+ * @regs: Values to update.
+ * @num_regs: Number of entries in regs.
+ *
+ * Register a set of register updates to be applied to the device
+ * whenever the device registers are synchronised with the cache and
+ * apply them immediately. Typically this is used to apply
+ * corrections to the device defaults on startup, such
+ * as the updates some vendors provide to undocumented registers.
+ */
+int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
+ int num_regs)
+{
+ int i, ret;
+ bool bypass;
+
+ /* If needed the implementation can be extended to support this */
+ if (map->patch)
+ return -EBUSY;
+
+ mutex_lock(&map->lock);
+
+ bypass = map->cache_bypass;
+
+ map->cache_bypass = true;
+
+ /* Write out first; it's useful to apply even if we fail later. */
+ for (i = 0; i < num_regs; i++) {
+ ret = _regmap_write(map, regs[i].reg, regs[i].def);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to write %x = %x: %d\n",
+ regs[i].reg, regs[i].def, ret);
+ goto out;
+ }
+ }
+
+ map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
+ if (map->patch != NULL) {
+ memcpy(map->patch, regs,
+ num_regs * sizeof(struct reg_default));
+ map->patch_regs = num_regs;
+ } else {
+ ret = -ENOMEM;
+ }
+
+out:
+ map->cache_bypass = bypass;
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_register_patch);
+
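
A register patch is typically a short table taken from a vendor's errata notes and registered once at probe time; it is written immediately and then replayed ahead of the rest of the cache by regcache_sync(). A hedged sketch; addresses and values are made up:

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Vendor-recommended fix-ups for undocumented registers */
static const struct reg_default my_chip_patch[] = {
	{ 0x7a, 0x0023 },
	{ 0x7b, 0x0100 },
};

static int my_chip_init(struct regmap *map)
{
	return regmap_register_patch(map, my_chip_patch,
				     ARRAY_SIZE(my_chip_patch));
}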
+/**
+ * regmap_get_val_bytes(): Report the size of a register value
+ *
+ * Report the size of a register value, mainly intended for use by
+ * generic infrastructure built on top of regmap.
+ */
+int regmap_get_val_bytes(struct regmap *map)
+{
+ if (map->format.format_write)
+ return -EINVAL;
+
+ return map->format.val_bytes;
+}
+EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
+
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644
index 00000000000..05f150382da
--- /dev/null
+++ b/drivers/base/soc.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2011
+ *
+ * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
+#include <linux/err.h>
+
+static DEFINE_IDR(soc_ida);
+static DEFINE_SPINLOCK(soc_lock);
+
+static ssize_t soc_info_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+struct soc_device {
+ struct device dev;
+ struct soc_device_attribute *attr;
+ int soc_dev_num;
+};
+
+static struct bus_type soc_bus_type = {
+ .name = "soc",
+};
+
+static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
+static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
+
+struct device *soc_device_to_device(struct soc_device *soc_dev)
+{
+ return &soc_dev->dev;
+}
+
+static mode_t soc_attribute_mode(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ if ((attr == &dev_attr_machine.attr)
+ && (soc_dev->attr->machine != NULL))
+ return attr->mode;
+ if ((attr == &dev_attr_family.attr)
+ && (soc_dev->attr->family != NULL))
+ return attr->mode;
+ if ((attr == &dev_attr_revision.attr)
+ && (soc_dev->attr->revision != NULL))
+ return attr->mode;
+ if ((attr == &dev_attr_soc_id.attr)
+ && (soc_dev->attr->soc_id != NULL))
+ return attr->mode;
+
+ /* Unknown or unfilled attribute. */
+ return 0;
+}
+
+static ssize_t soc_info_get(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ if (attr == &dev_attr_machine)
+ return sprintf(buf, "%s\n", soc_dev->attr->machine);
+ if (attr == &dev_attr_family)
+ return sprintf(buf, "%s\n", soc_dev->attr->family);
+ if (attr == &dev_attr_revision)
+ return sprintf(buf, "%s\n", soc_dev->attr->revision);
+ if (attr == &dev_attr_soc_id)
+ return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
+
+	return -EINVAL;
+}
+
+static struct attribute *soc_attr[] = {
+ &dev_attr_machine.attr,
+ &dev_attr_family.attr,
+ &dev_attr_soc_id.attr,
+ &dev_attr_revision.attr,
+ NULL,
+};
+
+static const struct attribute_group soc_attr_group = {
+ .attrs = soc_attr,
+ .is_visible = soc_attribute_mode,
+};
+
+static const struct attribute_group *soc_attr_groups[] = {
+ &soc_attr_group,
+ NULL,
+};
+
+static void soc_release(struct device *dev)
+{
+ struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
+
+ kfree(soc_dev);
+}
+
+struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
+{
+ struct soc_device *soc_dev;
+ int ret;
+
+ soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
+ if (!soc_dev) {
+ ret = -ENOMEM;
+ goto out1;
+ }
+
+ /* Fetch a unique (reclaimable) SOC ID. */
+ do {
+ if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out2;
+ }
+
+ spin_lock(&soc_lock);
+ ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
+ spin_unlock(&soc_lock);
+
+ } while (ret == -EAGAIN);
+
+ if (ret)
+ goto out2;
+
+ soc_dev->attr = soc_dev_attr;
+ soc_dev->dev.bus = &soc_bus_type;
+ soc_dev->dev.groups = soc_attr_groups;
+ soc_dev->dev.release = soc_release;
+
+ dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
+
+ ret = device_register(&soc_dev->dev);
+ if (ret)
+ goto out3;
+
+ return soc_dev;
+
+out3:
+ ida_remove(&soc_ida, soc_dev->soc_dev_num);
+out2:
+ kfree(soc_dev);
+out1:
+ return ERR_PTR(ret);
+}
+
+/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
+void soc_device_unregister(struct soc_device *soc_dev)
+{
+ ida_remove(&soc_ida, soc_dev->soc_dev_num);
+
+ device_unregister(&soc_dev->dev);
+}
+
+static int __init soc_bus_register(void)
+{
+ spin_lock_init(&soc_lock);
+
+ return bus_register(&soc_bus_type);
+}
+core_initcall(soc_bus_register);
+
+static void __exit soc_bus_unregister(void)
+{
+ ida_destroy(&soc_ida);
+
+ bus_unregister(&soc_bus_type);
+}
+module_exit(soc_bus_unregister);
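
A platform would typically register its SoC from early init code; the strings then appear under /sys/devices/soc0/ and other on-chip devices can be parented beneath soc_device_to_device(). A sketch against the interface added above; all strings and names are illustrative:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

static int __init my_soc_init(void)
{
	struct soc_device_attribute *attr;
	struct soc_device *soc_dev;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	attr->machine  = "My Board";
	attr->family   = "MySoC Family";
	attr->soc_id   = "MYSOC1234";
	attr->revision = "1.0";

	soc_dev = soc_device_register(attr);
	if (IS_ERR(soc_dev)) {
		kfree(attr);
		return PTR_ERR(soc_dev);
	}

	/* Children may now use soc_device_to_device(soc_dev) as parent;
	 * note soc_release() does not free attr, the platform owns it. */
	return 0;
}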