Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig            |   2
-rw-r--r--  drivers/base/devtmpfs.c         |   4
-rw-r--r--  drivers/base/dma-buf.c          |   3
-rw-r--r--  drivers/base/dma-coherent.c     |   5
-rw-r--r--  drivers/base/dma-contiguous.c   |  23
-rw-r--r--  drivers/base/firmware_class.c   | 208
-rw-r--r--  drivers/base/memory.c           |  40
-rw-r--r--  drivers/base/platform.c         |   9
-rw-r--r--  drivers/base/power/domain.c     | 249
-rw-r--r--  drivers/base/power/main.c       |  66
-rw-r--r--  drivers/base/power/opp.c        |  47
-rw-r--r--  drivers/base/power/power.h      |  36
-rw-r--r--  drivers/base/power/runtime.c    |   3
-rw-r--r--  drivers/base/power/wakeup.c     |  46
-rw-r--r--  drivers/base/regmap/Kconfig     |   2
15 files changed, 587 insertions, 156 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 08b4c520938..b34b5cda5ae 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,7 +236,7 @@ config CMA_SIZE_PERCENTAGE
choice
prompt "Selected region size"
- default CMA_SIZE_SEL_ABSOLUTE
+ default CMA_SIZE_SEL_MBYTES
config CMA_SIZE_SEL_MBYTES
bool "Use mega bytes value only"
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index deb4a456cf8..147d1a4dd26 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -309,8 +309,8 @@ static int handle_remove(const char *nodename, struct device *dev)
* before unlinking this node, reset permissions
* of possible references like hardlinks
*/
- newattrs.ia_uid = 0;
- newattrs.ia_gid = 0;
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
newattrs.ia_mode = stat.mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
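Note on the devtmpfs hunk: GLOBAL_ROOT_UID and GLOBAL_ROOT_GID are the typed kuid_t/kgid_t spellings of root introduced by the user-namespace conversion, where a raw 0 no longer type-checks against the wrapped ID types. A minimal sketch of handling such typed IDs, assuming only the standard <linux/uidgid.h> helpers (the helper name is hypothetical):

    #include <linux/fs.h>
    #include <linux/uidgid.h>

    /* Hypothetical helper: typed IDs are compared with uid_eq()/gid_eq()
     * rather than with == against integer literals. */
    static bool iattr_owned_by_root(const struct iattr *attrs)
    {
            return uid_eq(attrs->ia_uid, GLOBAL_ROOT_UID) &&
                   gid_eq(attrs->ia_gid, GLOBAL_ROOT_GID);
    }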
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index c30f3e1d0ef..460e22dee36 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -460,8 +460,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
if (vma->vm_file)
fput(vma->vm_file);
- vma->vm_file = dmabuf->file;
- get_file(vma->vm_file);
+ vma->vm_file = get_file(dmabuf->file);
vma->vm_pgoff = pgoff;
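The one-liner works because get_file() increments the file's reference count and returns its argument, so taking the reference and publishing the pointer collapse into a single expression. A sketch of the general pattern, with a hypothetical helper name and the take-new-reference-before-dropping-old ordering:

    #include <linux/file.h>
    #include <linux/mm.h>

    static void vma_replace_file(struct vm_area_struct *vma, struct file *file)
    {
            struct file *old = vma->vm_file;

            vma->vm_file = get_file(file);  /* reference taken before publish */
            if (old)
                    fput(old);
    }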
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 560a7173f81..bc256b64102 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -191,9 +191,8 @@ EXPORT_SYMBOL(dma_release_from_coherent);
* This checks whether the memory was allocated from the per-device
* coherent memory pool and if so, maps that memory to the provided vma.
*
- * Returns 1 if we correctly mapped the memory, or 0 if
- * dma_release_coherent() should proceed with mapping memory from
- * generic pools.
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
*/
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
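The reworded kerneldoc matches how dma_mmap_from_coherent() is actually consumed: an architecture's dma_mmap implementation tries the per-device pool first and only falls back to the generic path on a 0 return. A sketch of that calling convention (the function name is hypothetical, modeled on the ARM implementation):

    int foo_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
    {
            int ret = -ENXIO;

            /* returns 1 if handled (result in ret), 0 to use generic pools */
            if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                    return ret;

            /* ... otherwise map pages from the generic allocator ... */
            return ret;
    }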
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 34d94c762a1..612afcc5a93 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -27,15 +27,12 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
-#ifndef SZ_1M
-#define SZ_1M (1 << 20)
-#endif
-
struct cma {
unsigned long base_pfn;
unsigned long count;
@@ -315,6 +312,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
{
unsigned long mask, pfn, pageno, start = 0;
struct cma *cma = dev_get_cma_area(dev);
+ struct page *page = NULL;
int ret;
if (!cma || !cma->count)
@@ -336,18 +334,17 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
for (;;) {
pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
start, count, mask);
- if (pageno >= cma->count) {
- ret = -ENOMEM;
- goto error;
- }
+ if (pageno >= cma->count)
+ break;
pfn = cma->base_pfn + pageno;
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
if (ret == 0) {
bitmap_set(cma->bitmap, pageno, count);
+ page = pfn_to_page(pfn);
break;
} else if (ret != -EBUSY) {
- goto error;
+ break;
}
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
@@ -356,12 +353,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
}
mutex_unlock(&cma_mutex);
-
- pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
- return pfn_to_page(pfn);
-error:
- mutex_unlock(&cma_mutex);
- return NULL;
+ pr_debug("%s(): returned %p\n", __func__, page);
+ return page;
}
/**
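With the reworked exit path, callers of the allocator see one rule: a valid page pointer on success, NULL for both the out-of-space and hard-error cases. A minimal caller sketch, assuming a device with a CMA region attached (helper name hypothetical):

    static struct page *foo_alloc_cma(struct device *dev, size_t size)
    {
            int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

            /* NULL covers both "no free range" and allocation failure */
            return dma_alloc_from_contiguous(dev, count, get_order(size));
    }

The buffer is returned with dma_release_from_contiguous(dev, page, count) when no longer needed.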
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 6e210802c37..8945f4e489e 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -21,12 +21,15 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/file.h>
#include <linux/list.h>
#include <linux/async.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
+#include <generated/utsrelease.h>
+
#include "base.h"
MODULE_AUTHOR("Manuel Estrada Sainz");
@@ -85,6 +88,11 @@ enum {
FW_STATUS_ABORT,
};
+enum fw_buf_fmt {
+ VMALLOC_BUF, /* used in direct loading */
+ PAGE_BUF, /* used in loading via userspace */
+};
+
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
@@ -108,8 +116,6 @@ struct firmware_cache {
spinlock_t name_lock;
struct list_head fw_names;
- wait_queue_head_t wait_queue;
- int cnt;
struct delayed_work work;
struct notifier_block pm_notify;
@@ -122,6 +128,7 @@ struct firmware_buf {
struct completion completion;
struct firmware_cache *fwc;
unsigned long status;
+ enum fw_buf_fmt fmt;
void *data;
size_t size;
struct page **pages;
@@ -175,6 +182,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
strcpy(buf->fw_id, fw_name);
buf->fwc = fwc;
init_completion(&buf->completion);
+ buf->fmt = VMALLOC_BUF;
pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
@@ -242,10 +250,14 @@ static void __fw_free_buf(struct kref *ref)
list_del(&buf->list);
spin_unlock(&fwc->lock);
- vunmap(buf->data);
- for (i = 0; i < buf->nr_pages; i++)
- __free_page(buf->pages[i]);
- kfree(buf->pages);
+
+ if (buf->fmt == PAGE_BUF) {
+ vunmap(buf->data);
+ for (i = 0; i < buf->nr_pages; i++)
+ __free_page(buf->pages[i]);
+ kfree(buf->pages);
+ } else
+ vfree(buf->data);
kfree(buf);
}
@@ -254,6 +266,69 @@ static void fw_free_buf(struct firmware_buf *buf)
kref_put(&buf->ref, __fw_free_buf);
}
+/* direct firmware loading support */
+static const char *fw_path[] = {
+ "/lib/firmware/updates/" UTS_RELEASE,
+ "/lib/firmware/updates",
+ "/lib/firmware/" UTS_RELEASE,
+ "/lib/firmware"
+};
+
+/* Don't inline this: 'struct kstat' is biggish */
+static noinline long fw_file_size(struct file *file)
+{
+ struct kstat st;
+ if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+ return -1;
+ if (!S_ISREG(st.mode))
+ return -1;
+ if (st.size != (long)st.size)
+ return -1;
+ return st.size;
+}
+
+static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+{
+ long size;
+ char *buf;
+
+ size = fw_file_size(file);
+ if (size < 0)
+ return false;
+ buf = vmalloc(size);
+ if (!buf)
+ return false;
+ if (kernel_read(file, 0, buf, size) != size) {
+ vfree(buf);
+ return false;
+ }
+ fw_buf->data = buf;
+ fw_buf->size = size;
+ return true;
+}
+
+static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+{
+ int i;
+ bool success = false;
+ char *path = __getname();
+
+ for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
+ struct file *file;
+ snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
+
+ file = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(file))
+ continue;
+ success = fw_read_file_contents(file, buf);
+ fput(file);
+ if (success)
+ break;
+ }
+ __putname(path);
+ return success;
+}
+
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
return container_of(dev, struct firmware_priv, dev);
@@ -346,7 +421,11 @@ static ssize_t firmware_loading_show(struct device *dev,
/* firmware holds the ownership of pages */
static void firmware_free_data(const struct firmware *fw)
{
- WARN_ON(!fw->priv);
+ /* Loaded directly? */
+ if (!fw->priv) {
+ vfree(fw->data);
+ return;
+ }
fw_free_buf(fw->priv);
}
@@ -354,6 +433,21 @@ static void firmware_free_data(const struct firmware *fw)
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
+
+/* each pages buffer should be mapped/unmapped only once */
+static int fw_map_pages_buf(struct firmware_buf *buf)
+{
+ if (buf->fmt != PAGE_BUF)
+ return 0;
+
+ if (buf->data)
+ vunmap(buf->data);
+ buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
+ if (!buf->data)
+ return -ENOMEM;
+ return 0;
+}
+
/**
* firmware_loading_store - set value in the 'loading' control file
* @dev: device pointer
@@ -398,6 +492,14 @@ static ssize_t firmware_loading_store(struct device *dev,
if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
set_bit(FW_STATUS_DONE, &fw_buf->status);
clear_bit(FW_STATUS_LOADING, &fw_buf->status);
+
+ /*
+ * Several loading requests may be pending on
+ * the same firmware buf, so let all requests
+ * see the mapped 'buf->data' once the loading
+ * is completed.
+ */
+ fw_map_pages_buf(fw_buf);
complete_all(&fw_buf->completion);
break;
}
@@ -601,15 +703,6 @@ exit:
return fw_priv;
}
-/* one pages buffer is mapped/unmapped only once */
-static int fw_map_pages_buf(struct firmware_buf *buf)
-{
- buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
- if (!buf->data)
- return -ENOMEM;
- return 0;
-}
-
/* store the page-buffer info from buf into the firmware struct */
static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
{
@@ -758,6 +851,21 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
struct device *f_dev = &fw_priv->dev;
struct firmware_buf *buf = fw_priv->buf;
struct firmware_cache *fwc = &fw_cache;
+ int direct_load = 0;
+
+ /* try direct loading from fs first */
+ if (fw_get_filesystem_firmware(buf)) {
+ dev_dbg(f_dev->parent, "firmware: direct-loading"
+ " firmware %s\n", buf->fw_id);
+
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ direct_load = 1;
+ goto handle_fw;
+ }
+
+ /* fall back on userspace loading */
+ buf->fmt = PAGE_BUF;
dev_set_uevent_suppress(f_dev, true);
@@ -796,6 +904,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
del_timer_sync(&fw_priv->timeout);
+handle_fw:
mutex_lock(&fw_lock);
if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
retval = -ENOENT;
@@ -810,9 +919,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
if (!retval && f_dev->parent)
fw_add_devm_name(f_dev->parent, buf->fw_id);
- if (!retval)
- retval = fw_map_pages_buf(buf);
-
/*
* After caching firmware image is started, let it piggyback
* on request firmware.
@@ -828,6 +934,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
fw_priv->buf = NULL;
mutex_unlock(&fw_lock);
+ if (direct_load)
+ goto err_put_dev;
+
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
device_remove_bin_file(f_dev, &firmware_attr_data);
@@ -1055,6 +1164,8 @@ int uncache_firmware(const char *fw_name)
}
#ifdef CONFIG_PM_SLEEP
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
+
static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
{
struct fw_cache_entry *fce;
@@ -1068,17 +1179,27 @@ exit:
return fce;
}
-static int fw_cache_piggyback_on_request(const char *name)
+static int __fw_entry_found(const char *name)
{
struct firmware_cache *fwc = &fw_cache;
struct fw_cache_entry *fce;
- int ret = 0;
- spin_lock(&fwc->name_lock);
list_for_each_entry(fce, &fwc->fw_names, list) {
if (!strcmp(fce->name, name))
- goto found;
+ return 1;
}
+ return 0;
+}
+
+static int fw_cache_piggyback_on_request(const char *name)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ struct fw_cache_entry *fce;
+ int ret = 0;
+
+ spin_lock(&fwc->name_lock);
+ if (__fw_entry_found(name))
+ goto found;
fce = alloc_fw_cache_entry(name);
if (fce) {
@@ -1111,12 +1232,6 @@ static void __async_dev_cache_fw_image(void *fw_entry,
free_fw_cache_entry(fce);
}
-
- spin_lock(&fwc->name_lock);
- fwc->cnt--;
- spin_unlock(&fwc->name_lock);
-
- wake_up(&fwc->wait_queue);
}
/* called with dev->devres_lock held */
@@ -1155,11 +1270,19 @@ static void dev_cache_fw_image(struct device *dev, void *data)
list_del(&fce->list);
spin_lock(&fwc->name_lock);
- fwc->cnt++;
- list_add(&fce->list, &fwc->fw_names);
+ /* only one cache entry for one firmware */
+ if (!__fw_entry_found(fce->name)) {
+ list_add(&fce->list, &fwc->fw_names);
+ } else {
+ free_fw_cache_entry(fce);
+ fce = NULL;
+ }
spin_unlock(&fwc->name_lock);
- async_schedule(__async_dev_cache_fw_image, (void *)fce);
+ if (fce)
+ async_schedule_domain(__async_dev_cache_fw_image,
+ (void *)fce,
+ &fw_cache_domain);
}
}
@@ -1201,6 +1324,9 @@ static void device_cache_fw_images(void)
pr_debug("%s\n", __func__);
+ /* cancel uncache work */
+ cancel_delayed_work_sync(&fwc->work);
+
/*
* use small loading timeout for caching devices' firmware
* because all these firmware images have been loaded
@@ -1218,21 +1344,7 @@ static void device_cache_fw_images(void)
mutex_unlock(&fw_lock);
/* wait for completion of caching firmware for all devices */
- spin_lock(&fwc->name_lock);
- for (;;) {
- prepare_to_wait(&fwc->wait_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!fwc->cnt)
- break;
-
- spin_unlock(&fwc->name_lock);
-
- schedule();
-
- spin_lock(&fwc->name_lock);
- }
- spin_unlock(&fwc->name_lock);
- finish_wait(&fwc->wait_queue, &wait);
+ async_synchronize_full_domain(&fw_cache_domain);
loading_timeout = old_timeout;
}
@@ -1320,9 +1432,7 @@ static void __init fw_cache_init(void)
#ifdef CONFIG_PM_SLEEP
spin_lock_init(&fw_cache.name_lock);
INIT_LIST_HEAD(&fw_cache.fw_names);
- fw_cache.cnt = 0;
- init_waitqueue_head(&fw_cache.wait_queue);
INIT_DELAYED_WORK(&fw_cache.work,
device_uncache_fw_images_work);
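None of the above changes the driver-facing contract: a consumer still calls request_firmware(), and the core now satisfies it from the built-in fw_path list when it can, falling back to the userspace loader (and the PAGE_BUF format) otherwise. A hypothetical caller:

    #include <linux/firmware.h>

    static int foo_load_fw(struct device *dev)
    {
            const struct firmware *fw;
            int ret;

            ret = request_firmware(&fw, "foo/foo-fw.bin", dev);
            if (ret)
                    return ret;

            /* fw->data is vmalloc()ed when direct-loaded, vmap()ed otherwise */
            ret = foo_upload(dev, fw->data, fw->size);      /* hypothetical */

            release_firmware(fw);
            return ret;
    }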
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7dda4f790f0..86c88216a50 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -248,26 +248,23 @@ static bool pages_correctly_reserved(unsigned long start_pfn,
static int
memory_block_action(unsigned long phys_index, unsigned long action)
{
- unsigned long start_pfn, start_paddr;
+ unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
int ret;
first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
+ start_pfn = page_to_pfn(first_page);
switch (action) {
case MEM_ONLINE:
- start_pfn = page_to_pfn(first_page);
-
if (!pages_correctly_reserved(start_pfn, nr_pages))
return -EBUSY;
ret = online_pages(start_pfn, nr_pages);
break;
case MEM_OFFLINE:
- start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
- ret = remove_memory(start_paddr,
- nr_pages << PAGE_SHIFT);
+ ret = offline_pages(start_pfn, nr_pages);
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
@@ -278,13 +275,11 @@ memory_block_action(unsigned long phys_index, unsigned long action)
return ret;
}
-static int memory_block_change_state(struct memory_block *mem,
+static int __memory_block_change_state(struct memory_block *mem,
unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
- mutex_lock(&mem->state_mutex);
-
if (mem->state != from_state_req) {
ret = -EINVAL;
goto out;
@@ -312,10 +307,20 @@ static int memory_block_change_state(struct memory_block *mem,
break;
}
out:
- mutex_unlock(&mem->state_mutex);
return ret;
}
+static int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state, unsigned long from_state_req)
+{
+ int ret;
+
+ mutex_lock(&mem->state_mutex);
+ ret = __memory_block_change_state(mem, to_state, from_state_req);
+ mutex_unlock(&mem->state_mutex);
+
+ return ret;
+}
static ssize_t
store_mem_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
@@ -656,6 +661,21 @@ int unregister_memory_section(struct mem_section *section)
}
/*
+ * Offline one memory block. If the memory block is already offline, do nothing.
+ */
+int offline_memory_block(struct memory_block *mem)
+{
+ int ret = 0;
+
+ mutex_lock(&mem->state_mutex);
+ if (mem->state != MEM_OFFLINE)
+ ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ mutex_unlock(&mem->state_mutex);
+
+ return ret;
+}
+
+/*
* Initialize the sysfs support for memory devices...
*/
int __init memory_dev_init(void)
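The locked/unlocked split above is what makes offline_memory_block() race-free: the state test and the transition happen under a single hold of state_mutex. A hedged sketch of a caller in the hot-remove path (the loop and lookup are illustrative, not the exact upstream code):

    static int foo_offline_section_blocks(struct mem_section *sections, int nr)
    {
            int i, ret;

            for (i = 0; i < nr; i++) {
                    struct memory_block *mem = find_memory_block(&sections[i]);

                    if (!mem)
                            continue;
                    ret = offline_memory_block(mem); /* no-op if already offline */
                    if (ret)
                            return ret;
            }
            return 0;
    }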
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ddeca142293..72c776f2a1f 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -23,6 +23,7 @@
#include <linux/idr.h>
#include "base.h"
+#include "power/power.h"
/* For automatically allocated device IDs */
static DEFINE_IDA(platform_devid_ida);
@@ -82,9 +83,16 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
*/
int platform_get_irq(struct platform_device *dev, unsigned int num)
{
+#ifdef CONFIG_SPARC
+ /* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+ if (!dev || num >= dev->archdata.num_irqs)
+ return -ENXIO;
+ return dev->archdata.irqs[num];
+#else
struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
+#endif
}
EXPORT_SYMBOL_GPL(platform_get_irq);
@@ -983,6 +991,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
dev = &devs[i]->dev;
if (!dev->devres_head.next) {
+ pm_runtime_early_init(dev);
INIT_LIST_HEAD(&dev->devres_head);
list_add_tail(&dev->devres_head,
&early_platform_device_list);
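Callers are unaffected by the sparc special case: platform_get_irq() still returns a non-negative IRQ number or -ENXIO. A typical probe-time use (sketch, names hypothetical):

    static int foo_probe(struct platform_device *pdev)
    {
            int irq = platform_get_irq(pdev, 0);

            if (irq < 0)
                    return irq;     /* -ENXIO on all architectures */

            return devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
                                    dev_name(&pdev->dev), pdev);
    }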
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba3487c9835..96b71b6536d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -53,6 +53,24 @@
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
+static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
+{
+ struct generic_pm_domain *genpd = NULL, *gpd;
+
+ if (IS_ERR_OR_NULL(domain_name))
+ return NULL;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (!strcmp(gpd->name, domain_name)) {
+ genpd = gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+ return genpd;
+}
+
#ifdef CONFIG_PM
struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
return ret;
}
+/**
+ * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
+ * @domain_name: Name of the PM domain to power up.
+ */
+int pm_genpd_name_poweron(const char *domain_name)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = pm_genpd_lookup_name(domain_name);
+ return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
+}
+
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
+static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node)
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
- || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+ || pdd->dev->power.irq_safe))
not_suspended++;
if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
- if (dev_gpd_data(dev)->always_on)
- return -EBUSY;
-
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
- return genpd_start_dev(genpd, dev);
+ return genpd_start_dev_no_timing(genpd, dev);
mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#ifdef CONFIG_PM_SLEEP
+/**
+ * pm_genpd_present - Check if the given PM domain has been initialized.
+ * @genpd: PM domain to check.
+ */
+static bool pm_genpd_present(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *gpd;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return false;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+ if (gpd == genpd)
+ return true;
+
+ return false;
+}
+
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
struct device *dev)
{
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
* Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its masters.
*
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
*/
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
@@ -777,6 +829,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
}
/**
+ * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * @genpd: PM domain to power on.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
+ */
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+{
+ struct gpd_link *link;
+
+ if (genpd->status != GPD_STATE_POWER_OFF)
+ return;
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ pm_genpd_sync_poweron(link->master);
+ genpd_sd_counter_inc(link->master);
+ }
+
+ if (genpd->power_on)
+ genpd->power_on(genpd);
+
+ genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
* resume_needed - Check whether to resume a device before system suspend.
* @dev: Device to check.
* @genpd: PM domain the device belongs to.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ if (genpd->suspend_power_off
|| (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ if (genpd->suspend_power_off
|| (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*/
- pm_genpd_poweron(genpd);
+ pm_genpd_sync_poweron(genpd);
genpd->suspended_count--;
return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
- 0 : genpd_stop_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}
/**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
- 0 : genpd_start_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}
/**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
if (genpd->suspended_count++ == 0) {
/*
* The boot kernel might put the domain into arbitrary state,
- * so make it appear as powered off to pm_genpd_poweron(), so
- * that it tries to power it on in case it was really off.
+ * so make it appear as powered off to pm_genpd_sync_poweron(),
+ * so that it tries to power it on in case it was really off.
*/
genpd->status = GPD_STATE_POWER_OFF;
if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- pm_genpd_poweron(genpd);
+ pm_genpd_sync_poweron(genpd);
- return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+ return genpd_start_dev(genpd, dev);
}
/**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
}
}
+/**
+ * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd(dev);
+ if (!pm_genpd_present(genpd))
+ return;
+
+ if (suspend) {
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+ } else {
+ pm_genpd_sync_poweron(genpd);
+ genpd->suspended_count--;
+ }
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
+
#else
#define pm_genpd_prepare NULL
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
return __pm_genpd_add_device(genpd, dev, td);
}
+
+/**
+ * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
+ * @domain_name: Name of the PM domain to add the device to.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
+ struct gpd_timing_data *td)
+{
+ return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
+}
+
/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @genpd: PM domain to remove the device from.
@@ -1455,26 +1570,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
}
/**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
- struct pm_subsys_data *psd;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- psd = dev_to_psd(dev);
- if (psd && psd->domain_data)
- to_gpd_data(psd->domain_data)->always_on = val;
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
-/**
* pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
* @dev: Device to set/unset the flag for.
* @val: The new value of the device's "need restore" flag.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct gpd_link *link;
int ret = 0;
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+ || genpd == subdomain)
return -EINVAL;
start:
@@ -1552,6 +1648,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
+ * @master_name: Name of the master PM domain to add the subdomain to.
+ * @subdomain_name: Name of the subdomain to be added.
+ */
+int pm_genpd_add_subdomain_names(const char *master_name,
+ const char *subdomain_name)
+{
+ struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
+
+ if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (!master && !strcmp(gpd->name, master_name))
+ master = gpd;
+
+ if (!subdomain && !strcmp(gpd->name, subdomain_name))
+ subdomain = gpd;
+
+ if (master && subdomain)
+ break;
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return pm_genpd_add_subdomain(master, subdomain);
+}
+
+/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @genpd: Master PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
-int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+/**
+ * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
+ * @genpd: PM domain to be connected with cpuidle.
+ * @state: cpuidle state this domain can disable/enable.
+ *
+ * Make a PM domain behave as though it contained a CPU core, that is, instead
+ * of calling its power down routine it will enable the given cpuidle state so
+ * that the cpuidle subsystem can power it down (if possible and desirable).
+ */
+int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
struct cpuidle_driver *cpuidle_drv;
struct gpd_cpu_data *cpu_data;
@@ -1728,7 +1862,7 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
cpuidle_drv = cpuidle_driver_ref();
if (!cpuidle_drv) {
ret = -ENODEV;
- goto out;
+ goto err_drv;
}
if (cpuidle_drv->state_count <= state) {
ret = -EINVAL;
@@ -1750,10 +1884,30 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
err:
cpuidle_driver_unref();
+
+ err_drv:
+ kfree(cpu_data);
goto out;
}
-int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+/**
+ * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
+ * @name: Name of the domain to connect to cpuidle.
+ * @state: cpuidle state this domain can manipulate.
+ */
+int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+ return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
+}
+
+/**
+ * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
+ * @genpd: PM domain to remove the cpuidle connection from.
+ *
+ * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
+ * given PM domain.
+ */
+int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
struct gpd_cpu_data *cpu_data;
struct cpuidle_state *idle_state;
@@ -1784,6 +1938,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
return ret;
}
+/**
+ * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
+ * @name: Name of the domain to disconnect cpuidle from.
+ */
+int pm_genpd_name_detach_cpuidle(const char *name)
+{
+ return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
+}
+
/* Default device callbacks for generic PM domains. */
/**
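Taken together, the new name-based entry points let platform setup code wire domains together before pointers to them are exported, and pm_genpd_syscore_switch() lets an "always on" device participate in syscore power switching. A hedged sketch (all identifiers hypothetical):

    static struct device *foo_timer_dev;    /* assumed "always on" timer */

    static int __init foo_pd_setup(void)
    {
            int ret;

            ret = pm_genpd_add_subdomain_names("FOO-TOP", "FOO-VIDEO");
            if (ret)
                    return ret;

            ret = __pm_genpd_name_add_device("FOO-TOP", foo_timer_dev, NULL);
            if (ret)
                    return ret;

            ret = pm_genpd_name_attach_cpuidle("FOO-TOP", 1);
            if (ret)
                    return ret;

            return pm_genpd_name_poweron("FOO-VIDEO");
    }

    /* syscore hooks: power-switch the timer's domain outside the dpm phases */
    static void foo_timer_syscore_suspend(void)
    {
            pm_genpd_syscore_switch(foo_timer_dev, true);
    }

    static void foo_timer_syscore_resume(void)
    {
            pm_genpd_syscore_switch(foo_timer_dev, false);
    }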
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b0b072a88f5..a3c1404c793 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
static int async_error;
/**
- * device_pm_init - Initialize the PM-related part of a device object.
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
* @dev: Device object being initialized.
*/
-void device_pm_init(struct device *dev)
+void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
- spin_lock_init(&dev->power.lock);
- pm_runtime_init(dev);
INIT_LIST_HEAD(&dev->power.entry);
- dev->power.power_state = PMSG_INVALID;
}
/**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ if (dev->power.syscore)
+ goto Out;
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
error = dpm_run_callback(callback, dev, state, info);
+ Out:
TRACE_RESUME(error);
return error;
}
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ if (dev->power.syscore)
+ goto Out;
+
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
error = dpm_run_callback(callback, dev, state, info);
+ Out:
TRACE_RESUME(error);
return error;
}
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
- bool put = false;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ if (dev->power.syscore)
+ goto Complete;
+
dpm_wait(dev->parent, async);
device_lock(dev);
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Unlock;
pm_runtime_enable(dev);
- put = true;
if (dev->pm_domain) {
info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
Unlock:
device_unlock(dev);
+
+ Complete:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
- if (put)
- pm_runtime_put_sync(dev);
-
return error;
}
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
void (*callback)(struct device *) = NULL;
char *info = NULL;
+ if (dev->power.syscore)
+ return;
+
device_lock(dev);
if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
}
device_unlock(dev);
+
+ pm_runtime_put_sync(dev);
}
/**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
+ if (dev->power.syscore)
+ return 0;
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
+ if (dev->power.syscore)
+ return 0;
+
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
error = dpm_suspend_noirq(state);
if (error) {
- dpm_resume_early(state);
+ dpm_resume_early(resume_event(state));
return error;
}
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (async_error)
goto Complete;
- pm_runtime_get_noresume(dev);
+ /*
+ * If a device configured to wake up the system from sleep states
+ * has been suspended at run time and there's a resume request pending
+ * for it, this is equivalent to the device signaling wakeup, so the
+ * system suspend operation should be aborted.
+ */
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
async_error = -EBUSY;
goto Complete;
}
+ if (dev->power.syscore)
+ goto Complete;
+
device_lock(dev);
if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
complete_all(&dev->power.completion);
- if (error) {
- pm_runtime_put_sync(dev);
+ if (error)
async_error = error;
- } else if (dev->power.is_suspended) {
+ else if (dev->power.is_suspended)
__pm_runtime_disable(dev, false);
- }
return error;
}
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
char *info = NULL;
int error = 0;
+ if (dev->power.syscore)
+ return 0;
+
+ /*
+ * If a device's parent goes into runtime suspend at the wrong time,
+ * it won't be possible to resume the device. To prevent this we
+ * block runtime suspend here, during the prepare phase, and allow
+ * it again during the complete phase.
+ */
+ pm_runtime_get_noresume(dev);
+
device_lock(dev);
dev->power.wakeup_path = device_may_wakeup(dev);
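The new power.syscore checks short-circuit every phase above, so a flagged device is left entirely to syscore_ops. A sketch of opting a device out (assumption: the flag is set directly at probe time, before system sleep can run):

    static int foo_timer_probe(struct platform_device *pdev)
    {
            /* skip all dpm phases; handled from syscore_ops instead */
            pdev->dev.power.syscore = true;

            /* ... register clockevents and syscore_ops here ... */
            return 0;
    }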
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ac993eafec8..d9468642fc4 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,7 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
+#include <linux/of.h>
/*
* Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
return &dev_opp->head;
}
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev: device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+ const struct property *prop;
+ const __be32 *val;
+ int nr;
+
+ prop = of_find_property(dev->of_node, "operating-points", NULL);
+ if (!prop)
+ return -ENODEV;
+ if (!prop->value)
+ return -ENODATA;
+
+ /*
+ * Each OPP is a set of tuples consisting of frequency and
+ * voltage like <freq-kHz vol-uV>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2) {
+ dev_err(dev, "%s: Invalid OPP list\n", __func__);
+ return -EINVAL;
+ }
+
+ val = prop->value;
+ while (nr) {
+ unsigned long freq = be32_to_cpup(val++) * 1000;
+ unsigned long volt = be32_to_cpup(val++);
+
+ if (opp_add(dev, freq, volt))
+ dev_warn(dev, "%s: Failed to add OPP %ld\n",
+ __func__, freq);
+ nr -= 2;
+ }
+
+ return 0;
+}
+#endif
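of_init_opp_table() consumes flat <frequency-kHz voltage-uV> pairs from the property. A hedged sketch of the assumed DT fragment and a consumer (driver names hypothetical):

    /*
     * Assumed device-tree fragment (kHz / uV pairs):
     *
     *      cpu@0 {
     *              operating-points = <792000 1100000
     *                                  396000  950000>;
     *      };
     */
    static int foo_cpufreq_init(struct device *cpu_dev)
    {
            int ret = of_init_opp_table(cpu_dev);

            if (ret)        /* -ENODEV: missing property, -EINVAL: odd count */
                    return ret;

            /* OPPs are now visible to opp_find_freq_ceil() and friends */
            return 0;
    }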
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index eeb4bff9505..0dbfdf4419a 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,12 +1,32 @@
#include <linux/pm_qos.h>
+static inline void device_pm_init_common(struct device *dev)
+{
+ if (!dev->power.early_init) {
+ spin_lock_init(&dev->power.lock);
+ dev->power.power_state = PMSG_INVALID;
+ dev->power.early_init = true;
+ }
+}
+
#ifdef CONFIG_PM_RUNTIME
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ dev->power.disable_depth = 1;
+ device_pm_init_common(dev);
+}
+
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
#else /* !CONFIG_PM_RUNTIME */
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+}
+
static inline void pm_runtime_init(struct device *dev) {}
static inline void pm_runtime_remove(struct device *dev) {}
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
return container_of(entry, struct device, power.entry);
}
-extern void device_pm_init(struct device *dev);
+extern void device_pm_sleep_init(struct device *dev);
extern void device_pm_add(struct device *);
extern void device_pm_remove(struct device *);
extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
#else /* !CONFIG_PM_SLEEP */
-static inline void device_pm_init(struct device *dev)
-{
- spin_lock_init(&dev->power.lock);
- dev->power.power_state = PMSG_INVALID;
- pm_runtime_init(dev);
-}
+static inline void device_pm_sleep_init(struct device *dev) {}
static inline void device_pm_add(struct device *dev)
{
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
#endif /* !CONFIG_PM_SLEEP */
+static inline void device_pm_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+ device_pm_sleep_init(dev);
+ pm_runtime_init(dev);
+}
+
#ifdef CONFIG_PM
/*
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7d9c1cb1c39..3148b10dc2e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
repeat:
if (dev->power.runtime_error)
retval = -EINVAL;
+ else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+ && dev->power.runtime_status == RPM_ACTIVE)
+ retval = 1;
else if (dev->power.disable_depth > 0)
retval = -EACCES;
if (retval)
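The new branch makes rpm_resume() report "already active" (return 1) when the PM core's single runtime-PM disable is in effect during system sleep and the device never left RPM_ACTIVE, instead of failing with -EACCES. A hedged sketch of the kind of callback this unbreaks:

    /* Sketch: an early-resume callback touching runtime PM while the core
     * still holds its one disable_depth reference. */
    static int foo_resume_early(struct device *dev)
    {
            int ret = pm_runtime_resume(dev);

            return ret < 0 ? ret : 0;  /* 1 == already active, not an error */
    }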
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cbb463b3a75..e6ee5e80e54 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
*/
void wakeup_source_add(struct wakeup_source *ws)
{
+ unsigned long flags;
+
if (WARN_ON(!ws))
return;
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
ws->active = false;
ws->last_time = ktime_get();
- spin_lock_irq(&events_lock);
+ spin_lock_irqsave(&events_lock, flags);
list_add_rcu(&ws->entry, &wakeup_sources);
- spin_unlock_irq(&events_lock);
+ spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
*/
void wakeup_source_remove(struct wakeup_source *ws)
{
+ unsigned long flags;
+
if (WARN_ON(!ws))
return;
- spin_lock_irq(&events_lock);
+ spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
- spin_unlock_irq(&events_lock);
+ spin_unlock_irqrestore(&events_lock, flags);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);
+static void print_active_wakeup_sources(void)
+{
+ struct wakeup_source *ws;
+ int active = 0;
+ struct wakeup_source *last_activity_ws = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ if (ws->active) {
+ pr_info("active wakeup source: %s\n", ws->name);
+ active = 1;
+ } else if (!active &&
+ (!last_activity_ws ||
+ ktime_to_ns(ws->last_time) >
+ ktime_to_ns(last_activity_ws->last_time))) {
+ last_activity_ws = ws;
+ }
+ }
+
+ if (!active && last_activity_ws)
+ pr_info("last active wakeup source: %s\n",
+ last_activity_ws->name);
+ rcu_read_unlock();
+}
+
/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
*
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
+
+ if (ret)
+ print_active_wakeup_sources();
+
return ret;
}
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
bool pm_save_wakeup_count(unsigned int count)
{
unsigned int cnt, inpr;
+ unsigned long flags;
events_check_enabled = false;
- spin_lock_irq(&events_lock);
+ spin_lock_irqsave(&events_lock, flags);
split_counters(&cnt, &inpr);
if (cnt == count && inpr == 0) {
saved_count = count;
events_check_enabled = true;
}
- spin_unlock_irq(&events_lock);
+ spin_unlock_irqrestore(&events_lock, flags);
return events_check_enabled;
}
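With print_active_wakeup_sources() wired into pm_wakeup_pending(), an aborted suspend now names the source that blocked it. Nothing changes on the producing side; a driver reports activity through the usual helper (sketch, names hypothetical):

    static irqreturn_t foo_wake_irq(int irq, void *dev_id)
    {
            struct device *dev = dev_id;

            pm_wakeup_event(dev, 0);  /* logged by name if it aborts suspend */
            return IRQ_HANDLED;
    }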
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 6be390bd8bd..f0d30543fcc 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ
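With REGMAP now defaulting to y for REGMAP_MMIO and REGMAP_IRQ users too, an MMIO driver only needs "select REGMAP_MMIO" in its Kconfig entry to pull in the core. A hedged consumer sketch (identifiers hypothetical; base is assumed to be an already-ioremapped register window):

    static const struct regmap_config foo_regmap_config = {
            .reg_bits   = 32,
            .val_bits   = 32,
            .reg_stride = 4,
    };

    static int foo_init_regmap(struct device *dev, void __iomem *base)
    {
            struct regmap *map;

            map = devm_regmap_init_mmio(dev, base, &foo_regmap_config);
            return IS_ERR(map) ? PTR_ERR(map) : 0;
    }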