Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig            |    2
-rw-r--r--  drivers/base/core.c             |  105
-rw-r--r--  drivers/base/devres.c           |   42
-rw-r--r--  drivers/base/devtmpfs.c         |    4
-rw-r--r--  drivers/base/dma-buf.c          |    3
-rw-r--r--  drivers/base/dma-coherent.c     |    5
-rw-r--r--  drivers/base/dma-contiguous.c   |   23
-rw-r--r--  drivers/base/firmware_class.c   |  946
-rw-r--r--  drivers/base/memory.c           |   40
-rw-r--r--  drivers/base/platform.c         |   43
-rw-r--r--  drivers/base/power/domain.c     |  249
-rw-r--r--  drivers/base/power/main.c       |   88
-rw-r--r--  drivers/base/power/opp.c        |   47
-rw-r--r--  drivers/base/power/power.h      |   36
-rw-r--r--  drivers/base/power/runtime.c    |    3
-rw-r--r--  drivers/base/power/wakeup.c     |   46
-rw-r--r--  drivers/base/regmap/Kconfig     |    2
17 files changed, 1434 insertions, 250 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 08b4c520938..b34b5cda5ae 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -236,7 +236,7 @@ config CMA_SIZE_PERCENTAGE
choice
prompt "Selected region size"
- default CMA_SIZE_SEL_ABSOLUTE
+ default CMA_SIZE_SEL_MBYTES
config CMA_SIZE_SEL_MBYTES
bool "Use mega bytes value only"
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5e6e00bc165..abea76c36a4 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -184,6 +184,17 @@ static void device_release(struct kobject *kobj)
struct device *dev = kobj_to_dev(kobj);
struct device_private *p = dev->p;
+ /*
+ * Some platform devices are driven without a driver attached
+ * and managed resources may have been acquired. Make sure
+ * all resources are released.
+ *
+ * Drivers can still add resources to the device after it has
+ * been deleted but is still alive, so release devres here to
+ * avoid a possible memory leak.
+ */
+ devres_release_all(dev);
+
if (dev->release)
dev->release(dev);
else if (dev->type && dev->type->release)
@@ -1196,13 +1207,6 @@ void device_del(struct device *dev)
bus_remove_device(dev);
driver_deferred_probe_del(dev);
- /*
- * Some platform devices are driven without driver attached
- * and managed resources may have been acquired. Make sure
- * all resources are released.
- */
- devres_release_all(dev);
-
/* Notify the platform of the removal, in case they
* need to do anything...
*/
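
The two hunks above move devres_release_all() from device_del() into device_release(), so managed resources are only released once the last reference to the device is dropped. A minimal sketch of the case this covers, assuming a platform device that never binds a driver (names invented for illustration):

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_register(void)
	{
		struct platform_device *pdev;
		void *state;

		pdev = platform_device_register_simple("example", -1, NULL, 0);
		if (IS_ERR(pdev))
			return PTR_ERR(pdev);

		/* managed allocation with no driver ever attached */
		state = devm_kzalloc(&pdev->dev, 64, GFP_KERNEL);
		if (!state) {
			platform_device_unregister(pdev);
			return -ENOMEM;
		}

		/*
		 * With this patch the allocation is freed by
		 * devres_release_all() from device_release(), i.e. only
		 * after the last reference to pdev->dev is gone, so a
		 * resource added between device_del() and the final put
		 * is no longer leaked.
		 */
		platform_device_unregister(pdev);
		return 0;
	}
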
@@ -1861,26 +1865,20 @@ void device_shutdown(void)
*/
#ifdef CONFIG_PRINTK
-int __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
+static int
+create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
{
- char dict[128];
- const char *level_extra = "";
- size_t dictlen = 0;
const char *subsys;
-
- if (!dev)
- return printk("%s(NULL device *): %pV", level, vaf);
+ size_t pos = 0;
if (dev->class)
subsys = dev->class->name;
else if (dev->bus)
subsys = dev->bus->name;
else
- goto skip;
+ return 0;
- dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
- "SUBSYSTEM=%s", subsys);
+ pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
/*
* Add device identifier DEVICE=:
@@ -1896,32 +1894,63 @@ int __dev_printk(const char *level, const struct device *dev,
c = 'b';
else
c = 'c';
- dictlen++;
- dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
- "DEVICE=%c%u:%u",
- c, MAJOR(dev->devt), MINOR(dev->devt));
+ pos++;
+ pos += snprintf(hdr + pos, hdrlen - pos,
+ "DEVICE=%c%u:%u",
+ c, MAJOR(dev->devt), MINOR(dev->devt));
} else if (strcmp(subsys, "net") == 0) {
struct net_device *net = to_net_dev(dev);
- dictlen++;
- dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
- "DEVICE=n%u", net->ifindex);
+ pos++;
+ pos += snprintf(hdr + pos, hdrlen - pos,
+ "DEVICE=n%u", net->ifindex);
} else {
- dictlen++;
- dictlen += snprintf(dict + dictlen, sizeof(dict) - dictlen,
- "DEVICE=+%s:%s", subsys, dev_name(dev));
+ pos++;
+ pos += snprintf(hdr + pos, hdrlen - pos,
+ "DEVICE=+%s:%s", subsys, dev_name(dev));
}
-skip:
- if (level[2])
- level_extra = &level[2]; /* skip past KERN_SOH "L" */
- return printk_emit(0, level[1] - '0',
- dictlen ? dict : NULL, dictlen,
- "%s %s: %s%pV",
- dev_driver_string(dev), dev_name(dev),
- level_extra, vaf);
+ return pos;
+}
+EXPORT_SYMBOL(create_syslog_header);
+
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args)
+{
+ char hdr[128];
+ size_t hdrlen;
+
+ hdrlen = create_syslog_header(dev, hdr, sizeof(hdr));
+
+ return vprintk_emit(0, level, hdrlen ? hdr : NULL, hdrlen, fmt, args);
+}
+EXPORT_SYMBOL(dev_vprintk_emit);
+
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ va_start(args, fmt);
+
+ r = dev_vprintk_emit(level, dev, fmt, args);
+
+ va_end(args);
+
+ return r;
+}
+EXPORT_SYMBOL(dev_printk_emit);
+
+static int __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{
+ if (!dev)
+ return printk("%s(NULL device *): %pV", level, vaf);
+
+ return dev_printk_emit(level[1] - '0', dev,
+ "%s %s: %pV",
+ dev_driver_string(dev), dev_name(dev), vaf);
}
-EXPORT_SYMBOL(__dev_printk);
int dev_printk(const char *level, const struct device *dev,
const char *fmt, ...)
@@ -1936,6 +1965,7 @@ int dev_printk(const char *level, const struct device *dev,
vaf.va = &args;
r = __dev_printk(level, dev, &vaf);
+
va_end(args);
return r;
@@ -1955,6 +1985,7 @@ int func(const struct device *dev, const char *fmt, ...) \
vaf.va = &args; \
\
r = __dev_printk(kern_level, dev, &vaf); \
+ \
va_end(args); \
\
return r; \
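
The rework above splits syslog header construction out of __dev_printk() so that dev_vprintk_emit()/dev_printk_emit() can be called with a plain numeric loglevel while still carrying the SUBSYSTEM=/DEVICE= dictionary. A hedged usage sketch (the level value 6 corresponds to KERN_INFO):

	/* sketch: emit a structured record with device metadata attached */
	static void example_log_link_up(struct device *dev)
	{
		dev_printk_emit(6, dev, "%s %s: link is up\n",
				dev_driver_string(dev), dev_name(dev));
	}
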
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 2360adb7a58..8731979d668 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -144,6 +144,48 @@ EXPORT_SYMBOL_GPL(devres_alloc);
#endif
/**
+ * devres_for_each_res - Resource iterator
+ * @dev: Device to iterate resources from
+ * @release: Look for resources associated with this release function
+ * @match: Match function (optional)
+ * @match_data: Data for the match function
+ * @fn: Function to be called for each matched resource.
+ * @data: Data for @fn, the 3rd parameter of @fn
+ *
+ * Call @fn for each devres of @dev which is associated with @release
+ * and for which @match returns 1.
+ *
+ * RETURNS:
+ * void
+ */
+void devres_for_each_res(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data,
+ void (*fn)(struct device *, void *, void *),
+ void *data)
+{
+ struct devres_node *node;
+ struct devres_node *tmp;
+ unsigned long flags;
+
+ if (!fn)
+ return;
+
+ spin_lock_irqsave(&dev->devres_lock, flags);
+ list_for_each_entry_safe_reverse(node, tmp,
+ &dev->devres_head, entry) {
+ struct devres *dr = container_of(node, struct devres, node);
+
+ if (node->release != release)
+ continue;
+ if (match && !match(dev, dr->data, match_data))
+ continue;
+ fn(dev, dr->data, data);
+ }
+ spin_unlock_irqrestore(&dev->devres_lock, flags);
+}
+EXPORT_SYMBOL_GPL(devres_for_each_res);
+
+/**
* devres_free - Free device resource data
* @res: Pointer to devres data to free
*
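
A sketch of how the new iterator is meant to be used; my_release and my_visit are invented stand-ins for a real devres release function and visitor. Note that @fn runs with dev->devres_lock held, so it must not sleep:

	static void my_release(struct device *dev, void *res)
	{
		/* ordinary devres release function for this resource type */
	}

	static void my_visit(struct device *dev, void *res, void *data)
	{
		/* called with dev->devres_lock held: no sleeping here */
		pr_debug("%s: visiting res=%p\n", dev_name(dev), res);
	}

	static void example_walk(struct device *dev)
	{
		/* NULL match: visit every entry tied to my_release */
		devres_for_each_res(dev, my_release, NULL, NULL,
				    my_visit, NULL);
	}
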
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index deb4a456cf8..147d1a4dd26 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -309,8 +309,8 @@ static int handle_remove(const char *nodename, struct device *dev)
* before unlinking this node, reset permissions
* of possible references like hardlinks
*/
- newattrs.ia_uid = 0;
- newattrs.ia_gid = 0;
+ newattrs.ia_uid = GLOBAL_ROOT_UID;
+ newattrs.ia_gid = GLOBAL_ROOT_GID;
newattrs.ia_mode = stat.mode & ~0777;
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index c30f3e1d0ef..460e22dee36 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -460,8 +460,7 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
if (vma->vm_file)
fput(vma->vm_file);
- vma->vm_file = dmabuf->file;
- get_file(vma->vm_file);
+ vma->vm_file = get_file(dmabuf->file);
vma->vm_pgoff = pgoff;
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 560a7173f81..bc256b64102 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -191,9 +191,8 @@ EXPORT_SYMBOL(dma_release_from_coherent);
* This checks whether the memory was allocated from the per-device
* coherent memory pool and if so, maps that memory to the provided vma.
*
- * Returns 1 if we correctly mapped the memory, or 0 if
- * dma_release_coherent() should proceed with mapping memory from
- * generic pools.
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
*/
int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
void *vaddr, size_t size, int *ret)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 34d94c762a1..612afcc5a93 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -27,15 +27,12 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>
-#ifndef SZ_1M
-#define SZ_1M (1 << 20)
-#endif
-
struct cma {
unsigned long base_pfn;
unsigned long count;
@@ -315,6 +312,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
{
unsigned long mask, pfn, pageno, start = 0;
struct cma *cma = dev_get_cma_area(dev);
+ struct page *page = NULL;
int ret;
if (!cma || !cma->count)
@@ -336,18 +334,17 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
for (;;) {
pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
start, count, mask);
- if (pageno >= cma->count) {
- ret = -ENOMEM;
- goto error;
- }
+ if (pageno >= cma->count)
+ break;
pfn = cma->base_pfn + pageno;
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
if (ret == 0) {
bitmap_set(cma->bitmap, pageno, count);
+ page = pfn_to_page(pfn);
break;
} else if (ret != -EBUSY) {
- goto error;
+ break;
}
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
@@ -356,12 +353,8 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
}
mutex_unlock(&cma_mutex);
-
- pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
- return pfn_to_page(pfn);
-error:
- mutex_unlock(&cma_mutex);
- return NULL;
+ pr_debug("%s(): returned %p\n", __func__, page);
+ return page;
}
/**
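
The rewrite above funnels both the "bitmap full" and hard-error cases through a single unlock-and-return path, with 'page' left NULL unless alloc_contig_range() succeeded. A hedged caller sketch, assuming the third parameter is the alignment order as in this kernel's dma_alloc_from_contiguous() signature:

	/* sketch: ask CMA for enough pages to back 'size' bytes */
	static struct page *example_cma_alloc(struct device *dev, size_t size)
	{
		int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		/* returns NULL on failure, never an ERR_PTR */
		return dma_alloc_from_contiguous(dev, count, get_order(size));
	}
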
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 803cfc1597a..8945f4e489e 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -21,6 +21,16 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/list.h>
+#include <linux/async.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/syscore_ops.h>
+
+#include <generated/utsrelease.h>
+
+#include "base.h"
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Multi purpose firmware loading support");
@@ -78,6 +88,11 @@ enum {
FW_STATUS_ABORT,
};
+enum fw_buf_fmt {
+ VMALLOC_BUF, /* used in direct loading */
+ PAGE_BUF, /* used in loading via userspace */
+};
+
static int loading_timeout = 60; /* In seconds */
static inline long firmware_loading_timeout(void)
@@ -85,23 +100,235 @@ static inline long firmware_loading_timeout(void)
return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
}
-/* fw_lock could be moved to 'struct firmware_priv' but since it is just
- * guarding for corner cases a global lock should be OK */
-static DEFINE_MUTEX(fw_lock);
+struct firmware_cache {
+ /* firmware_buf instance will be added into the below list */
+ spinlock_t lock;
+ struct list_head head;
+ int state;
+
+#ifdef CONFIG_PM_SLEEP
+ /*
+ * Names of firmware images which have been cached successfully
+ * will be added into the below list so that device uncache
+ * helper can trace which firmware images have been cached
+ * before.
+ */
+ spinlock_t name_lock;
+ struct list_head fw_names;
+
+ struct delayed_work work;
+
+ struct notifier_block pm_notify;
+#endif
+};
-struct firmware_priv {
+struct firmware_buf {
+ struct kref ref;
+ struct list_head list;
struct completion completion;
- struct firmware *fw;
+ struct firmware_cache *fwc;
unsigned long status;
+ enum fw_buf_fmt fmt;
+ void *data;
+ size_t size;
struct page **pages;
int nr_pages;
int page_array_size;
+ char fw_id[];
+};
+
+struct fw_cache_entry {
+ struct list_head list;
+ char name[];
+};
+
+struct firmware_priv {
struct timer_list timeout;
- struct device dev;
bool nowait;
- char fw_id[];
+ struct device dev;
+ struct firmware_buf *buf;
+ struct firmware *fw;
};
+struct fw_name_devm {
+ unsigned long magic;
+ char name[];
+};
+
+#define to_fwbuf(d) container_of(d, struct firmware_buf, ref)
+
+#define FW_LOADER_NO_CACHE 0
+#define FW_LOADER_START_CACHE 1
+
+static int fw_cache_piggyback_on_request(const char *name);
+
+/* fw_lock could be moved to 'struct firmware_priv' but since it is just
+ * guarding for corner cases a global lock should be OK */
+static DEFINE_MUTEX(fw_lock);
+
+static struct firmware_cache fw_cache;
+
+static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
+ struct firmware_cache *fwc)
+{
+ struct firmware_buf *buf;
+
+ buf = kzalloc(sizeof(*buf) + strlen(fw_name) + 1, GFP_ATOMIC);
+
+ if (!buf)
+ return buf;
+
+ kref_init(&buf->ref);
+ strcpy(buf->fw_id, fw_name);
+ buf->fwc = fwc;
+ init_completion(&buf->completion);
+ buf->fmt = VMALLOC_BUF;
+
+ pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
+
+ return buf;
+}
+
+static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
+{
+ struct firmware_buf *tmp;
+ struct firmware_cache *fwc = &fw_cache;
+
+ list_for_each_entry(tmp, &fwc->head, list)
+ if (!strcmp(tmp->fw_id, fw_name))
+ return tmp;
+ return NULL;
+}
+
+static int fw_lookup_and_allocate_buf(const char *fw_name,
+ struct firmware_cache *fwc,
+ struct firmware_buf **buf)
+{
+ struct firmware_buf *tmp;
+
+ spin_lock(&fwc->lock);
+ tmp = __fw_lookup_buf(fw_name);
+ if (tmp) {
+ kref_get(&tmp->ref);
+ spin_unlock(&fwc->lock);
+ *buf = tmp;
+ return 1;
+ }
+ tmp = __allocate_fw_buf(fw_name, fwc);
+ if (tmp)
+ list_add(&tmp->list, &fwc->head);
+ spin_unlock(&fwc->lock);
+
+ *buf = tmp;
+
+ return tmp ? 0 : -ENOMEM;
+}
+
+static struct firmware_buf *fw_lookup_buf(const char *fw_name)
+{
+ struct firmware_buf *tmp;
+ struct firmware_cache *fwc = &fw_cache;
+
+ spin_lock(&fwc->lock);
+ tmp = __fw_lookup_buf(fw_name);
+ spin_unlock(&fwc->lock);
+
+ return tmp;
+}
+
+static void __fw_free_buf(struct kref *ref)
+{
+ struct firmware_buf *buf = to_fwbuf(ref);
+ struct firmware_cache *fwc = buf->fwc;
+ int i;
+
+ pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
+ __func__, buf->fw_id, buf, buf->data,
+ (unsigned int)buf->size);
+
+ spin_lock(&fwc->lock);
+ list_del(&buf->list);
+ spin_unlock(&fwc->lock);
+
+ if (buf->fmt == PAGE_BUF) {
+ vunmap(buf->data);
+ for (i = 0; i < buf->nr_pages; i++)
+ __free_page(buf->pages[i]);
+ kfree(buf->pages);
+ } else
+ vfree(buf->data);
+ kfree(buf);
+}
+
+static void fw_free_buf(struct firmware_buf *buf)
+{
+ kref_put(&buf->ref, __fw_free_buf);
+}
+
+/* direct firmware loading support */
+static const char *fw_path[] = {
+ "/lib/firmware/updates/" UTS_RELEASE,
+ "/lib/firmware/updates",
+ "/lib/firmware/" UTS_RELEASE,
+ "/lib/firmware"
+};
+
+/* Don't inline this: 'struct kstat' is biggish */
+static noinline long fw_file_size(struct file *file)
+{
+ struct kstat st;
+ if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+ return -1;
+ if (!S_ISREG(st.mode))
+ return -1;
+ if (st.size != (long)st.size)
+ return -1;
+ return st.size;
+}
+
+static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+{
+ long size;
+ char *buf;
+
+ size = fw_file_size(file);
+ if (size < 0)
+ return false;
+ buf = vmalloc(size);
+ if (!buf)
+ return false;
+ if (kernel_read(file, 0, buf, size) != size) {
+ vfree(buf);
+ return false;
+ }
+ fw_buf->data = buf;
+ fw_buf->size = size;
+ return true;
+}
+
+static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+{
+ int i;
+ bool success = false;
+ char *path = __getname();
+
+ for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
+ struct file *file;
+ snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
+
+ file = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(file))
+ continue;
+ success = fw_read_file_contents(file, buf);
+ fput(file);
+ if (success)
+ break;
+ }
+ __putname(path);
+ return success;
+}
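
Taken together, the additions above give each firmware name one refcounted firmware_buf, looked up or created under fwc->lock, plus a direct-load path that probes the fw_path[] entries in order before falling back to userspace. A sketch of the sharing behaviour (error handling trimmed):

	/* sketch: concurrent requests for one image share a single buf */
	static struct firmware_buf *example_get_buf(const char *name)
	{
		struct firmware_buf *buf;
		int ret;

		ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
		if (ret == 1)		/* existing buf, extra kref taken */
			pr_debug("sharing in-flight buf for %s\n", name);
		else if (ret == 0)	/* fresh buf, added to fw_cache.head */
			pr_debug("allocated new buf for %s\n", name);
		return ret < 0 ? NULL : buf;
	}
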
+
static struct firmware_priv *to_firmware_priv(struct device *dev)
{
return container_of(dev, struct firmware_priv, dev);
@@ -109,9 +336,10 @@ static struct firmware_priv *to_firmware_priv(struct device *dev)
static void fw_load_abort(struct firmware_priv *fw_priv)
{
- set_bit(FW_STATUS_ABORT, &fw_priv->status);
- wmb();
- complete(&fw_priv->completion);
+ struct firmware_buf *buf = fw_priv->buf;
+
+ set_bit(FW_STATUS_ABORT, &buf->status);
+ complete_all(&buf->completion);
}
static ssize_t firmware_timeout_show(struct class *class,
@@ -154,11 +382,7 @@ static struct class_attribute firmware_class_attrs[] = {
static void fw_dev_release(struct device *dev)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- int i;
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
kfree(fw_priv);
module_put(THIS_MODULE);
@@ -168,7 +392,7 @@ static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->fw_id))
+ if (add_uevent_var(env, "FIRMWARE=%s", fw_priv->buf->fw_id))
return -ENOMEM;
if (add_uevent_var(env, "TIMEOUT=%i", loading_timeout))
return -ENOMEM;
@@ -189,26 +413,41 @@ static ssize_t firmware_loading_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- int loading = test_bit(FW_STATUS_LOADING, &fw_priv->status);
+ int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
return sprintf(buf, "%d\n", loading);
}
+/* firmware holds ownership of the pages */
static void firmware_free_data(const struct firmware *fw)
{
- int i;
- vunmap(fw->data);
- if (fw->pages) {
- for (i = 0; i < PFN_UP(fw->size); i++)
- __free_page(fw->pages[i]);
- kfree(fw->pages);
+ /* Loaded directly? */
+ if (!fw->priv) {
+ vfree(fw->data);
+ return;
}
+ fw_free_buf(fw->priv);
}
/* Some architectures don't have PAGE_KERNEL_RO */
#ifndef PAGE_KERNEL_RO
#define PAGE_KERNEL_RO PAGE_KERNEL
#endif
+
+/* a pages buffer should be mapped/unmapped only once */
+static int fw_map_pages_buf(struct firmware_buf *buf)
+{
+ if (buf->fmt != PAGE_BUF)
+ return 0;
+
+ if (buf->data)
+ vunmap(buf->data);
+ buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
+ if (!buf->data)
+ return -ENOMEM;
+ return 0;
+}
+
/**
* firmware_loading_store - set value in the 'loading' control file
* @dev: device pointer
@@ -227,45 +466,41 @@ static ssize_t firmware_loading_store(struct device *dev,
const char *buf, size_t count)
{
struct firmware_priv *fw_priv = to_firmware_priv(dev);
+ struct firmware_buf *fw_buf = fw_priv->buf;
int loading = simple_strtol(buf, NULL, 10);
int i;
mutex_lock(&fw_lock);
- if (!fw_priv->fw)
+ if (!fw_buf)
goto out;
switch (loading) {
case 1:
- firmware_free_data(fw_priv->fw);
- memset(fw_priv->fw, 0, sizeof(struct firmware));
- /* If the pages are not owned by 'struct firmware' */
- for (i = 0; i < fw_priv->nr_pages; i++)
- __free_page(fw_priv->pages[i]);
- kfree(fw_priv->pages);
- fw_priv->pages = NULL;
- fw_priv->page_array_size = 0;
- fw_priv->nr_pages = 0;
- set_bit(FW_STATUS_LOADING, &fw_priv->status);
+ /* discarding any previous partial load */
+ if (!test_bit(FW_STATUS_DONE, &fw_buf->status)) {
+ for (i = 0; i < fw_buf->nr_pages; i++)
+ __free_page(fw_buf->pages[i]);
+ kfree(fw_buf->pages);
+ fw_buf->pages = NULL;
+ fw_buf->page_array_size = 0;
+ fw_buf->nr_pages = 0;
+ set_bit(FW_STATUS_LOADING, &fw_buf->status);
+ }
break;
case 0:
- if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
- vunmap(fw_priv->fw->data);
- fw_priv->fw->data = vmap(fw_priv->pages,
- fw_priv->nr_pages,
- 0, PAGE_KERNEL_RO);
- if (!fw_priv->fw->data) {
- dev_err(dev, "%s: vmap() failed\n", __func__);
- goto err;
- }
- /* Pages are now owned by 'struct firmware' */
- fw_priv->fw->pages = fw_priv->pages;
- fw_priv->pages = NULL;
-
- fw_priv->page_array_size = 0;
- fw_priv->nr_pages = 0;
- complete(&fw_priv->completion);
- clear_bit(FW_STATUS_LOADING, &fw_priv->status);
+ if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
+ set_bit(FW_STATUS_DONE, &fw_buf->status);
+ clear_bit(FW_STATUS_LOADING, &fw_buf->status);
+
+ /*
+ * Several loading requests may be pending on
+ * the same firmware buf, so let all requests
+ * see the mapped 'buf->data' once the loading
+ * is completed.
+ */
+ fw_map_pages_buf(fw_buf);
+ complete_all(&fw_buf->completion);
break;
}
/* fallthrough */
@@ -273,7 +508,6 @@ static ssize_t firmware_loading_store(struct device *dev,
dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
/* fallthrough */
case -1:
- err:
fw_load_abort(fw_priv);
break;
}
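
For reference, the sysfs protocol that firmware_loading_store() implements is driven from userspace as: write 1 to 'loading', stream the image into 'data', then write 0 (or -1 to abort). A minimal userspace C sketch; the sysfs directory path is invented here:

	#include <fcntl.h>
	#include <unistd.h>

	/* hypothetical sysfs directory for one pending firmware request */
	#define FW_DIR "/sys/class/firmware/example"

	static int load_firmware(int img_fd)
	{
		char buf[4096];
		ssize_t n;
		int loading = open(FW_DIR "/loading", O_WRONLY);
		int data = open(FW_DIR "/data", O_WRONLY);

		if (loading < 0 || data < 0)
			return -1;
		write(loading, "1", 1);		/* discard partial loads */
		while ((n = read(img_fd, buf, sizeof(buf))) > 0)
			write(data, buf, n);	/* lands in buf->pages[] */
		/* 0 marks the image complete, -1 aborts the request */
		if (n < 0)
			write(loading, "-1", 2);
		else
			write(loading, "0", 1);
		close(loading);
		close(data);
		return n < 0 ? -1 : 0;
	}
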
@@ -290,21 +524,21 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
{
struct device *dev = kobj_to_dev(kobj);
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware *fw;
+ struct firmware_buf *buf;
ssize_t ret_count;
mutex_lock(&fw_lock);
- fw = fw_priv->fw;
- if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+ buf = fw_priv->buf;
+ if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
ret_count = -ENODEV;
goto out;
}
- if (offset > fw->size) {
+ if (offset > buf->size) {
ret_count = 0;
goto out;
}
- if (count > fw->size - offset)
- count = fw->size - offset;
+ if (count > buf->size - offset)
+ count = buf->size - offset;
ret_count = count;
@@ -314,11 +548,11 @@ static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
int page_ofs = offset & (PAGE_SIZE-1);
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
- page_data = kmap(fw_priv->pages[page_nr]);
+ page_data = kmap(buf->pages[page_nr]);
memcpy(buffer, page_data + page_ofs, page_cnt);
- kunmap(fw_priv->pages[page_nr]);
+ kunmap(buf->pages[page_nr]);
buffer += page_cnt;
offset += page_cnt;
count -= page_cnt;
@@ -330,12 +564,13 @@ out:
static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
{
+ struct firmware_buf *buf = fw_priv->buf;
int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
/* If the array of pages is too small, grow it... */
- if (fw_priv->page_array_size < pages_needed) {
+ if (buf->page_array_size < pages_needed) {
int new_array_size = max(pages_needed,
- fw_priv->page_array_size * 2);
+ buf->page_array_size * 2);
struct page **new_pages;
new_pages = kmalloc(new_array_size * sizeof(void *),
@@ -344,24 +579,24 @@ static int fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
fw_load_abort(fw_priv);
return -ENOMEM;
}
- memcpy(new_pages, fw_priv->pages,
- fw_priv->page_array_size * sizeof(void *));
- memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
- (new_array_size - fw_priv->page_array_size));
- kfree(fw_priv->pages);
- fw_priv->pages = new_pages;
- fw_priv->page_array_size = new_array_size;
+ memcpy(new_pages, buf->pages,
+ buf->page_array_size * sizeof(void *));
+ memset(&new_pages[buf->page_array_size], 0, sizeof(void *) *
+ (new_array_size - buf->page_array_size));
+ kfree(buf->pages);
+ buf->pages = new_pages;
+ buf->page_array_size = new_array_size;
}
- while (fw_priv->nr_pages < pages_needed) {
- fw_priv->pages[fw_priv->nr_pages] =
+ while (buf->nr_pages < pages_needed) {
+ buf->pages[buf->nr_pages] =
alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
- if (!fw_priv->pages[fw_priv->nr_pages]) {
+ if (!buf->pages[buf->nr_pages]) {
fw_load_abort(fw_priv);
return -ENOMEM;
}
- fw_priv->nr_pages++;
+ buf->nr_pages++;
}
return 0;
}
@@ -384,18 +619,19 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
{
struct device *dev = kobj_to_dev(kobj);
struct firmware_priv *fw_priv = to_firmware_priv(dev);
- struct firmware *fw;
+ struct firmware_buf *buf;
ssize_t retval;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
mutex_lock(&fw_lock);
- fw = fw_priv->fw;
- if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+ buf = fw_priv->buf;
+ if (!buf || test_bit(FW_STATUS_DONE, &buf->status)) {
retval = -ENODEV;
goto out;
}
+
retval = fw_realloc_buffer(fw_priv, offset + count);
if (retval)
goto out;
@@ -408,17 +644,17 @@ static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
int page_ofs = offset & (PAGE_SIZE - 1);
int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
- page_data = kmap(fw_priv->pages[page_nr]);
+ page_data = kmap(buf->pages[page_nr]);
memcpy(page_data + page_ofs, buffer, page_cnt);
- kunmap(fw_priv->pages[page_nr]);
+ kunmap(buf->pages[page_nr]);
buffer += page_cnt;
offset += page_cnt;
count -= page_cnt;
}
- fw->size = max_t(size_t, offset, fw->size);
+ buf->size = max_t(size_t, offset, buf->size);
out:
mutex_unlock(&fw_lock);
return retval;
@@ -445,35 +681,111 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
struct firmware_priv *fw_priv;
struct device *f_dev;
- fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
+ fw_priv = kzalloc(sizeof(*fw_priv), GFP_KERNEL);
if (!fw_priv) {
dev_err(device, "%s: kmalloc failed\n", __func__);
- return ERR_PTR(-ENOMEM);
+ fw_priv = ERR_PTR(-ENOMEM);
+ goto exit;
}
- fw_priv->fw = firmware;
fw_priv->nowait = nowait;
- strcpy(fw_priv->fw_id, fw_name);
- init_completion(&fw_priv->completion);
+ fw_priv->fw = firmware;
setup_timer(&fw_priv->timeout,
firmware_class_timeout, (u_long) fw_priv);
f_dev = &fw_priv->dev;
device_initialize(f_dev);
- dev_set_name(f_dev, "%s", dev_name(device));
+ dev_set_name(f_dev, "%s", fw_name);
f_dev->parent = device;
f_dev->class = &firmware_class;
-
+exit:
return fw_priv;
}
+/* store the pages buffer info into firmware from buf */
+static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
+{
+ fw->priv = buf;
+ fw->pages = buf->pages;
+ fw->size = buf->size;
+ fw->data = buf->data;
+
+ pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
+ __func__, buf->fw_id, buf, buf->data,
+ (unsigned int)buf->size);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void fw_name_devm_release(struct device *dev, void *res)
+{
+ struct fw_name_devm *fwn = res;
+
+ if (fwn->magic == (unsigned long)&fw_cache)
+ pr_debug("%s: fw_name-%s devm-%p released\n",
+ __func__, fwn->name, res);
+}
+
+static int fw_devm_match(struct device *dev, void *res,
+ void *match_data)
+{
+ struct fw_name_devm *fwn = res;
+
+ return (fwn->magic == (unsigned long)&fw_cache) &&
+ !strcmp(fwn->name, match_data);
+}
+
+static struct fw_name_devm *fw_find_devm_name(struct device *dev,
+ const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = devres_find(dev, fw_name_devm_release,
+ fw_devm_match, (void *)name);
+ return fwn;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ struct fw_name_devm *fwn;
+
+ fwn = fw_find_devm_name(dev, name);
+ if (fwn)
+ return 1;
+
+ fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
+ strlen(name) + 1, GFP_KERNEL);
+ if (!fwn)
+ return -ENOMEM;
+
+ fwn->magic = (unsigned long)&fw_cache;
+ strcpy(fwn->name, name);
+ devres_add(dev, fwn);
+
+ return 0;
+}
+#else
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+ return 0;
+}
+#endif
+
+static void _request_firmware_cleanup(const struct firmware **firmware_p)
+{
+ release_firmware(*firmware_p);
+ *firmware_p = NULL;
+}
+
static struct firmware_priv *
_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
struct device *device, bool uevent, bool nowait)
{
struct firmware *firmware;
- struct firmware_priv *fw_priv;
+ struct firmware_priv *fw_priv = NULL;
+ struct firmware_buf *buf;
+ int ret;
if (!firmware_p)
return ERR_PTR(-EINVAL);
@@ -490,18 +802,46 @@ _request_firmware_prepare(const struct firmware **firmware_p, const char *name,
return NULL;
}
- fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
- if (IS_ERR(fw_priv)) {
- release_firmware(firmware);
+ ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+ if (!ret)
+ fw_priv = fw_create_instance(firmware, name, device,
+ uevent, nowait);
+
+ if (IS_ERR(fw_priv) || ret < 0) {
+ kfree(firmware);
*firmware_p = NULL;
+ return ERR_PTR(-ENOMEM);
+ } else if (fw_priv) {
+ fw_priv->buf = buf;
+
+ /*
+ * bind with 'buf' now to avoid warning in failure path
+ * of requesting firmware.
+ */
+ firmware->priv = buf;
+ return fw_priv;
}
- return fw_priv;
-}
-static void _request_firmware_cleanup(const struct firmware **firmware_p)
-{
- release_firmware(*firmware_p);
- *firmware_p = NULL;
+ /* share the cached buf, which is in progress or completed */
+ check_status:
+ mutex_lock(&fw_lock);
+ if (test_bit(FW_STATUS_ABORT, &buf->status)) {
+ fw_priv = ERR_PTR(-ENOENT);
+ firmware->priv = buf;
+ _request_firmware_cleanup(firmware_p);
+ goto exit;
+ } else if (test_bit(FW_STATUS_DONE, &buf->status)) {
+ fw_priv = NULL;
+ fw_set_page_data(buf, firmware);
+ goto exit;
+ }
+ mutex_unlock(&fw_lock);
+ wait_for_completion(&buf->completion);
+ goto check_status;
+
+exit:
+ mutex_unlock(&fw_lock);
+ return fw_priv;
}
static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
@@ -509,6 +849,23 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
{
int retval = 0;
struct device *f_dev = &fw_priv->dev;
+ struct firmware_buf *buf = fw_priv->buf;
+ struct firmware_cache *fwc = &fw_cache;
+ int direct_load = 0;
+
+ /* try direct loading from fs first */
+ if (fw_get_filesystem_firmware(buf)) {
+ dev_dbg(f_dev->parent, "firmware: direct-loading"
+ " firmware %s\n", buf->fw_id);
+
+ set_bit(FW_STATUS_DONE, &buf->status);
+ complete_all(&buf->completion);
+ direct_load = 1;
+ goto handle_fw;
+ }
+
+ /* fall back on userspace loading */
+ buf->fmt = PAGE_BUF;
dev_set_uevent_suppress(f_dev, true);
@@ -535,7 +892,7 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
if (uevent) {
dev_set_uevent_suppress(f_dev, false);
- dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
+ dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
if (timeout != MAX_SCHEDULE_TIMEOUT)
mod_timer(&fw_priv->timeout,
round_jiffies_up(jiffies + timeout));
@@ -543,17 +900,43 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
- wait_for_completion(&fw_priv->completion);
+ wait_for_completion(&buf->completion);
- set_bit(FW_STATUS_DONE, &fw_priv->status);
del_timer_sync(&fw_priv->timeout);
+handle_fw:
mutex_lock(&fw_lock);
- if (!fw_priv->fw->size || test_bit(FW_STATUS_ABORT, &fw_priv->status))
+ if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
retval = -ENOENT;
- fw_priv->fw = NULL;
+
+ /*
+ * Add the firmware name into the devres list so that we can
+ * auto-cache and uncache firmware for the device.
+ *
+ * f_dev->parent may have been deleted already, but the problem
+ * should be fixed in devres or the driver core.
+ */
+ if (!retval && f_dev->parent)
+ fw_add_devm_name(f_dev->parent, buf->fw_id);
+
+ /*
+ * After caching of firmware images has started, let this
+ * request piggyback on it.
+ */
+ if (!retval && fwc->state == FW_LOADER_START_CACHE) {
+ if (fw_cache_piggyback_on_request(buf->fw_id))
+ kref_get(&buf->ref);
+ }
+
+ /* pass the pages buffer to driver at the last minute */
+ fw_set_page_data(buf, fw_priv->fw);
+
+ fw_priv->buf = NULL;
mutex_unlock(&fw_lock);
+ if (direct_load)
+ goto err_put_dev;
+
device_remove_file(f_dev, &dev_attr_loading);
err_del_bin_attr:
device_remove_bin_file(f_dev, &firmware_attr_data);
@@ -578,6 +961,8 @@ err_put_dev:
* @name will be used as $FIRMWARE in the uevent environment and
* should be distinctive enough not to be confused with any other
* firmware image for this or any other device.
+ *
+ * Caller must hold the reference count of @device.
**/
int
request_firmware(const struct firmware **firmware_p, const char *name,
@@ -659,6 +1044,7 @@ static void request_firmware_work_func(struct work_struct *work)
out:
fw_work->cont(fw, fw_work->context);
+ put_device(fw_work->device);
module_put(fw_work->module);
kfree(fw_work);
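
With the get_device()/put_device() pairing added above, the work item now holds its own reference on the device across the asynchronous load. A hedged caller sketch; example_fw_cont is an invented completion callback:

	static void example_fw_cont(const struct firmware *fw, void *context)
	{
		/* fw is NULL if the load failed; release it when done */
		release_firmware(fw);
	}

	static int example_probe(struct platform_device *pdev)
	{
		/*
		 * The caller must still hold a reference on &pdev->dev,
		 * as the updated kerneldoc below states.
		 */
		return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					       "example/fw.bin", &pdev->dev,
					       GFP_KERNEL, pdev,
					       example_fw_cont);
	}
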
@@ -677,9 +1063,15 @@ static void request_firmware_work_func(struct work_struct *work)
* @cont: function will be called asynchronously when the firmware
* request is over.
*
- * Asynchronous variant of request_firmware() for user contexts where
- * it is not possible to sleep for long time. It can't be called
- * in atomic contexts.
+ * Caller must hold the reference count of @device.
+ *
+ * Asynchronous variant of request_firmware() for user contexts:
+ * - sleeps for as short a period as possible, since it may
+ * otherwise increase the kernel boot time of built-in device
+ * drivers requesting firmware in their ->probe() methods,
+ * if @gfp is GFP_KERNEL.
+ *
+ * - can't sleep at all if @gfp is GFP_ATOMIC.
**/
int
request_firmware_nowait(
@@ -705,18 +1097,364 @@ request_firmware_nowait(
return -EFAULT;
}
+ get_device(fw_work->device);
INIT_WORK(&fw_work->work, request_firmware_work_func);
schedule_work(&fw_work->work);
return 0;
}
+/**
+ * cache_firmware - cache one firmware image in kernel memory space
+ * @fw_name: the firmware image name
+ *
+ * Cache firmware in kernel memory so that drivers can use it when the
+ * system isn't ready for them to request a firmware image from userspace.
+ * Once it returns successfully, a driver can use request_firmware() or
+ * its nowait version to get the cached firmware without any interaction
+ * with userspace.
+ *
+ * Return 0 if the firmware image has been cached successfully
+ * Return !0 otherwise
+ *
+ */
+int cache_firmware(const char *fw_name)
+{
+ int ret;
+ const struct firmware *fw;
+
+ pr_debug("%s: %s\n", __func__, fw_name);
+
+ ret = request_firmware(&fw, fw_name, NULL);
+ if (!ret)
+ kfree(fw);
+
+ pr_debug("%s: %s ret=%d\n", __func__, fw_name, ret);
+
+ return ret;
+}
+
+/**
+ * uncache_firmware - remove one cached firmware image
+ * @fw_name: the firmware image name
+ *
+ * Uncache one firmware image which has been cached successfully
+ * before.
+ *
+ * Return 0 if the firmware cache has been removed successfully
+ * Return !0 otherwise
+ *
+ */
+int uncache_firmware(const char *fw_name)
+{
+ struct firmware_buf *buf;
+ struct firmware fw;
+
+ pr_debug("%s: %s\n", __func__, fw_name);
+
+ if (fw_get_builtin_firmware(&fw, fw_name))
+ return 0;
+
+ buf = fw_lookup_buf(fw_name);
+ if (buf) {
+ fw_free_buf(buf);
+ return 0;
+ }
+
+ return -EINVAL;
+}
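
cache_firmware() above simply drives request_firmware() with a NULL device and frees the struct firmware shell, leaving the refcounted firmware_buf pinned in fw_cache; uncache_firmware() drops that pin. A sketch of the pairing (in-tree it is driven by the PM notifier further below, not by individual drivers):

	/* sketch: pin an image across a window without userspace access */
	static void example_pin_cycle(void)
	{
		if (cache_firmware("example/fw.bin"))
			return;		/* not cached; nothing to undo */

		/*
		 * request_firmware(&fw, "example/fw.bin", dev) now
		 * succeeds from the cache, without userspace help.
		 */

		uncache_firmware("example/fw.bin");
	}
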
+
+#ifdef CONFIG_PM_SLEEP
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
+
+static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
+{
+ struct fw_cache_entry *fce;
+
+ fce = kzalloc(sizeof(*fce) + strlen(name) + 1, GFP_ATOMIC);
+ if (!fce)
+ goto exit;
+
+ strcpy(fce->name, name);
+exit:
+ return fce;
+}
+
+static int __fw_entry_found(const char *name)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ struct fw_cache_entry *fce;
+
+ list_for_each_entry(fce, &fwc->fw_names, list) {
+ if (!strcmp(fce->name, name))
+ return 1;
+ }
+ return 0;
+}
+
+static int fw_cache_piggyback_on_request(const char *name)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ struct fw_cache_entry *fce;
+ int ret = 0;
+
+ spin_lock(&fwc->name_lock);
+ if (__fw_entry_found(name))
+ goto found;
+
+ fce = alloc_fw_cache_entry(name);
+ if (fce) {
+ ret = 1;
+ list_add(&fce->list, &fwc->fw_names);
+ pr_debug("%s: fw: %s\n", __func__, name);
+ }
+found:
+ spin_unlock(&fwc->name_lock);
+ return ret;
+}
+
+static void free_fw_cache_entry(struct fw_cache_entry *fce)
+{
+ kfree(fce);
+}
+
+static void __async_dev_cache_fw_image(void *fw_entry,
+ async_cookie_t cookie)
+{
+ struct fw_cache_entry *fce = fw_entry;
+ struct firmware_cache *fwc = &fw_cache;
+ int ret;
+
+ ret = cache_firmware(fce->name);
+ if (ret) {
+ spin_lock(&fwc->name_lock);
+ list_del(&fce->list);
+ spin_unlock(&fwc->name_lock);
+
+ free_fw_cache_entry(fce);
+ }
+}
+
+/* called with dev->devres_lock held */
+static void dev_create_fw_entry(struct device *dev, void *res,
+ void *data)
+{
+ struct fw_name_devm *fwn = res;
+ const char *fw_name = fwn->name;
+ struct list_head *head = data;
+ struct fw_cache_entry *fce;
+
+ fce = alloc_fw_cache_entry(fw_name);
+ if (fce)
+ list_add(&fce->list, head);
+}
+
+static int devm_name_match(struct device *dev, void *res,
+ void *match_data)
+{
+ struct fw_name_devm *fwn = res;
+ return (fwn->magic == (unsigned long)match_data);
+}
+
+static void dev_cache_fw_image(struct device *dev, void *data)
+{
+ LIST_HEAD(todo);
+ struct fw_cache_entry *fce;
+ struct fw_cache_entry *fce_next;
+ struct firmware_cache *fwc = &fw_cache;
+
+ devres_for_each_res(dev, fw_name_devm_release,
+ devm_name_match, &fw_cache,
+ dev_create_fw_entry, &todo);
+
+ list_for_each_entry_safe(fce, fce_next, &todo, list) {
+ list_del(&fce->list);
+
+ spin_lock(&fwc->name_lock);
+ /* only one cache entry for one firmware */
+ if (!__fw_entry_found(fce->name)) {
+ list_add(&fce->list, &fwc->fw_names);
+ } else {
+ free_fw_cache_entry(fce);
+ fce = NULL;
+ }
+ spin_unlock(&fwc->name_lock);
+
+ if (fce)
+ async_schedule_domain(__async_dev_cache_fw_image,
+ (void *)fce,
+ &fw_cache_domain);
+ }
+}
+
+static void __device_uncache_fw_images(void)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ struct fw_cache_entry *fce;
+
+ spin_lock(&fwc->name_lock);
+ while (!list_empty(&fwc->fw_names)) {
+ fce = list_entry(fwc->fw_names.next,
+ struct fw_cache_entry, list);
+ list_del(&fce->list);
+ spin_unlock(&fwc->name_lock);
+
+ uncache_firmware(fce->name);
+ free_fw_cache_entry(fce);
+
+ spin_lock(&fwc->name_lock);
+ }
+ spin_unlock(&fwc->name_lock);
+}
+
+/**
+ * device_cache_fw_images - cache devices' firmware
+ *
+ * If a device has called request_firmware() or its nowait version
+ * successfully before, the firmware names are recorded in the
+ * device's devres list, so device_cache_fw_images() can call
+ * cache_firmware() to cache those images for the device. The
+ * device driver can then load its firmware easily at a time
+ * when the system is not ready to complete loading via userspace.
+ */
+static void device_cache_fw_images(void)
+{
+ struct firmware_cache *fwc = &fw_cache;
+ int old_timeout;
+ DEFINE_WAIT(wait);
+
+ pr_debug("%s\n", __func__);
+
+ /* cancel uncache work */
+ cancel_delayed_work_sync(&fwc->work);
+
+ /*
+ * Use a small loading timeout for caching devices' firmware,
+ * because all these firmware images have been loaded
+ * successfully at least once and the system is now ready to
+ * complete firmware loading. The maximum size of firmware in
+ * current distributions is about 2M bytes, so 10 secs should
+ * be enough.
+ */
+ old_timeout = loading_timeout;
+ loading_timeout = 10;
+
+ mutex_lock(&fw_lock);
+ fwc->state = FW_LOADER_START_CACHE;
+ dpm_for_each_dev(NULL, dev_cache_fw_image);
+ mutex_unlock(&fw_lock);
+
+ /* wait for completion of caching firmware for all devices */
+ async_synchronize_full_domain(&fw_cache_domain);
+
+ loading_timeout = old_timeout;
+}
+
+/**
+ * device_uncache_fw_images - uncache devices' firmware
+ *
+ * Uncache all firmware images which have been cached successfully
+ * by device_cache_fw_images() earlier.
+ */
+static void device_uncache_fw_images(void)
+{
+ pr_debug("%s\n", __func__);
+ __device_uncache_fw_images();
+}
+
+static void device_uncache_fw_images_work(struct work_struct *work)
+{
+ device_uncache_fw_images();
+}
+
+/**
+ * device_uncache_fw_images_delay - uncache devices' firmware
+ * @delay: number of milliseconds to delay uncaching device firmware
+ *
+ * Uncache all devices' firmware which has been cached successfully
+ * by device_cache_fw_images() after @delay milliseconds.
+ */
+static void device_uncache_fw_images_delay(unsigned long delay)
+{
+ schedule_delayed_work(&fw_cache.work,
+ msecs_to_jiffies(delay));
+}
+
+static int fw_pm_notify(struct notifier_block *notify_block,
+ unsigned long mode, void *unused)
+{
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ device_cache_fw_images();
+ break;
+
+ case PM_POST_SUSPEND:
+ case PM_POST_HIBERNATION:
+ case PM_POST_RESTORE:
+ /*
+ * In case system sleep failed and syscore_suspend() was
+ * not called.
+ */
+ mutex_lock(&fw_lock);
+ fw_cache.state = FW_LOADER_NO_CACHE;
+ mutex_unlock(&fw_lock);
+
+ device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
+ break;
+ }
+
+ return 0;
+}
+
+/* stop caching firmware once syscore_suspend is reached */
+static int fw_suspend(void)
+{
+ fw_cache.state = FW_LOADER_NO_CACHE;
+ return 0;
+}
+
+static struct syscore_ops fw_syscore_ops = {
+ .suspend = fw_suspend,
+};
+#else
+static int fw_cache_piggyback_on_request(const char *name)
+{
+ return 0;
+}
+#endif
+
+static void __init fw_cache_init(void)
+{
+ spin_lock_init(&fw_cache.lock);
+ INIT_LIST_HEAD(&fw_cache.head);
+ fw_cache.state = FW_LOADER_NO_CACHE;
+
+#ifdef CONFIG_PM_SLEEP
+ spin_lock_init(&fw_cache.name_lock);
+ INIT_LIST_HEAD(&fw_cache.fw_names);
+
+ INIT_DELAYED_WORK(&fw_cache.work,
+ device_uncache_fw_images_work);
+
+ fw_cache.pm_notify.notifier_call = fw_pm_notify;
+ register_pm_notifier(&fw_cache.pm_notify);
+
+ register_syscore_ops(&fw_syscore_ops);
+#endif
+}
+
static int __init firmware_class_init(void)
{
+ fw_cache_init();
return class_register(&firmware_class);
}
static void __exit firmware_class_exit(void)
{
+#ifdef CONFIG_PM_SLEEP
+ unregister_syscore_ops(&fw_syscore_ops);
+ unregister_pm_notifier(&fw_cache.pm_notify);
+#endif
class_unregister(&firmware_class);
}
@@ -726,3 +1464,5 @@ module_exit(firmware_class_exit);
EXPORT_SYMBOL(release_firmware);
EXPORT_SYMBOL(request_firmware);
EXPORT_SYMBOL(request_firmware_nowait);
+EXPORT_SYMBOL_GPL(cache_firmware);
+EXPORT_SYMBOL_GPL(uncache_firmware);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 7dda4f790f0..86c88216a50 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -248,26 +248,23 @@ static bool pages_correctly_reserved(unsigned long start_pfn,
static int
memory_block_action(unsigned long phys_index, unsigned long action)
{
- unsigned long start_pfn, start_paddr;
+ unsigned long start_pfn;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
int ret;
first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
+ start_pfn = page_to_pfn(first_page);
switch (action) {
case MEM_ONLINE:
- start_pfn = page_to_pfn(first_page);
-
if (!pages_correctly_reserved(start_pfn, nr_pages))
return -EBUSY;
ret = online_pages(start_pfn, nr_pages);
break;
case MEM_OFFLINE:
- start_paddr = page_to_pfn(first_page) << PAGE_SHIFT;
- ret = remove_memory(start_paddr,
- nr_pages << PAGE_SHIFT);
+ ret = offline_pages(start_pfn, nr_pages);
break;
default:
WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
@@ -278,13 +275,11 @@ memory_block_action(unsigned long phys_index, unsigned long action)
return ret;
}
-static int memory_block_change_state(struct memory_block *mem,
+static int __memory_block_change_state(struct memory_block *mem,
unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
- mutex_lock(&mem->state_mutex);
-
if (mem->state != from_state_req) {
ret = -EINVAL;
goto out;
@@ -312,10 +307,20 @@ static int memory_block_change_state(struct memory_block *mem,
break;
}
out:
- mutex_unlock(&mem->state_mutex);
return ret;
}
+static int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state, unsigned long from_state_req)
+{
+ int ret;
+
+ mutex_lock(&mem->state_mutex);
+ ret = __memory_block_change_state(mem, to_state, from_state_req);
+ mutex_unlock(&mem->state_mutex);
+
+ return ret;
+}
static ssize_t
store_mem_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
@@ -656,6 +661,21 @@ int unregister_memory_section(struct mem_section *section)
}
/*
+ * offline one memory block. If the memory block has been offlined, do nothing.
+ */
+int offline_memory_block(struct memory_block *mem)
+{
+ int ret = 0;
+
+ mutex_lock(&mem->state_mutex);
+ if (mem->state != MEM_OFFLINE)
+ ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ mutex_unlock(&mem->state_mutex);
+
+ return ret;
+}
+
+/*
* Initialize the sysfs support for memory devices...
*/
int __init memory_dev_init(void)
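
The memory.c change above is the standard locked/unlocked helper split: the __-prefixed variant assumes state_mutex is held, so the new offline_memory_block() can take the lock once around its own state check. The idiom in the abstract, with invented names:

	#include <linux/mutex.h>

	struct thing {
		struct mutex lock;
		int state;
	};

	/* caller must hold t->lock */
	static int __thing_set_state(struct thing *t, int state)
	{
		if (t->state == state)
			return -EINVAL;
		t->state = state;
		return 0;
	}

	static int thing_set_state(struct thing *t, int state)
	{
		int ret;

		mutex_lock(&t->lock);
		ret = __thing_set_state(t, state);
		mutex_unlock(&t->lock);
		return ret;
	}
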
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index a1a72250258..8727e9c5eea 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -20,8 +20,13 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/idr.h>
#include "base.h"
+#include "power/power.h"
+
+/* For automatically allocated device IDs */
+static DEFINE_IDA(platform_devid_ida);
#define to_platform_driver(drv) (container_of((drv), struct platform_driver, \
driver))
@@ -99,6 +104,9 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];
+ if (unlikely(!r->name))
+ continue;
+
if (type == resource_type(r) && !strcmp(r->name, name))
return r;
}
@@ -263,7 +271,7 @@ EXPORT_SYMBOL_GPL(platform_device_add_data);
*/
int platform_device_add(struct platform_device *pdev)
{
- int i, ret = 0;
+ int i, ret;
if (!pdev)
return -EINVAL;
@@ -273,10 +281,27 @@ int platform_device_add(struct platform_device *pdev)
pdev->dev.bus = &platform_bus_type;
- if (pdev->id != -1)
+ switch (pdev->id) {
+ default:
dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
- else
+ break;
+ case PLATFORM_DEVID_NONE:
dev_set_name(&pdev->dev, "%s", pdev->name);
+ break;
+ case PLATFORM_DEVID_AUTO:
+ /*
+ * Automatically allocated device ID. We mark it as such so
+ * that we remember it must be freed, and we append a suffix
+ * to avoid namespace collision with explicit IDs.
+ */
+ ret = ida_simple_get(&platform_devid_ida, 0, 0, GFP_KERNEL);
+ if (ret < 0)
+ goto err_out;
+ pdev->id = ret;
+ pdev->id_auto = true;
+ dev_set_name(&pdev->dev, "%s.%d.auto", pdev->name, pdev->id);
+ break;
+ }
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
@@ -309,6 +334,11 @@ int platform_device_add(struct platform_device *pdev)
return ret;
failed:
+ if (pdev->id_auto) {
+ ida_simple_remove(&platform_devid_ida, pdev->id);
+ pdev->id = PLATFORM_DEVID_AUTO;
+ }
+
while (--i >= 0) {
struct resource *r = &pdev->resource[i];
unsigned long type = resource_type(r);
@@ -317,6 +347,7 @@ int platform_device_add(struct platform_device *pdev)
release_resource(r);
}
+ err_out:
return ret;
}
EXPORT_SYMBOL_GPL(platform_device_add);
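
With the id handling above, a caller that does not care about device numbering can pass PLATFORM_DEVID_AUTO; the instance number is drawn from platform_devid_ida and returned to it in platform_device_del() or on the failure path. Sketch:

	static struct platform_device *example_add_auto(void)
	{
		/* resulting device is named "example.<N>.auto" */
		return platform_device_register_simple("example",
						       PLATFORM_DEVID_AUTO,
						       NULL, 0);
	}
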
@@ -336,6 +367,11 @@ void platform_device_del(struct platform_device *pdev)
if (pdev) {
device_del(&pdev->dev);
+ if (pdev->id_auto) {
+ ida_simple_remove(&platform_devid_ida, pdev->id);
+ pdev->id = PLATFORM_DEVID_AUTO;
+ }
+
for (i = 0; i < pdev->num_resources; i++) {
struct resource *r = &pdev->resource[i];
unsigned long type = resource_type(r);
@@ -948,6 +984,7 @@ void __init early_platform_add_devices(struct platform_device **devs, int num)
dev = &devs[i]->dev;
if (!dev->devres_head.next) {
+ pm_runtime_early_init(dev);
INIT_LIST_HEAD(&dev->devres_head);
list_add_tail(&dev->devres_head,
&early_platform_device_list);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ba3487c9835..96b71b6536d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -53,6 +53,24 @@
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
+static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
+{
+ struct generic_pm_domain *genpd = NULL, *gpd;
+
+ if (IS_ERR_OR_NULL(domain_name))
+ return NULL;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (!strcmp(gpd->name, domain_name)) {
+ genpd = gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+ return genpd;
+}
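
pm_genpd_lookup_name() underpins the several name-based wrappers added in this patch (pm_genpd_name_poweron(), __pm_genpd_name_add_device(), pm_genpd_add_subdomain_names(), and the cpuidle variants), letting platform code refer to domains it never allocated. A hedged sketch with invented domain names:

	static int example_board_setup(struct device *codec_dev)
	{
		int ret;

		/* "AUDIO" nests inside "TOP"; both registered elsewhere */
		ret = pm_genpd_add_subdomain_names("TOP", "AUDIO");
		if (ret)
			return ret;

		ret = __pm_genpd_name_add_device("AUDIO", codec_dev, NULL);
		if (ret)
			return ret;

		return pm_genpd_name_poweron("AUDIO");
	}
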
+
#ifdef CONFIG_PM
struct generic_pm_domain *dev_to_genpd(struct device *dev)
@@ -256,10 +274,28 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
return ret;
}
+/**
+ * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
+ * @domain_name: Name of the PM domain to power up.
+ */
+int pm_genpd_name_poweron(const char *domain_name)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = pm_genpd_lookup_name(domain_name);
+ return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
+}
+
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
+static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, start, dev);
+}
+
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
@@ -436,7 +472,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
not_suspended = 0;
list_for_each_entry(pdd, &genpd->dev_list, list_node)
if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
- || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
+ || pdd->dev->power.irq_safe))
not_suspended++;
if (not_suspended > genpd->in_progress)
@@ -578,9 +614,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
- if (dev_gpd_data(dev)->always_on)
- return -EBUSY;
-
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
return -EBUSY;
@@ -629,7 +662,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
- return genpd_start_dev(genpd, dev);
+ return genpd_start_dev_no_timing(genpd, dev);
mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
@@ -697,6 +730,24 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#ifdef CONFIG_PM_SLEEP
+/**
+ * pm_genpd_present - Check if the given PM domain has been initialized.
+ * @genpd: PM domain to check.
+ */
+static bool pm_genpd_present(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *gpd;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return false;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node)
+ if (gpd == genpd)
+ return true;
+
+ return false;
+}
+
static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
struct device *dev)
{
@@ -750,9 +801,10 @@ static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
* Check if the given PM domain can be powered off (during system suspend or
* hibernation) and do that if so. Also, in that case propagate to its masters.
*
- * This function is only called in "noirq" stages of system power transitions,
- * so it need not acquire locks (all of the "noirq" callbacks are executed
- * sequentially, so it is guaranteed that it will never run twice in parallel).
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
*/
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
@@ -777,6 +829,33 @@ static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
}
/**
+ * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
+ * @genpd: PM domain to power on.
+ *
+ * This function is only called in "noirq" and "syscore" stages of system power
+ * transitions, so it need not acquire locks (all of the "noirq" callbacks are
+ * executed sequentially, so it is guaranteed that it will never run twice in
+ * parallel).
+ */
+static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
+{
+ struct gpd_link *link;
+
+ if (genpd->status != GPD_STATE_POWER_OFF)
+ return;
+
+ list_for_each_entry(link, &genpd->slave_links, slave_node) {
+ pm_genpd_sync_poweron(link->master);
+ genpd_sd_counter_inc(link->master);
+ }
+
+ if (genpd->power_on)
+ genpd->power_on(genpd);
+
+ genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
* resume_needed - Check whether to resume a device before system suspend.
* @dev: Device to check.
* @genpd: PM domain the device belongs to.
@@ -937,7 +1016,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ if (genpd->suspend_power_off
|| (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
@@ -970,7 +1049,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+ if (genpd->suspend_power_off
|| (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
return 0;
@@ -979,7 +1058,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
* guaranteed that this function will never run twice in parallel for
* the same PM domain, so it is not necessary to use locking here.
*/
- pm_genpd_poweron(genpd);
+ pm_genpd_sync_poweron(genpd);
genpd->suspended_count--;
return genpd_start_dev(genpd, dev);
@@ -1090,8 +1169,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
- 0 : genpd_stop_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}
/**
@@ -1111,8 +1189,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
- 0 : genpd_start_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}
/**
@@ -1186,8 +1263,8 @@ static int pm_genpd_restore_noirq(struct device *dev)
if (genpd->suspended_count++ == 0) {
/*
* The boot kernel might put the domain into arbitrary state,
- * so make it appear as powered off to pm_genpd_poweron(), so
- * that it tries to power it on in case it was really off.
+ * so make it appear as powered off to pm_genpd_sync_poweron(),
+ * so that it tries to power it on in case it was really off.
*/
genpd->status = GPD_STATE_POWER_OFF;
if (genpd->suspend_power_off) {
@@ -1205,9 +1282,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- pm_genpd_poweron(genpd);
+ pm_genpd_sync_poweron(genpd);
- return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
+ return genpd_start_dev(genpd, dev);
}
/**
@@ -1246,6 +1323,31 @@ static void pm_genpd_complete(struct device *dev)
}
}
+/**
+ * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * @dev: Device that normally is marked as "always on" to switch power for.
+ * @suspend: Whether to power the domain off (true) or back on (false).
+ *
+ * This routine may only be called during the system core (syscore) suspend or
+ * resume phase for devices whose "always on" flags are set.
+ */
+void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = dev_to_genpd(dev);
+ if (!pm_genpd_present(genpd))
+ return;
+
+ if (suspend) {
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+ } else {
+ pm_genpd_sync_poweron(genpd);
+ genpd->suspended_count--;
+ }
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
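
pm_genpd_syscore_switch() replaces the removed per-device "always on" flag: hardware such as timekeeping devices that must stay powered into the noirq phases now flips its domain explicitly from syscore ops. A sketch; the device pointer and ops are invented:

	static struct device *example_timer_dev;	/* set at probe time */

	static int example_timer_syscore_suspend(void)
	{
		pm_genpd_syscore_switch(example_timer_dev, true);
		return 0;
	}

	static void example_timer_syscore_resume(void)
	{
		pm_genpd_syscore_switch(example_timer_dev, false);
	}

	static struct syscore_ops example_timer_syscore_ops = {
		.suspend = example_timer_syscore_suspend,
		.resume	 = example_timer_syscore_resume,
	};
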
+
#else
#define pm_genpd_prepare NULL
@@ -1393,6 +1495,19 @@ int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
return __pm_genpd_add_device(genpd, dev, td);
}
+
+/**
+ * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
+ * @domain_name: Name of the PM domain to add the device to.
+ * @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
+ */
+int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
+ struct gpd_timing_data *td)
+{
+ return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
+}
+
/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @genpd: PM domain to remove the device from.
@@ -1455,26 +1570,6 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
}
/**
- * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
- * @dev: Device to set/unset the flag for.
- * @val: The new value of the device's "always on" flag.
- */
-void pm_genpd_dev_always_on(struct device *dev, bool val)
-{
- struct pm_subsys_data *psd;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->power.lock, flags);
-
- psd = dev_to_psd(dev);
- if (psd && psd->domain_data)
- to_gpd_data(psd->domain_data)->always_on = val;
-
- spin_unlock_irqrestore(&dev->power.lock, flags);
-}
-EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
-
-/**
* pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
* @dev: Device to set/unset the flag for.
* @val: The new value of the device's "need restore" flag.
@@ -1505,7 +1600,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
struct gpd_link *link;
int ret = 0;
- if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
+ || genpd == subdomain)
return -EINVAL;
start:
@@ -1552,6 +1648,35 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
+ * @master_name: Name of the master PM domain to add the subdomain to.
+ * @subdomain_name: Name of the subdomain to be added.
+ */
+int pm_genpd_add_subdomain_names(const char *master_name,
+ const char *subdomain_name)
+{
+ struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
+
+ if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
+ return -EINVAL;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (!master && !strcmp(gpd->name, master_name))
+ master = gpd;
+
+ if (!subdomain && !strcmp(gpd->name, subdomain_name))
+ subdomain = gpd;
+
+ if (master && subdomain)
+ break;
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return pm_genpd_add_subdomain(master, subdomain);
+}
+
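+/*
+ * Usage sketch (assumed domain names): wire up a hierarchy by name
+ * during platform setup, e.g. making "a3sp" a subdomain of "a4s":
+ *
+ *	if (pm_genpd_add_subdomain_names("a4s", "a3sp"))
+ *		pr_warn("Failed to add a3sp subdomain\n");
+ */
+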
+/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @genpd: Master PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
@@ -1704,7 +1829,16 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
-int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+/**
+ * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
+ * @genpd: PM domain to be connected with cpuidle.
+ * @state: cpuidle state this domain can disable/enable.
+ *
+ * Make a PM domain behave as though it contained a CPU core: instead of
+ * calling its power down routine, it will enable the given cpuidle state so
+ * that the cpuidle subsystem can power it down (if possible and desirable).
+ */
+int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
struct cpuidle_driver *cpuidle_drv;
struct gpd_cpu_data *cpu_data;
@@ -1728,7 +1862,7 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
cpuidle_drv = cpuidle_driver_ref();
if (!cpuidle_drv) {
ret = -ENODEV;
- goto out;
+ goto err_drv;
}
if (cpuidle_drv->state_count <= state) {
ret = -EINVAL;
@@ -1750,10 +1884,30 @@ int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
err:
cpuidle_driver_unref();
+
+ err_drv:
+ kfree(cpu_data);
goto out;
}
-int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+/**
+ * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
+ * @name: Name of the domain to connect to cpuidle.
+ * @state: cpuidle state this domain can manipulate.
+ */
+int pm_genpd_name_attach_cpuidle(const char *name, int state)
+{
+ return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
+}
+
+/**
+ * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
+ * @genpd: PM domain to remove the cpuidle connection from.
+ *
+ * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
+ * given PM domain.
+ */
+int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
struct gpd_cpu_data *cpu_data;
struct cpuidle_state *idle_state;
@@ -1784,6 +1938,15 @@ int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
return ret;
}
+/**
+ * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
+ * @name: Name of the domain to disconnect cpuidle from.
+ */
+int pm_genpd_name_detach_cpuidle(const char *name)
+{
+ return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
+}
+
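+/*
+ * Usage sketch (assumed names): let domain "a4s" be powered down via
+ * cpuidle state 1 at run time, and restore the normal power-off path
+ * before the domain goes away:
+ *
+ *	pm_genpd_name_attach_cpuidle("a4s", 1);
+ *	...
+ *	pm_genpd_name_detach_cpuidle("a4s");
+ */
+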
/* Default device callbacks for generic PM domains. */
/**
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0113adc310d..a3c1404c793 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,20 +57,17 @@ static pm_message_t pm_transition;
static int async_error;
/**
- * device_pm_init - Initialize the PM-related part of a device object.
+ * device_pm_sleep_init - Initialize system suspend-related device fields.
* @dev: Device object being initialized.
*/
-void device_pm_init(struct device *dev)
+void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
- spin_lock_init(&dev->power.lock);
- pm_runtime_init(dev);
INIT_LIST_HEAD(&dev->power.entry);
- dev->power.power_state = PMSG_INVALID;
}
/**
@@ -408,6 +405,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ if (dev->power.syscore)
+ goto Out;
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -429,6 +429,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
error = dpm_run_callback(callback, dev, state, info);
+ Out:
TRACE_RESUME(error);
return error;
}
@@ -486,6 +487,9 @@ static int device_resume_early(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ if (dev->power.syscore)
+ goto Out;
+
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -507,6 +511,7 @@ static int device_resume_early(struct device *dev, pm_message_t state)
error = dpm_run_callback(callback, dev, state, info);
+ Out:
TRACE_RESUME(error);
return error;
}
@@ -565,11 +570,13 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
char *info = NULL;
int error = 0;
- bool put = false;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ if (dev->power.syscore)
+ goto Complete;
+
dpm_wait(dev->parent, async);
device_lock(dev);
@@ -583,7 +590,6 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
goto Unlock;
pm_runtime_enable(dev);
- put = true;
if (dev->pm_domain) {
info = "power domain ";
@@ -632,13 +638,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
Unlock:
device_unlock(dev);
+
+ Complete:
complete_all(&dev->power.completion);
TRACE_RESUME(error);
- if (put)
- pm_runtime_put_sync(dev);
-
return error;
}
@@ -722,6 +727,9 @@ static void device_complete(struct device *dev, pm_message_t state)
void (*callback)(struct device *) = NULL;
char *info = NULL;
+ if (dev->power.syscore)
+ return;
+
device_lock(dev);
if (dev->pm_domain) {
@@ -749,6 +757,8 @@ static void device_complete(struct device *dev, pm_message_t state)
}
device_unlock(dev);
+
+ pm_runtime_put_sync(dev);
}
/**
@@ -834,6 +844,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
+ if (dev->power.syscore)
+ return 0;
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -917,6 +930,9 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
pm_callback_t callback = NULL;
char *info = NULL;
+ if (dev->power.syscore)
+ return 0;
+
if (dev->pm_domain) {
info = "late power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -996,7 +1012,7 @@ int dpm_suspend_end(pm_message_t state)
error = dpm_suspend_noirq(state);
if (error) {
- dpm_resume_early(state);
+ dpm_resume_early(resume_event(state));
return error;
}
@@ -1043,16 +1059,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (async_error)
goto Complete;
- pm_runtime_get_noresume(dev);
+ /*
+ * If a device configured to wake up the system from sleep states
+ * has been suspended at run time and there's a resume request pending
+ * for it, this is equivalent to the device signaling wakeup, so the
+ * system suspend operation should be aborted.
+ */
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
- pm_runtime_put_sync(dev);
async_error = -EBUSY;
goto Complete;
}
+ if (dev->power.syscore)
+ goto Complete;
+
device_lock(dev);
if (dev->pm_domain) {
@@ -1111,12 +1134,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
Complete:
complete_all(&dev->power.completion);
- if (error) {
- pm_runtime_put_sync(dev);
+ if (error)
async_error = error;
- } else if (dev->power.is_suspended) {
+ else if (dev->power.is_suspended)
__pm_runtime_disable(dev, false);
- }
return error;
}
@@ -1209,6 +1230,17 @@ static int device_prepare(struct device *dev, pm_message_t state)
char *info = NULL;
int error = 0;
+ if (dev->power.syscore)
+ return 0;
+
+ /*
+ * If a device's parent goes into runtime suspend at the wrong time,
+ * it won't be possible to resume the device. To prevent this we
+ * block runtime suspend here, during the prepare phase, and allow
+ * it again during the complete phase.
+ */
+ pm_runtime_get_noresume(dev);
+
device_lock(dev);
dev->power.wakeup_path = device_may_wakeup(dev);
@@ -1324,3 +1356,25 @@ int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
+
+/**
+ * dpm_for_each_dev - device iterator.
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
+ *
+ * Iterate over devices in dpm_list, and call @fn for each device,
+ * passing it @data.
+ */
+void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
+{
+ struct device *dev;
+
+ if (!fn)
+ return;
+
+ device_pm_lock();
+ list_for_each_entry(dev, &dpm_list, power.entry)
+ fn(dev, data);
+ device_pm_unlock();
+}
+EXPORT_SYMBOL_GPL(dpm_for_each_dev);
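+
+/*
+ * Usage sketch (assumed callback): count the devices on dpm_list. The
+ * callback runs with device_pm_lock() held, so it must not call back
+ * into anything that takes the same lock.
+ *
+ *	static void count_dev(struct device *dev, void *data)
+ *	{
+ *		(*(int *)data)++;
+ *	}
+ *
+ *	int n = 0;
+ *	dpm_for_each_dev(&n, count_dev);
+ */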
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ac993eafec8..d9468642fc4 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -22,6 +22,7 @@
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>
+#include <linux/of.h>
/*
* Internal data structure organization with the OPP layer library is as
@@ -674,3 +675,49 @@ struct srcu_notifier_head *opp_get_notifier(struct device *dev)
return &dev_opp->head;
}
+
+#ifdef CONFIG_OF
+/**
+ * of_init_opp_table() - Initialize opp table from device tree
+ * @dev: device pointer used to lookup device OPPs.
+ *
+ * Register the initial OPP table with the OPP library for given device.
+ */
+int of_init_opp_table(struct device *dev)
+{
+ const struct property *prop;
+ const __be32 *val;
+ int nr;
+
+ prop = of_find_property(dev->of_node, "operating-points", NULL);
+ if (!prop)
+ return -ENODEV;
+ if (!prop->value)
+ return -ENODATA;
+
+ /*
+ * Each OPP is a set of tuples consisting of frequency and
+ * voltage like <freq-kHz vol-uV>.
+ */
+ nr = prop->length / sizeof(u32);
+ if (nr % 2) {
+ dev_err(dev, "%s: Invalid OPP list\n", __func__);
+ return -EINVAL;
+ }
+
+ val = prop->value;
+ while (nr) {
+ unsigned long freq = be32_to_cpup(val++) * 1000;
+ unsigned long volt = be32_to_cpup(val++);
+
+ /*
+ * A failed opp_add() must still consume the pair, or nr would
+ * get out of step with val and the loop would read past the
+ * end of the property.
+ */
+ if (opp_add(dev, freq, volt))
+ dev_warn(dev, "%s: Failed to add OPP %lu\n",
+ __func__, freq);
+ nr -= 2;
+ }
+
+ return 0;
+}
+#endif
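+
+/*
+ * Usage sketch: with an (assumed) device tree node carrying
+ * <frequency-kHz voltage-uV> pairs, e.g.
+ *
+ *	cpu@0 {
+ *		operating-points = <792000 1100000
+ *				    396000 950000>;
+ *	};
+ *
+ * a cpufreq driver would call of_init_opp_table(cpu_dev) from its
+ * probe path and then build a frequency table from the resulting OPPs.
+ */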
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index eeb4bff9505..0dbfdf4419a 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -1,12 +1,32 @@
#include <linux/pm_qos.h>
+static inline void device_pm_init_common(struct device *dev)
+{
+ if (!dev->power.early_init) {
+ spin_lock_init(&dev->power.lock);
+ dev->power.power_state = PMSG_INVALID;
+ dev->power.early_init = true;
+ }
+}
+
#ifdef CONFIG_PM_RUNTIME
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ dev->power.disable_depth = 1;
+ device_pm_init_common(dev);
+}
+
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_remove(struct device *dev);
#else /* !CONFIG_PM_RUNTIME */
+static inline void pm_runtime_early_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+}
+
static inline void pm_runtime_init(struct device *dev) {}
static inline void pm_runtime_remove(struct device *dev) {}
@@ -25,7 +45,7 @@ static inline struct device *to_device(struct list_head *entry)
return container_of(entry, struct device, power.entry);
}
-extern void device_pm_init(struct device *dev);
+extern void device_pm_sleep_init(struct device *dev);
extern void device_pm_add(struct device *);
extern void device_pm_remove(struct device *);
extern void device_pm_move_before(struct device *, struct device *);
@@ -34,12 +54,7 @@ extern void device_pm_move_last(struct device *);
#else /* !CONFIG_PM_SLEEP */
-static inline void device_pm_init(struct device *dev)
-{
- spin_lock_init(&dev->power.lock);
- dev->power.power_state = PMSG_INVALID;
- pm_runtime_init(dev);
-}
+static inline void device_pm_sleep_init(struct device *dev) {}
static inline void device_pm_add(struct device *dev)
{
@@ -60,6 +75,13 @@ static inline void device_pm_move_last(struct device *dev) {}
#endif /* !CONFIG_PM_SLEEP */
+static inline void device_pm_init(struct device *dev)
+{
+ device_pm_init_common(dev);
+ device_pm_sleep_init(dev);
+ pm_runtime_init(dev);
+}
+
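+/*
+ * Note that device_pm_init_common() is guarded by power.early_init, so
+ * a device that already went through pm_runtime_early_init() gets its
+ * lock and power_state initialized exactly once; the sleep and runtime
+ * parts are still set up here.
+ */
+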
#ifdef CONFIG_PM
/*
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 7d9c1cb1c39..3148b10dc2e 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -509,6 +509,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
repeat:
if (dev->power.runtime_error)
retval = -EINVAL;
+ else if (dev->power.disable_depth == 1 && dev->power.is_suspended
+ && dev->power.runtime_status == RPM_ACTIVE)
+ retval = 1;
else if (dev->power.disable_depth > 0)
retval = -EACCES;
if (retval)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index cbb463b3a75..e6ee5e80e54 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -127,6 +127,8 @@ EXPORT_SYMBOL_GPL(wakeup_source_destroy);
*/
void wakeup_source_add(struct wakeup_source *ws)
{
+ unsigned long flags;
+
if (WARN_ON(!ws))
return;
@@ -135,9 +137,9 @@ void wakeup_source_add(struct wakeup_source *ws)
ws->active = false;
ws->last_time = ktime_get();
- spin_lock_irq(&events_lock);
+ spin_lock_irqsave(&events_lock, flags);
list_add_rcu(&ws->entry, &wakeup_sources);
- spin_unlock_irq(&events_lock);
+ spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);
@@ -147,12 +149,14 @@ EXPORT_SYMBOL_GPL(wakeup_source_add);
*/
void wakeup_source_remove(struct wakeup_source *ws)
{
+ unsigned long flags;
+
if (WARN_ON(!ws))
return;
- spin_lock_irq(&events_lock);
+ spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
- spin_unlock_irq(&events_lock);
+ spin_unlock_irqrestore(&events_lock, flags);
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -649,6 +653,31 @@ void pm_wakeup_event(struct device *dev, unsigned int msec)
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);
+static void print_active_wakeup_sources(void)
+{
+ struct wakeup_source *ws;
+ int active = 0;
+ struct wakeup_source *last_activity_ws = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ if (ws->active) {
+ pr_info("active wakeup source: %s\n", ws->name);
+ active = 1;
+ } else if (!active &&
+ (!last_activity_ws ||
+ ktime_to_ns(ws->last_time) >
+ ktime_to_ns(last_activity_ws->last_time))) {
+ last_activity_ws = ws;
+ }
+ }
+
+ if (!active && last_activity_ws)
+ pr_info("last active wakeup source: %s\n",
+ last_activity_ws->name);
+ rcu_read_unlock();
+}
+
/**
* pm_wakeup_pending - Check if power transition in progress should be aborted.
*
@@ -671,6 +700,10 @@ bool pm_wakeup_pending(void)
events_check_enabled = !ret;
}
spin_unlock_irqrestore(&events_lock, flags);
+
+ if (ret)
+ print_active_wakeup_sources();
+
return ret;
}
@@ -723,15 +756,16 @@ bool pm_get_wakeup_count(unsigned int *count, bool block)
bool pm_save_wakeup_count(unsigned int count)
{
unsigned int cnt, inpr;
+ unsigned long flags;
events_check_enabled = false;
- spin_lock_irq(&events_lock);
+ spin_lock_irqsave(&events_lock, flags);
split_counters(&cnt, &inpr);
if (cnt == count && inpr == 0) {
saved_count = count;
events_check_enabled = true;
}
- spin_unlock_irq(&events_lock);
+ spin_unlock_irqrestore(&events_lock, flags);
return events_check_enabled;
}
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 6be390bd8bd..f0d30543fcc 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,7 +3,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_MMIO || REGMAP_IRQ)
select LZO_COMPRESS
select LZO_DECOMPRESS
select IRQ_DOMAIN if REGMAP_IRQ