Diffstat (limited to 'drivers/base/power/main.c')
-rw-r--r--  drivers/base/power/main.c  |  280
1 file changed, 230 insertions(+), 50 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 1b41fca3d65..86d5e4fb5b9 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -29,6 +29,7 @@
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
+#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
@@ -91,6 +92,8 @@ void device_pm_sleep_init(struct device *dev)
{
dev->power.is_prepared = false;
dev->power.is_suspended = false;
+ dev->power.is_noirq_suspended = false;
+ dev->power.is_late_suspended = false;
init_completion(&dev->power.completion);
complete_all(&dev->power.completion);
dev->power.wakeup = NULL;
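
The two new flags record, per phase, whether a device actually completed that suspend phase, so the matching resume callback can be skipped for devices that never suspended (for instance after an aborted suspend). This implies a companion change to struct dev_pm_info; a minimal sketch of the assumed include/linux/pm.h layout (the exact field order and placement in the tree may differ):

	bool			is_prepared:1;	/* Owned by the PM core */
	bool			is_suspended:1;	/* Ditto */
	bool			is_noirq_suspended:1;
	bool			is_late_suspended:1;
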
@@ -467,7 +470,7 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_resume_noirq(struct device *dev, pm_message_t state)
+static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -479,6 +482,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_noirq_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "noirq power domain ";
callback = pm_noirq_op(&dev->pm_domain->ops, state);
@@ -499,12 +507,32 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_noirq_suspended = false;
Out:
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
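
is_async(), moved here from later in the file, now gates every phase: a device is handled in its own async thread only when its driver has opted in, the global pm_async switch (/sys/power/pm_async) is enabled, and PM tracing is off, because tracing relies on a strictly ordered resume. Drivers opt in with the existing device_enable_async_suspend() helper; a minimal sketch, where foo_probe() is a hypothetical probe routine:

	#include <linux/device.h>

	static int foo_probe(struct device *dev)
	{
		/* Let the PM core suspend and resume this device in its
		 * own async thread, ordered only by parent/child
		 * constraints rather than by its position in dpm_list. */
		device_enable_async_suspend(dev);
		return 0;
	}
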
+
+static void async_resume_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_noirq(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
@@ -514,29 +542,48 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
*/
static void dpm_resume_noirq(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_noirq_list)) {
- struct device *dev = to_device(dpm_noirq_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront, in case the start of the
+ * async threads is delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_noirq, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_noirq_list)) {
+ dev = to_device(dpm_noirq_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_late_early_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_noirq(dev, state);
- if (error) {
- suspend_stats.failed_resume_noirq++;
- dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " noirq", error);
+ if (!is_async(dev)) {
+ int error;
+
+ error = device_resume_noirq(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_noirq++;
+ dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " noirq", error);
+ }
}
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
cpuidle_resume();
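
Each resume phase now has the same two-pass shape: the first loop fans out an async thread per opted-in device, the second walks the list in order and runs the non-async devices inline, and async_synchronize_full() drains the async threads before the phase is declared finished. Ordering between an async child and its parent comes from the per-device completion: device_resume_noirq() blocks in dpm_wait(dev->parent, async) until the parent has resumed, and signals its own completion on exit. dpm_wait() is pre-existing in this file, shown here for context:

	static void dpm_wait(struct device *dev, bool async)
	{
		if (!dev)
			return;

		if (async || (pm_async_enabled && dev->power.async_suspend))
			wait_for_completion(&dev->power.completion);
	}
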
@@ -549,7 +596,7 @@ static void dpm_resume_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_resume_early(struct device *dev, pm_message_t state)
+static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
@@ -561,6 +608,11 @@ static int device_resume_early(struct device *dev, pm_message_t state)
if (dev->power.syscore)
goto Out;
+ if (!dev->power.is_late_suspended)
+ goto Out;
+
+ dpm_wait(dev->parent, async);
+
if (dev->pm_domain) {
info = "early power domain ";
callback = pm_late_early_op(&dev->pm_domain->ops, state);
@@ -581,43 +633,75 @@ static int device_resume_early(struct device *dev, pm_message_t state)
}
error = dpm_run_callback(callback, dev, state, info);
+ dev->power.is_late_suspended = false;
Out:
TRACE_RESUME(error);
pm_runtime_enable(dev);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_resume_early(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = device_resume_early(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+
+ put_device(dev);
+}
+
/**
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
static void dpm_resume_early(pm_message_t state)
{
+ struct device *dev;
ktime_t starttime = ktime_get();
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_late_early_list)) {
- struct device *dev = to_device(dpm_late_early_list.next);
- int error;
+ pm_transition = state;
+ /*
+ * Advance the async threads upfront, in case the start of the
+ * async threads is delayed by non-async resuming devices.
+ */
+ list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
+ reinit_completion(&dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume_early, dev);
+ }
+ }
+
+ while (!list_empty(&dpm_late_early_list)) {
+ dev = to_device(dpm_late_early_list.next);
get_device(dev);
list_move_tail(&dev->power.entry, &dpm_suspended_list);
mutex_unlock(&dpm_list_mtx);
- error = device_resume_early(dev, state);
- if (error) {
- suspend_stats.failed_resume_early++;
- dpm_save_failed_step(SUSPEND_RESUME_EARLY);
- dpm_save_failed_dev(dev_name(dev));
- pm_dev_err(dev, state, " early", error);
- }
+ if (!is_async(dev)) {
+ int error;
+ error = device_resume_early(dev, state, false);
+ if (error) {
+ suspend_stats.failed_resume_early++;
+ dpm_save_failed_step(SUSPEND_RESUME_EARLY);
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, state, " early", error);
+ }
+ }
mutex_lock(&dpm_list_mtx);
put_device(dev);
}
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, "early");
}
@@ -732,12 +816,6 @@ static void async_resume(void *data, async_cookie_t cookie)
put_device(dev);
}
-static bool is_async(struct device *dev)
-{
- return dev->power.async_suspend && pm_async_enabled
- && !pm_trace_is_enabled();
-}
-
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -789,6 +867,8 @@ void dpm_resume(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
dpm_show_time(starttime, state, NULL);
+
+ cpufreq_resume();
}
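
Together with the cpufreq_suspend() call added at the top of dpm_suspend() (last hunk below), this brackets the whole transition: cpufreq is quiesced before any device suspends and restarted only after every device has resumed, so frequency scaling cannot race with devices going down or coming back up. A sketch of the assumed declarations behind the new <linux/cpufreq.h> include, with no-op stubs when CONFIG_CPU_FREQ is disabled:

	#ifdef CONFIG_CPU_FREQ
	void cpufreq_suspend(void);
	void cpufreq_resume(void);
	#else
	static inline void cpufreq_suspend(void) {}
	static inline void cpufreq_resume(void) {}
	#endif
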
/**
@@ -913,13 +993,24 @@ static pm_message_t resume_event(pm_message_t sleep_state)
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int device_suspend_noirq(struct device *dev, pm_message_t state)
+static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
+
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "noirq power domain ";
@@ -940,7 +1031,41 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
callback = pm_noirq_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_noirq_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_noirq(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_noirq(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend_noirq(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_noirq, dev);
+ return 0;
+ }
+ return __device_suspend_noirq(dev, pm_transition, false);
}
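
Note the inversion relative to resume: before suspending, __device_suspend_noirq() waits in dpm_wait_for_children() until every child has completed, since children must suspend before their parent. The shared async_error also lets the first failure, or a pending wakeup event, short-circuit all later devices. dpm_wait_for_children() is pre-existing in this file, shown here for context:

	static int dpm_wait_fn(struct device *dev, void *async_ptr)
	{
		dpm_wait(dev, *((bool *)async_ptr));
		return 0;
	}

	static void dpm_wait_for_children(struct device *dev, bool async)
	{
		device_for_each_child(dev, &async, dpm_wait_fn);
	}
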
/**
@@ -958,19 +1083,20 @@ static int dpm_suspend_noirq(pm_message_t state)
cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_late_early_list)) {
struct device *dev = to_device(dpm_late_early_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_noirq(dev, state);
+ error = device_suspend_noirq(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " noirq", error);
- suspend_stats.failed_suspend_noirq++;
- dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -979,16 +1105,21 @@ static int dpm_suspend_noirq(pm_message_t state)
list_move(&dev->power.entry, &dpm_noirq_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
+
+ if (error) {
+ suspend_stats.failed_suspend_noirq++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
dpm_resume_noirq(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "noirq");
+ }
return error;
}
@@ -999,15 +1130,26 @@ static int dpm_suspend_noirq(pm_message_t state)
*
* Runtime PM is disabled for @dev while this function is being executed.
*/
-static int device_suspend_late(struct device *dev, pm_message_t state)
+static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
pm_callback_t callback = NULL;
char *info = NULL;
+ int error = 0;
__pm_runtime_disable(dev, false);
+ if (async_error)
+ goto Complete;
+
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore)
- return 0;
+ goto Complete;
+
+ dpm_wait_for_children(dev, async);
if (dev->pm_domain) {
info = "late power domain ";
@@ -1028,7 +1170,41 @@ static int device_suspend_late(struct device *dev, pm_message_t state)
callback = pm_late_early_op(dev->driver->pm, state);
}
- return dpm_run_callback(callback, dev, state, info);
+ error = dpm_run_callback(callback, dev, state, info);
+ if (!error)
+ dev->power.is_late_suspended = true;
+ else
+ async_error = error;
+
+Complete:
+ complete_all(&dev->power.completion);
+ return error;
+}
+
+static void async_suspend_late(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ error = __device_suspend_late(dev, pm_transition, true);
+ if (error) {
+ dpm_save_failed_dev(dev_name(dev));
+ pm_dev_err(dev, pm_transition, " async", error);
+ }
+ put_device(dev);
+}
+
+static int device_suspend_late(struct device *dev)
+{
+ reinit_completion(&dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend_late, dev);
+ return 0;
+ }
+
+ return __device_suspend_late(dev, pm_transition, false);
}
/**
@@ -1041,19 +1217,20 @@ static int dpm_suspend_late(pm_message_t state)
int error = 0;
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
+
while (!list_empty(&dpm_suspended_list)) {
struct device *dev = to_device(dpm_suspended_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend_late(dev, state);
+ error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
pm_dev_err(dev, state, " late", error);
- suspend_stats.failed_suspend_late++;
- dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
@@ -1062,17 +1239,18 @@ static int dpm_suspend_late(pm_message_t state)
list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
- if (pm_wakeup_pending()) {
- error = -EBUSY;
+ if (async_error)
break;
- }
}
mutex_unlock(&dpm_list_mtx);
- if (error)
+ async_synchronize_full();
+ if (error) {
+ suspend_stats.failed_suspend_late++;
+ dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- else
+ } else {
dpm_show_time(starttime, state, "late");
-
+ }
return error;
}
@@ -1259,6 +1437,8 @@ int dpm_suspend(pm_message_t state)
might_sleep();
+ cpufreq_suspend();
+
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;