 drivers/base/power/runtime.c | 151 +++++++++++++++++++++++++++------------
 1 file changed, 101 insertions(+), 50 deletions(-)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index acb3f83b807..8c78443bca8 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -8,7 +8,9 @@
*/
#include <linux/sched.h>
+#include <linux/export.h>
#include <linux/pm_runtime.h>
+#include <trace/events/rpm.h>
#include "power.h"
static int rpm_resume(struct device *dev, int rpmflags);
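The newly included trace/events/rpm.h provides the trace_rpm_suspend(), trace_rpm_resume(), trace_rpm_idle() and trace_rpm_return_int() points that replace the dev_dbg() calls further down. Unlike the debug prints, these can be switched on at runtime through the standard event-tracing interface (the rpm event group under /sys/kernel/debug/tracing/events/), and the return-value trace records the call site via _THIS_IP_.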
@@ -28,13 +30,10 @@ static int rpm_suspend(struct device *dev, int rpmflags);
void update_pm_runtime_accounting(struct device *dev)
{
unsigned long now = jiffies;
- int delta;
+ unsigned long delta;
delta = now - dev->power.accounting_timestamp;
- if (delta < 0)
- delta = 0;
-
dev->power.accounting_timestamp = now;
if (dev->power.disable_depth > 0)
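Widening delta to unsigned long is what makes the removed clamp unnecessary: unsigned subtraction of jiffies values is well defined modulo 2^BITS_PER_LONG, so the difference stays correct even across a counter wrap, whereas a 32-bit int overflows on large deltas and the old "delta < 0" check then threw the accounted time away. A small user-space illustration of the arithmetic (illustrative values only, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned long stamp = 1000;
		/* 2^31 ticks later -- about 25 days at HZ=1000 on 64-bit */
		unsigned long now = stamp + 0x80000000UL;

		int narrow = now - stamp;          /* truncates; typically INT_MIN */
		unsigned long wide = now - stamp;  /* correct: 2147483648 */

		/* The old "if (delta < 0) delta = 0;" would have zeroed
		 * all of this genuinely elapsed time. */
		printf("int: %d, unsigned long: %lu\n", narrow, wide);
		return 0;
	}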
@@ -155,6 +154,31 @@ static int rpm_check_suspend_allowed(struct device *dev)
}
/**
+ * __rpm_callback - Run a given runtime PM callback for a given device.
+ * @cb: Runtime PM callback to run.
+ * @dev: Device to run the callback for.
+ */
+static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
+ __releases(&dev->power.lock) __acquires(&dev->power.lock)
+{
+ int retval;
+
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = cb(dev);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ return retval;
+}
+
+/**
* rpm_idle - Notify device bus type if the device can be suspended.
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
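The helper makes the locking contract explicit: the callback always runs with dev->power.lock dropped, but when power.irq_safe is set only the spinlock is released and interrupts stay disabled, so the callback must not sleep. A sketch of a driver written for that irq-safe path (hypothetical foo_* names and registers, assuming pm_runtime_irq_safe() was called during probe):

	/* Reached through __rpm_callback() with interrupts off:
	 * MMIO only -- no mutexes, no sleeping calls. */
	static int foo_runtime_suspend(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		writel(0, priv->base + FOO_CLK_GATE);
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		writel(1, priv->base + FOO_CLK_GATE);
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
	};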
@@ -171,6 +195,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -225,24 +250,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
- if (callback) {
- if (dev->power.irq_safe)
- spin_unlock(&dev->power.lock);
- else
- spin_unlock_irq(&dev->power.lock);
-
- callback(dev);
-
- if (dev->power.irq_safe)
- spin_lock(&dev->power.lock);
- else
- spin_lock_irq(&dev->power.lock);
- }
+ if (callback)
+ __rpm_callback(callback, dev);
dev->power.idle_notification = false;
wake_up_all(&dev->power.wait_queue);
out:
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
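Note that the idle callback's return value is discarded both before and after the refactoring: an idle notification is only an invitation to start a suspend. A typical shape for such a callback (hypothetical driver code, not part of this patch):

	static int foo_runtime_idle(struct device *dev)
	{
		struct foo_priv *priv = dev_get_drvdata(dev);

		/* Veto while the hardware is busy ... */
		if (readl(priv->base + FOO_STATUS) & FOO_DMA_ACTIVE)
			return -EBUSY;	/* ignored by rpm_idle() anyway */

		/* ... otherwise turn the notification into an autosuspend. */
		pm_runtime_autosuspend(dev);
		return 0;
	}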
@@ -252,22 +267,14 @@ static int rpm_idle(struct device *dev, int rpmflags)
* @dev: Device to run the callback for.
*/
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
- __releases(&dev->power.lock) __acquires(&dev->power.lock)
{
int retval;
if (!cb)
return -ENOSYS;
- if (dev->power.irq_safe) {
- retval = cb(dev);
- } else {
- spin_unlock_irq(&dev->power.lock);
+ retval = __rpm_callback(cb, dev);
- retval = cb(dev);
-
- spin_lock_irq(&dev->power.lock);
- }
dev->power.runtime_error = retval;
return retval != -EACCES ? retval : -EIO;
}
@@ -277,14 +284,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
- * Check if the device's runtime PM status allows it to be suspended. If
- * another suspend has been started earlier, either return immediately or wait
- * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
- * pending idle notification. If the RPM_ASYNC flag is set then queue a
- * suspend request; otherwise run the ->runtime_suspend() callback directly.
- * If a deferred resume was requested while the callback was running then carry
- * it out; otherwise send an idle notification for the device (if the suspend
- * failed) or for its parent (if the suspend succeeded).
+ * Check if the device's runtime PM status allows it to be suspended.
+ * Cancel a pending idle notification, autosuspend or suspend. If
+ * another suspend has been started earlier, either return immediately
+ * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
+ * flags. If the RPM_ASYNC flag is set then queue a suspend request;
+ * otherwise run the ->runtime_suspend() callback directly. If
+ * ->runtime_suspend() succeeds and a deferred resume was requested while
+ * the callback was running, carry out that resume; otherwise send an idle
+ * notification for the device's parent (unless the parent's
+ * power.ignore_children flag or the device's power.irq_safe flag is set).
+ * If ->runtime_suspend failed with -EAGAIN or -EBUSY, and if the RPM_AUTO
+ * flag is set and the next autosuspend-delay expiration time is in the
+ * future, schedule another autosuspend attempt.
*
* This function must be called under dev->power.lock with interrupts disabled.
*/
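The autosuspend-retry rule in the last sentence is aimed at callbacks that refresh last_busy before refusing to suspend: the new expiration time is what makes rpm_suspend() take the goto repeat path below instead of recording an error. A hedged sketch (hypothetical bar_* helpers):

	static int bar_runtime_suspend(struct device *dev)
	{
		struct bar_priv *priv = dev_get_drvdata(dev);

		if (bar_hw_busy(priv)) {
			/* Push the autosuspend expiration into the future;
			 * with RPM_AUTO set, the core reschedules another
			 * attempt instead of storing runtime_error. */
			pm_runtime_mark_last_busy(dev);
			return -EBUSY;
		}

		bar_hw_power_down(priv);
		return 0;
	}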
@@ -295,7 +307,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -347,6 +359,15 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe) {
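+ /*
+ * Callers of an irq-safe device may be atomic, so we cannot
+ * sleep on the wait queue below; busy-wait for the concurrent
+ * operation to finish and retry from the top instead.
+ */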
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
/* Wait for the other suspend running in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -396,19 +417,31 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	if (retval) {
 		__update_runtime_status(dev, RPM_ACTIVE);
 		dev->power.deferred_resume = false;
-		if (retval == -EAGAIN || retval == -EBUSY)
+		if (retval == -EAGAIN || retval == -EBUSY) {
 			dev->power.runtime_error = 0;
+
+			/*
+			 * If the callback routine failed an autosuspend, and
+			 * if the last_busy time has been updated so that there
+			 * is a new autosuspend expiration time, automatically
+			 * reschedule another autosuspend.
+			 */
+			if ((rpmflags & RPM_AUTO) &&
+			    pm_runtime_autosuspend_expiration(dev) != 0)
+				goto repeat;
+		} else {
 			pm_runtime_cancel_pending(dev);
-	} else {
+		}
+		wake_up_all(&dev->power.wait_queue);
+		goto out;
+	}
 no_callback:
-		__update_runtime_status(dev, RPM_SUSPENDED);
-		pm_runtime_deactivate_timer(dev);
+	__update_runtime_status(dev, RPM_SUSPENDED);
+	pm_runtime_deactivate_timer(dev);
-		if (dev->parent) {
-			parent = dev->parent;
-			atomic_add_unless(&parent->power.child_count, -1, 0);
-		}
+	if (dev->parent) {
+		parent = dev->parent;
+		atomic_add_unless(&parent->power.child_count, -1, 0);
 	}
 	wake_up_all(&dev->power.wait_queue);
@@ -430,7 +463,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- dev_dbg(dev, "%s returns %d\n", __func__, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -459,7 +492,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error)
@@ -496,6 +529,15 @@ static int rpm_resume(struct device *dev, int rpmflags)
goto out;
}
+ if (dev->power.irq_safe) {
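+ /* Same as in rpm_suspend(): irq-safe callers may be atomic,
+ * so poll for the concurrent operation instead of sleeping. */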
+ spin_unlock(&dev->power.lock);
+
+ cpu_relax();
+
+ spin_lock(&dev->power.lock);
+ goto repeat;
+ }
+
/* Wait for the operation carried out in parallel with us. */
for (;;) {
prepare_to_wait(&dev->power.wait_queue, &wait,
@@ -615,7 +657,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- dev_dbg(dev, "%s returns %d\n", __func__, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -732,13 +774,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend);
* return immediately if it is larger than zero. Then carry out an idle
* notification, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
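The might_sleep_if() assertion added here (and to the suspend and resume entry points below) turns the documented rule into a runtime check: a synchronous call from atomic context is only legal once pm_runtime_irq_safe() has been declared for the device. A sketch of an atomic-context caller that stays within the rule (hypothetical interrupt handler):

	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct device *dev = data;

		/* Synchronous resume in hard-irq context: allowed only
		 * because probe called pm_runtime_irq_safe(dev). */
		pm_runtime_get_sync(dev);

		/* ... acknowledge and service the hardware ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return IRQ_HANDLED;
	}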
@@ -761,13 +806,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle);
* return immediately if it is larger than zero. Then carry out a suspend,
* either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
@@ -789,13 +837,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
* If the RPM_GET_PUT flag is set, increment the device's usage count. Then
* carry out a resume, either synchronous or asynchronous.
*
- * This routine may be called in atomic context if the RPM_ASYNC flag is set.
+ * This routine may be called in atomic context if the RPM_ASYNC flag is set,
+ * or if pm_runtime_irq_safe() has been called.
*/
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
unsigned long flags;
int retval;
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);