summaryrefslogtreecommitdiffstats
path: root/drivers/base/power/wakeup.c
blob: 25599077c39ca2ec4f9d2cd170f7f7c37a8a47d8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/suspend.h>
#include <linux/pm.h>

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 * Written under events_lock; armed by pm_save_wakeup_count() and cleared by
 * pm_get_wakeup_count() (for CAP_SYS_ADMIN callers) or a failed check in
 * pm_check_wakeup_events().
 */
bool events_check_enabled;

/* The counter of registered (i.e. fully processed) wakeup events. */
static unsigned long event_count;
/* A preserved old value of event_count, set by pm_save_wakeup_count(). */
static unsigned long saved_event_count;
/* The counter of wakeup events currently being processed ("in flight"). */
static unsigned long events_in_progress;

/* Protects all of the counters and flags above; taken from IRQ context too. */
static DEFINE_SPINLOCK(events_lock);

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  The moment this period
 * will end depends on how the wakeup event is going to be processed after being
 * detected and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user space
 * for further processing.  In that case the functional unit that has detected
 * the event may later "close" the "no suspend" period associated with it
 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 * pm_relax(), balanced with each other, is supposed to be used in such
 * situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify time
 * to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 */

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to (may be NULL).
 *
 * Open a "no suspend" period by bumping the count of wakeup events in
 * progress; if @dev is given, its own wakeup counter is bumped as well.
 *
 * Use this when the caller will invoke pm_relax() itself once the event has
 * been fully handled.  Safe to call from interrupt context.
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&events_lock, flags);
	events_in_progress++;
	if (dev)
		dev->power.wakeup_count++;
	spin_unlock_irqrestore(&events_lock, flags);
}

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 *
 * Close a "no suspend" period opened by pm_stay_awake(): move one event from
 * the in-progress count to the registered-event count.  A call without a
 * matching pm_stay_awake() is silently ignored.
 *
 * Safe to call from interrupt context.
 */
void pm_relax(void)
{
	unsigned long flags;

	spin_lock_irqsave(&events_lock, flags);
	if (events_in_progress > 0) {
		event_count++;
		events_in_progress--;
	}
	spin_unlock_irqrestore(&events_lock, flags);
}

/**
 * pm_wakeup_work_fn - Deferred closing of a wakeup event.
 * @work: Work item embedded in the delayed_work allocated by
 *        pm_wakeup_event().
 *
 * Run pm_relax() on behalf of an event registered in the past and release
 * the delayed_work object that carried this callback.
 */
static void pm_wakeup_work_fn(struct work_struct *work)
{
	pm_relax();
	kfree(to_delayed_work(work));
}

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event (signaled by @dev) that will take
 * approximately @msec milliseconds to be processed by the kernel.  Increment
 * the counter of wakeup events being processed and queue up a work item
 * that will execute pm_relax() for the event after @msec milliseconds.  If @dev
 * is not NULL, the counter of wakeup events related to @dev is incremented too.
 *
 * It is safe to call this function from interrupt context.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
	unsigned long flags;
	struct delayed_work *dwork;

	/*
	 * Allocate outside the spinlock; GFP_ATOMIC because this may run in
	 * interrupt context.  @msec == 0 means no deferral is wanted.
	 */
	dwork = msec ? kzalloc(sizeof(*dwork), GFP_ATOMIC) : NULL;

	spin_lock_irqsave(&events_lock, flags);
	if (dev)
		dev->power.wakeup_count++;

	if (dwork) {
		/* pm_wakeup_work_fn() will pm_relax() and free dwork later. */
		INIT_DELAYED_WORK(dwork, pm_wakeup_work_fn);
		schedule_delayed_work(dwork, msecs_to_jiffies(msec));

		events_in_progress++;
	} else {
		/*
		 * @msec == 0, or the allocation failed: count the event as
		 * registered immediately with no "no suspend" period.
		 * NOTE(review): on kzalloc failure this silently drops the
		 * requested deferral — presumably intentional best-effort.
		 */
		event_count++;
	}
	spin_unlock_irqrestore(&events_lock, flags);
}

/**
 * pm_check_wakeup_events - Check for new wakeup events.
 *
 * Return true unless checking is enabled and either a wakeup event has been
 * registered since pm_save_wakeup_count() stored saved_event_count, or some
 * events are still in progress.  On failure the check is disarmed, so
 * subsequent calls return true until it is armed again.
 */
bool pm_check_wakeup_events(void)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		ret = !events_in_progress && event_count == saved_event_count;
		events_check_enabled = ret;
	}
	spin_unlock_irqrestore(&events_lock, flags);
	return ret;
}

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 *
 * Store the number of registered wakeup events at the address in @count.  Block
 * if the current number of wakeup events being processed is nonzero.
 *
 * Return false if the wait for the number of wakeup events being processed to
 * drop down to zero has been interrupted by a signal (and the current number
 * of wakeup events being processed is still nonzero).  Otherwise return true.
 */
bool pm_get_wakeup_count(unsigned long *count)
{
	bool ret;

	spin_lock_irq(&events_lock);
	/* Privileged readers disarm any previously armed wakeup check. */
	if (capable(CAP_SYS_ADMIN))
		events_check_enabled = false;

	/*
	 * Poll until all in-flight events complete or a signal arrives.
	 * The lock must be dropped around the sleep so pm_relax() (which
	 * takes events_lock, possibly from IRQ context) can make progress.
	 */
	while (events_in_progress && !signal_pending(current)) {
		spin_unlock_irq(&events_lock);

		schedule_timeout_interruptible(msecs_to_jiffies(100));

		spin_lock_irq(&events_lock);
	}
	/* Snapshot the counter and the exit reason under the same lock hold. */
	*count = event_count;
	ret = !events_in_progress;
	spin_unlock_irq(&events_lock);
	return ret;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * When @count still matches the number of registered wakeup events and no
 * events are in flight, record @count as the baseline for
 * pm_check_wakeup_events(), arm the check and return true.  Otherwise leave
 * the state untouched and return false.
 */
bool pm_save_wakeup_count(unsigned long count)
{
	bool ret = false;

	spin_lock_irq(&events_lock);
	if (!events_in_progress && count == event_count) {
		saved_event_count = count;
		events_check_enabled = true;
		ret = true;
	}
	spin_unlock_irq(&events_lock);
	return ret;
}