author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-09-15 14:23:18 +0200
committer Ingo Molnar <mingo@elte.hu>              2009-09-15 16:01:01 +0200
commit    b78bb868c54bebbf8d8786a3f8320700d6d2b864 (patch)
tree      de0c66ccbefff6b34959c2331b397804725fc3ce /kernel/sched.c
parent    0cc6d77e55eca9557bbe41bf2db94b31aa8fcb2a (diff)
sched: Fix double_rq_lock() compile warning
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
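The warning being fixed is GCC's "declared 'static' but never defined" diagnostic: the forward declaration of double_rq_lock() sat near the top of sched.c outside any #ifdef, so a configuration that compiles out both the definition and every caller is still left with a dangling static declaration. A minimal standalone sketch of the pattern follows; the file name, the lock_both() helper and the use of CONFIG_PREEMPT as the only guard are illustrative assumptions, not taken from kernel/sched.c.

/* sketch.c - hypothetical reproduction of the warning class this patch
 * silences; not kernel code.
 *
 *   gcc -Wall -c sketch.c                   -> warns about double_rq_lock
 *   gcc -Wall -DCONFIG_PREEMPT -c sketch.c  -> clean
 */
struct rq { int dummy; };

/* Unconditional forward declaration ... */
static void double_rq_lock(struct rq *rq1, struct rq *rq2);

#ifdef CONFIG_PREEMPT
/* ... but the first use and the definition only exist in some configs. */
void lock_both(struct rq *a, struct rq *b)
{
	double_rq_lock(a, b);
}

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	/* stand-in body; the real function locks both runqueues */
	(void)rq1;
	(void)rq2;
}
#endif

With CONFIG_PREEMPT unset, "gcc -Wall" reports something like "'double_rq_lock' declared 'static' but never defined"; moving the declaration inside the conditional block, as the second hunk below does, keeps the declaration, its first use and the definition under the same preprocessor guard.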
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a53685ed..17e4391ec2d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -119,8 +119,6 @@
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
-static void double_rq_lock(struct rq *rq1, struct rq *rq2);
-
 static inline int rt_policy(int policy)
 {
 	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
@@ -1695,6 +1693,8 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #ifdef CONFIG_PREEMPT
 
+static void double_rq_lock(struct rq *rq1, struct rq *rq2);
+
 /*
  * fair double_lock_balance: Safely acquires both rq->locks in a fair
  * way at the expense of forcing extra atomic operations in all