author     Linus Torvalds <torvalds@linux-foundation.org>  2010-02-28 10:31:01 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-02-28 10:31:01 -0800
commit     f66ffdedbf0fc059a92219bb08c1dbcac88f074b (patch)
tree       9db4ad51764455123130e82fb7acf4f0a0be58ce /include
parent     2531216f236cb2a1f39ffa12a4a9339541e52191 (diff)
parent     dd5feea14a7de4edbd9f36db1a2db785de91b88d (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  sched: Fix SCHED_MC regression caused by change in sched cpu_power
  sched: Don't use possibly stale sched_class
  kthread, sched: Remove reference to kthread_create_on_cpu
  sched: cpuacct: Use bigger percpu counter batch values for stats counters
  percpu_counter: Make __percpu_counter_add an inline function on UP
  sched: Remove member rt_se from struct rt_rq
  sched: Change usage of rt_rq->rt_se to rt_rq->tg->rt_se[cpu]
  sched: Remove unused update_shares_locked()
  sched: Use for_each_bit
  sched: Queue a deboosted task to the head of the RT prio queue
  sched: Implement head queueing for sched_rt
  sched: Extend enqueue_task to allow head queueing
  sched: Remove USER_SCHED
  sched: Fix the place where group powers are updated
  sched: Assume *balance is valid
  sched: Remove load_balance_newidle()
  sched: Unify load_balance{,_newidle}()
  sched: Add a lock break for PREEMPT=y
  sched: Remove from fwd decls
  sched: Remove rq_iterator from move_one_task
  ...

Fix up trivial conflicts in kernel/sched.c
Diffstat (limited to 'include')
-rw-r--r--  include/linux/kernel.h          |  5
-rw-r--r--  include/linux/percpu_counter.h  |  9
-rw-r--r--  include/linux/sched.h           | 25
3 files changed, 12 insertions, 27 deletions
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 328bca609b9..1221d2331a6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -124,7 +124,7 @@ extern int _cond_resched(void);
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
-  void __might_sleep(char *file, int line, int preempt_offset);
+  void __might_sleep(const char *file, int line, int preempt_offset);
 /**
  * might_sleep - annotation for functions that can sleep
  *
@@ -138,7 +138,8 @@ extern int _cond_resched(void);
 # define might_sleep() \
 	do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
 #else
-  static inline void __might_sleep(char *file, int line, int preempt_offset) { }
+  static inline void __might_sleep(const char *file, int line,
+				   int preempt_offset) { }
 # define might_sleep() do { might_resched(); } while (0)
 #endif
 
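
For context, a minimal usage sketch (hypothetical helper, not part of this merge) of the annotation touched above: might_sleep() expands to __might_sleep(__FILE__, __LINE__, 0) when CONFIG_DEBUG_SPINLOCK_SLEEP is set, and the const-qualified prototype lets the __FILE__ string literal be passed without discarding const.

#include <linux/kernel.h>
#include <linux/delay.h>

/* Hypothetical helper, shown only to illustrate the annotation: it may block,
 * so it calls might_sleep() to warn if it is reached from atomic context. */
static void example_wait_for_device(void)
{
	might_sleep();		/* debug check: is sleeping allowed here? */
	msleep(10);		/* the call that can actually sleep */
}
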
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index a7684a51399..794662b2be5 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -98,9 +98,6 @@ static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 	fbc->count = amount;
 }
 
-#define __percpu_counter_add(fbc, amount, batch) \
-	percpu_counter_add(fbc, amount)
-
 static inline void
 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -109,6 +106,12 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 	preempt_enable();
 }
 
+static inline void
+__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
+{
+	percpu_counter_add(fbc, amount);
+}
+
 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
 {
 	return fbc->count;
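
A brief usage sketch of the inline added above (the counter and the batch value are made up for illustration): on SMP the batch bounds how far a per-CPU delta may drift before it is folded into the shared count, while on UP this inline simply forwards to percpu_counter_add(), so the larger batches used by the cpuacct stats commit in this series cost nothing there.

#include <linux/percpu_counter.h>

static struct percpu_counter example_events;	/* assume percpu_counter_init() was called */

static void example_account(s64 delta)
{
	/* 256 is an arbitrary illustrative batch: a bigger batch means fewer
	 * updates of the shared count, at the price of a larger error bound
	 * for readers that do not sum the per-CPU parts. */
	__percpu_counter_add(&example_events, delta, 256);
}
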
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1f5fa53b46b..0eef87b58ea 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -740,14 +740,6 @@ struct user_struct {
 	uid_t uid;
 	struct user_namespace *user_ns;
 
-#ifdef CONFIG_USER_SCHED
-	struct task_group *tg;
-#ifdef CONFIG_SYSFS
-	struct kobject kobj;
-	struct delayed_work work;
-#endif
-#endif
-
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
@@ -1087,7 +1079,8 @@ struct sched_domain;
 struct sched_class {
 	const struct sched_class *next;
 
-	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup,
+			      bool head);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
@@ -1099,14 +1092,6 @@ struct sched_class {
 #ifdef CONFIG_SMP
 	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
-	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
-			struct rq *busiest, unsigned long max_load_move,
-			struct sched_domain *sd, enum cpu_idle_type idle,
-			int *all_pinned, int *this_best_prio);
-
-	int (*move_one_task) (struct rq *this_rq, int this_cpu,
-			      struct rq *busiest, struct sched_domain *sd,
-			      enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct rq *this_rq, struct task_struct *task);
@@ -2520,13 +2505,9 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);
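
To show what the widened enqueue_task hook enables, a rough sketch of a policy class honouring the new head flag (the function and the demo list are hypothetical; list_add(), list_add_tail() and the rt.run_list field are real). This is the mechanism behind requeueing a deboosted RT task at the front of its priority queue instead of the tail.

#include <linux/list.h>
#include <linux/sched.h>

/* Hypothetical single queue standing in for the per-priority lists a real
 * scheduling class would keep. */
static LIST_HEAD(demo_queue);

static void demo_enqueue_task(struct rq *rq, struct task_struct *p,
			      int wakeup, bool head)
{
	if (head)
		list_add(&p->rt.run_list, &demo_queue);		/* queue at the head */
	else
		list_add_tail(&p->rt.run_list, &demo_queue);	/* default: queue at the tail */
}
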