author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 14:11:14 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-19 14:11:14 -0700
commit    ff86303e3021587c49a14df1bc54fe2d393e2223
tree      7f1b26407aef36ba486428285604b8b7a7cbf99e
parent    626ac545c12e5f9bffe93086d1d03d26c99987ea
parent    e436d80085133858bf2613a630365e8a0459fd58
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  [PATCH] sched: implement cpu_clock(cpu) high-speed time source
  [PATCH] sched: fix the all pinned logic in load_balance_newidle()
  [PATCH] sched: fix newly idle load balance in case of SMT
  [PATCH] sched: sched_cacheflush is now unused
-rw-r--r--  arch/ia64/kernel/setup.c        9
-rw-r--r--  include/asm-alpha/system.h     10
-rw-r--r--  include/asm-arm/system.h       10
-rw-r--r--  include/asm-arm26/system.h     10
-rw-r--r--  include/asm-i386/system.h       9
-rw-r--r--  include/asm-ia64/system.h       1
-rw-r--r--  include/asm-m32r/system.h      10
-rw-r--r--  include/asm-mips/system.h      10
-rw-r--r--  include/asm-parisc/system.h    11
-rw-r--r--  include/asm-powerpc/system.h   10
-rw-r--r--  include/asm-ppc/system.h       10
-rw-r--r--  include/asm-s390/system.h      10
-rw-r--r--  include/asm-sh/system.h        10
-rw-r--r--  include/asm-sparc/system.h     10
-rw-r--r--  include/asm-sparc64/system.h   10
-rw-r--r--  include/asm-x86_64/system.h     9
-rw-r--r--  include/linux/sched.h           7
-rw-r--r--  kernel/sched.c                 31
18 files changed, 33 insertions(+), 154 deletions(-)
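The headline addition in this merge is the cpu_clock() interface, declared in include/linux/sched.h and implemented in kernel/sched.c below. As a rough illustration of what it is for, a hypothetical in-kernel caller might use it as in the following sketch; time_section() and its callback are made-up names, not part of this merge, and the caller is assumed to run with preemption disabled:

#include <linux/smp.h>
#include <linux/sched.h>

/*
 * Hypothetical helper (not part of this merge): time a code section
 * with the new cpu_clock() interface. cpu_clock(cpu) returns a
 * nanosecond timestamp from a fast per-cpu clock built on top of
 * sched_clock(); it is "slightly incorrect" across CPUs, so both
 * samples are taken on the same CPU.
 */
static unsigned long long time_section(void (*fn)(void))
{
	int cpu = smp_processor_id();	/* requires preemption disabled */
	unsigned long long t0, t1;

	t0 = cpu_clock(cpu);
	fn();
	t1 = cpu_clock(cpu);

	return t1 - t0;			/* elapsed nanoseconds, roughly */
}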
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 4d9864cc92c..cf06fe79904 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -980,15 +980,6 @@ cpu_init (void)
pm_idle = default_idle;
}
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-void sched_cacheflush(void)
-{
- ia64_sal_cache_flush(3);
-}
-
void __init
check_bugs (void)
{
diff --git a/include/asm-alpha/system.h b/include/asm-alpha/system.h
index cf1021a97b2..620c4d86cbf 100644
--- a/include/asm-alpha/system.h
+++ b/include/asm-alpha/system.h
@@ -139,16 +139,6 @@ extern void halt(void) __attribute__((noreturn));
struct task_struct;
extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
#define imb() \
__asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index 6f8e6a69dc5..94ea8c6dc1a 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -254,16 +254,6 @@ do { \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
* On the StrongARM, "swp" is terminally broken since it bypasses the
diff --git a/include/asm-arm26/system.h b/include/asm-arm26/system.h
index 4703593b3bb..e09da5ff1f5 100644
--- a/include/asm-arm26/system.h
+++ b/include/asm-arm26/system.h
@@ -110,16 +110,6 @@ do { \
} while (0)
/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
* Save the current interrupt enable state & disable IRQs
*/
#define local_irq_save(x) \
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 94ed3686a5f..609756c6167 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -310,15 +310,6 @@ void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible:
- */
-static inline void sched_cacheflush(void)
-{
- wbinvd();
-}
-
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
diff --git a/include/asm-ia64/system.h b/include/asm-ia64/system.h
index 384fbf7f2a0..91bb8e00066 100644
--- a/include/asm-ia64/system.h
+++ b/include/asm-ia64/system.h
@@ -259,7 +259,6 @@ extern void ia64_load_extra (struct task_struct *task);
#define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
void cpu_idle_wait(void);
-void sched_cacheflush(void);
#define arch_align_stack(x) (x)
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h
index 8ee73d3f316..2365de5c295 100644
--- a/include/asm-m32r/system.h
+++ b/include/asm-m32r/system.h
@@ -54,16 +54,6 @@
); \
} while(0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
/* Interrupt Control */
#if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
#define local_irq_enable() \
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 46bdb3f566f..76339165bc2 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -71,16 +71,6 @@ do { \
write_c0_userlocal(task_thread_info(current)->tp_value);\
} while(0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
diff --git a/include/asm-parisc/system.h b/include/asm-parisc/system.h
index 21fbfc5afd0..ee80c920b46 100644
--- a/include/asm-parisc/system.h
+++ b/include/asm-parisc/system.h
@@ -48,17 +48,6 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
(last) = _switch_to(prev, next); \
} while(0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-
/* interrupt control */
#define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
#define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 32aa42b748b..41520b7a7b7 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -184,16 +184,6 @@ struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index d84a3cf4d03..f1311a8f310 100644
--- a/include/asm-ppc/system.h
+++ b/include/asm-ppc/system.h
@@ -129,16 +129,6 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) ((last) = __switch_to((prev), (next)))
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
diff --git a/include/asm-s390/system.h b/include/asm-s390/system.h
index bbe137c3ed6..64a3cd05cae 100644
--- a/include/asm-s390/system.h
+++ b/include/asm-s390/system.h
@@ -97,16 +97,6 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_vtime(struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 7c75045ae22..24504253720 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -64,16 +64,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
last = __last; \
} while (0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
#ifdef CONFIG_CPU_SH4A
#define __icbi() \
{ \
diff --git a/include/asm-sparc/system.h b/include/asm-sparc/system.h
index 8b4e23b3bb3..d1a2572e3f5 100644
--- a/include/asm-sparc/system.h
+++ b/include/asm-sparc/system.h
@@ -165,16 +165,6 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
} while(0)
/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
-/*
* Changing the IRQ level on the Sparc.
*/
extern void local_irq_restore(unsigned long);
diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h
index 8ba380ec6da..409067408ee 100644
--- a/include/asm-sparc64/system.h
+++ b/include/asm-sparc64/system.h
@@ -204,16 +204,6 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
} \
} while(0)
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- *
- * TODO: fill this in!
- */
-static inline void sched_cacheflush(void)
-{
-}
-
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
unsigned long tmp1, tmp2;
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index ead9f9a5623..e4f246d62c4 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -111,15 +111,6 @@ static inline void write_cr4(unsigned long val)
#define wbinvd() \
__asm__ __volatile__ ("wbinvd": : :"memory");
-/*
- * On SMP systems, when the scheduler does migration-cost autodetection,
- * it needs a way to flush as much of the CPU's caches as possible.
- */
-static inline void sched_cacheflush(void)
-{
- wbinvd();
-}
-
#endif /* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624aef01..33b9b4841ee 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
#endif
extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
diff --git a/kernel/sched.c b/kernel/sched.c
index 645256b228c..93cf241cfbe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long long now;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ now = rq_clock(rq);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ return now;
+}
+
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Change a task's ->cfs_rq if it moves across CPUs */
static inline void set_task_cfs_rq(struct task_struct *p)
@@ -2235,7 +2252,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
rq = cpu_rq(i);
- if (*sd_idle && !idle_cpu(i))
+ if (*sd_idle && rq->nr_running)
*sd_idle = 0;
/* Bias balancing toward cpus of our domain */
@@ -2257,9 +2274,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/*
* First idle cpu or the first cpu(busiest) in this sched group
* is eligible for doing load balancing at this and above
- * domains.
+ * domains. In the newly idle case, we will allow all the cpu's
+ * to do the newly idle load balance.
*/
- if (local_group && balance_cpu != this_cpu && balance) {
+ if (idle != CPU_NEWLY_IDLE && local_group &&
+ balance_cpu != this_cpu && balance) {
*balance = 0;
goto ret;
}
@@ -2677,6 +2696,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
unsigned long imbalance;
int nr_moved = 0;
int sd_idle = 0;
+ int all_pinned = 0;
cpumask_t cpus = CPU_MASK_ALL;
/*
@@ -2715,10 +2735,11 @@ redo:
double_lock_balance(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest,
minus_1_or_zero(busiest->nr_running),
- imbalance, sd, CPU_NEWLY_IDLE, NULL);
+ imbalance, sd, CPU_NEWLY_IDLE,
+ &all_pinned);
spin_unlock(&busiest->lock);
- if (!nr_moved) {
+ if (unlikely(all_pinned)) {
cpu_clear(cpu_of(busiest), cpus);
if (!cpus_empty(cpus))
goto redo;
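For context, the last two hunks change load_balance_newidle() so that the redo path (dropping the busiest CPU from the candidate mask and retrying) is taken only when move_tasks() reports via all_pinned that every candidate task was pinned to its CPU, rather than whenever nothing happened to move. A simplified sketch of the corrected control flow follows; it is not the kernel code itself, and try_pull_from_busiest() is a hypothetical stand-in for the find_busiest_group()/find_busiest_queue()/move_tasks() sequence:

/*
 * Simplified sketch of the retry logic after this merge (hypothetical
 * helper names, not the actual kernel code).
 */
static int newidle_balance_sketch(int this_cpu, struct rq *this_rq,
				  struct sched_domain *sd)
{
	cpumask_t cpus = CPU_MASK_ALL;
	int busiest_cpu, nr_moved, all_pinned;

redo:
	all_pinned = 0;
	nr_moved = try_pull_from_busiest(this_cpu, this_rq, sd, &cpus,
					 &busiest_cpu, &all_pinned);
	if (unlikely(all_pinned)) {
		/*
		 * Every runnable task on the busiest cpu was pinned there,
		 * so moving anything from it is impossible: exclude that
		 * cpu and try the next busiest one. Any other failure to
		 * move tasks no longer triggers a retry.
		 */
		cpu_clear(busiest_cpu, cpus);
		if (!cpus_empty(cpus))
			goto redo;
	}
	return nr_moved;
}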