| field | value | date |
| --- | --- | --- |
| author | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-23 23:44:19 -0500 |
| committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-23 23:44:19 -0500 |
| commit | 1ebbe2b20091d306453a5cf480a87e6cd28ae76f (patch) | |
| tree | f5cd7a0fa69b8b1938cb5a0faed2e7b0628072a5 /kernel/sched.c | |
| parent | ac58c9059da8886b5e8cde012a80266b18ca146e (diff) | |
| parent | 674a396c6d2ba0341ebdd7c1c9950f32f018e2dd (diff) | |
Merge branch 'linus'
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 19
1 file changed, 9 insertions, 10 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d46e90f59c..7ffaabd64f8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -237,6 +237,7 @@ struct runqueue {
 
 	task_t *migration_thread;
 	struct list_head migration_queue;
+	int cpu;
 #endif
 
 #ifdef CONFIG_SCHEDSTATS
@@ -707,12 +708,6 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 				DEF_TIMESLICE);
 		} else {
 			/*
-			 * The lower the sleep avg a task has the more
-			 * rapidly it will rise with sleep time.
-			 */
-			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
-			/*
 			 * Tasks waking from uninterruptible sleep are
 			 * limited in their sleep_avg rise as they
 			 * are likely to be waiting on I/O
@@ -1660,6 +1655,9 @@ unsigned long nr_iowait(void)
 /*
  * double_rq_lock - safely lock two runqueues
  *
+ * We must take them in cpu order to match code in
+ * dependent_sleeper and wake_dependent_sleeper.
+ *
  * Note this does not disable interrupts like task_rq_lock,
  * you need to do so manually before calling.
  */
@@ -1671,7 +1669,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
 		spin_lock(&rq1->lock);
 		__acquire(rq2->lock);	/* Fake it out ;) */
 	} else {
-		if (rq1 < rq2) {
+		if (rq1->cpu < rq2->cpu) {
 			spin_lock(&rq1->lock);
 			spin_lock(&rq2->lock);
 		} else {
@@ -1707,7 +1705,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
 	__acquires(this_rq->lock)
 {
 	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
+		if (busiest->cpu < this_rq->cpu) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
 			spin_lock(&this_rq->lock);
@@ -2875,7 +2873,7 @@ asmlinkage void __sched schedule(void)
 	 */
 	if (likely(!current->exit_state)) {
 		if (unlikely(in_atomic())) {
-			printk(KERN_ERR "scheduling while atomic: "
+			printk(KERN_ERR "BUG: scheduling while atomic: "
 				"%s/0x%08x/%d\n",
 				current->comm, preempt_count(), current->pid);
 			dump_stack();
@@ -6035,6 +6033,7 @@ void __init sched_init(void)
 		rq->push_cpu = 0;
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
+		rq->cpu = i;
 #endif
 
 		atomic_set(&rq->nr_iowait, 0);
@@ -6075,7 +6074,7 @@ void __might_sleep(char *file, int line)
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
 	prev_jiffy = jiffies;
-	printk(KERN_ERR "Debug: sleeping function called from invalid"
+	printk(KERN_ERR "BUG: sleeping function called from invalid"
 		" context at %s:%d\n", file, line);
 	printk("in_atomic():%d, irqs_disabled():%d\n",
 		in_atomic(), irqs_disabled());
```
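The substance of the scheduler hunks above is the lock-ordering change: every path that takes two runqueue locks must agree on one global acquisition order, and the new `rq->cpu` field lets `double_rq_lock()` and `double_lock_balance()` order by cpu number, matching `dependent_sleeper()` and `wake_dependent_sleeper()` as the added comment says, which closes an ABBA deadlock window. Below is a minimal userspace sketch of the same idiom, not the kernel code itself: it uses pthread mutexes, and the `struct rq` with its `id` field is a hypothetical stand-in for the runqueue and `rq->cpu`.

```c
#include <pthread.h>
#include <stdio.h>

/* Stand-in for the kernel's runqueue: one lock plus a stable
 * ordering key. The kernel patch uses rq->cpu for this; "id"
 * here is a hypothetical field for illustration. */
struct rq {
	pthread_mutex_t lock;
	int id;
};

/*
 * Take both locks in a fixed global order (lowest id first) so
 * that two threads locking the same pair with opposite argument
 * order cannot deadlock (ABBA). The point of the patch is that
 * every path taking two runqueue locks must use the *same*
 * order; it switches from pointer order to cpu order because
 * other scheduler code already locks by cpu.
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
	if (rq1 == rq2) {
		pthread_mutex_lock(&rq1->lock);	/* only one lock to take */
	} else if (rq1->id < rq2->id) {
		pthread_mutex_lock(&rq1->lock);
		pthread_mutex_lock(&rq2->lock);
	} else {
		pthread_mutex_lock(&rq2->lock);
		pthread_mutex_lock(&rq1->lock);
	}
}

static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
{
	pthread_mutex_unlock(&rq1->lock);
	if (rq1 != rq2)
		pthread_mutex_unlock(&rq2->lock);
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER, 1 };

	/* Both call orders acquire a.lock before b.lock. */
	double_rq_lock(&a, &b);
	double_rq_unlock(&a, &b);
	double_rq_lock(&b, &a);
	double_rq_unlock(&b, &a);
	printf("locked both pairs, both orders, no deadlock\n");
	return 0;
}
```

Note that the old `rq1 < rq2` pointer comparison is itself a consistent total order; per the comment added in the diff, the problem was only that `dependent_sleeper()` and `wake_dependent_sleeper()` order by cpu number, so the two conventions could disagree about which lock comes first.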