Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.instrumentation  |    6
-rw-r--r--  kernel/acct.c                   |    4
-rw-r--r--  kernel/exit.c                   |   11
-rw-r--r--  kernel/fork.c                   |   21
-rw-r--r--  kernel/futex.c                  |   78
-rw-r--r--  kernel/hrtimer.c                |    8
-rw-r--r--  kernel/irq/chip.c               |    9
-rw-r--r--  kernel/kallsyms.c               |    7
-rw-r--r--  kernel/kexec.c                  |    1
-rw-r--r--  kernel/lockdep.c                |   42
-rw-r--r--  kernel/module.c                 |    8
-rw-r--r--  kernel/panic.c                  |   18
-rw-r--r--  kernel/params.c                 |   10
-rw-r--r--  kernel/printk.c                 |    2
-rw-r--r--  kernel/ptrace.c                 |    6
-rw-r--r--  kernel/rwsem.c                  |    5
-rw-r--r--  kernel/sched.c                  |  329
-rw-r--r--  kernel/sched_debug.c            |   18
-rw-r--r--  kernel/sched_fair.c             |   28
-rw-r--r--  kernel/sched_rt.c               |    3
-rw-r--r--  kernel/sched_stats.h            |    3
-rw-r--r--  kernel/sysctl.c                 |   20
-rw-r--r--  kernel/sysctl_check.c           |   54
-rw-r--r--  kernel/time/clockevents.c       |    5
-rw-r--r--  kernel/time/ntp.c               |    9
-rw-r--r--  kernel/time/tick-broadcast.c    |   56
-rw-r--r--  kernel/time/tick-sched.c        |    2
-rw-r--r--  kernel/timer.c                  |    4
-rw-r--r--  kernel/user.c                   |    7
-rw-r--r--  kernel/utsname_sysctl.c         |    4
30 files changed, 475 insertions(+), 303 deletions(-)
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation
index f5f2c769d95..468f47ad750 100644
--- a/kernel/Kconfig.instrumentation
+++ b/kernel/Kconfig.instrumentation
@@ -20,8 +20,8 @@ config PROFILING
 
 config OPROFILE
 	tristate "OProfile system profiling (EXPERIMENTAL)"
-	depends on PROFILING
-	depends on ALPHA || ARM || BLACKFIN || X86_32 || IA64 || M32R || MIPS || PARISC || PPC || S390 || SUPERH || SPARC || X86_64
+	depends on PROFILING && !UML
+	depends on ARCH_SUPPORTS_OPROFILE || ALPHA || ARM || BLACKFIN || IA64 || M32R || PARISC || PPC || S390 || SUPERH || SPARC
 	help
 	  OProfile is a profiling system capable of profiling the
 	  whole system, include the kernel, kernel modules, libraries,
@@ -31,7 +31,7 @@ config OPROFILE
 
 config KPROBES
 	bool "Kprobes"
-	depends on KALLSYMS && MODULES
+	depends on KALLSYMS && MODULES && !UML
 	depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
 	help
 	  Kprobes allows you to trap at almost any kernel address and
diff --git a/kernel/acct.c b/kernel/acct.c
index fce53d8df8a..521dfa53cb9 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -413,7 +413,7 @@ static u32 encode_float(u64 value)
 * The acct_process() call is the workhorse of the process
 * accounting system. The struct acct is built here and then written
 * into the accounting file. This function should only be called from
- * do_exit().
+ * do_exit() or when switching to a different output file.
 */
 
/*
@@ -482,7 +482,7 @@ static void do_acct_process(struct file *file)
 #endif
 #if ACCT_VERSION==3
 	ac.ac_pid = current->tgid;
-	ac.ac_ppid = current->parent->tgid;
+	ac.ac_ppid = current->real_parent->tgid;
 #endif
 
 	spin_lock_irq(&current->sighand->siglock);
diff --git a/kernel/exit.c b/kernel/exit.c
index cd0f1d4137a..549c0558ba6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1357,7 +1357,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 			     int __user *stat_addr, struct rusage __user *ru)
 {
 	int retval, exit_code;
-	struct pid_namespace *ns;
+	pid_t pid;
 
 	if (!p->exit_code)
 		return 0;
@@ -1376,12 +1376,11 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 	 * keep holding onto the tasklist_lock while we call getrusage and
 	 * possibly take page faults for user memory.
 	 */
-	ns = current->nsproxy->pid_ns;
+	pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
 	get_task_struct(p);
 	read_unlock(&tasklist_lock);
 
 	if (unlikely(noreap)) {
-		pid_t pid = task_pid_nr_ns(p, ns);
 		uid_t uid = p->uid;
 		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
@@ -1389,7 +1388,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
 		if (unlikely(!exit_code) || unlikely(p->exit_state))
 			goto bail_ref;
 		return wait_noreap_copyout(p, pid, uid,
-					   why, (exit_code << 8) | 0x7f,
+					   why, exit_code,
 					   infop, ru);
 	}
@@ -1451,11 +1450,11 @@ bail_ref:
 	if (!retval && infop)
 		retval = put_user(exit_code, &infop->si_status);
 	if (!retval && infop)
-		retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
+		retval = put_user(pid, &infop->si_pid);
 	if (!retval && infop)
 		retval = put_user(p->uid, &infop->si_uid);
 	if (!retval)
-		retval = task_pid_nr_ns(p, ns);
+		retval = pid;
 	put_task_struct(p);
 
 	BUG_ON(!retval);
diff --git a/kernel/fork.c b/kernel/fork.c
index 8ca1a14cdc8..8dd8ff28100 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1292,23 +1292,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		__ptrace_link(p, current->parent);
 
 		if (thread_group_leader(p)) {
-			if (clone_flags & CLONE_NEWPID) {
+			if (clone_flags & CLONE_NEWPID)
 				p->nsproxy->pid_ns->child_reaper = p;
-				p->signal->tty = NULL;
-				set_task_pgrp(p, p->pid);
-				set_task_session(p, p->pid);
-				attach_pid(p, PIDTYPE_PGID, pid);
-				attach_pid(p, PIDTYPE_SID, pid);
-			} else {
-				p->signal->tty = current->signal->tty;
-				set_task_pgrp(p, task_pgrp_nr(current));
-				set_task_session(p, task_session_nr(current));
-				attach_pid(p, PIDTYPE_PGID,
-						task_pgrp(current));
-				attach_pid(p, PIDTYPE_SID,
-						task_session(current));
-			}
+
+			p->signal->tty = current->signal->tty;
+			set_task_pgrp(p, task_pgrp_nr(current));
+			set_task_session(p, task_session_nr(current));
+			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
 			__get_cpu_var(process_counts)++;
 		}
diff --git a/kernel/futex.c b/kernel/futex.c
index 9dc591ab681..db9824de8bf 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -658,7 +658,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 
 	if (curval == -EFAULT)
 		ret = -EFAULT;
-	if (curval != uval)
+	else if (curval != uval)
 		ret = -EINVAL;
 	if (ret) {
 		spin_unlock(&pi_state->pi_mutex.wait_lock);
@@ -1097,15 +1097,15 @@ static void unqueue_me_pi(struct futex_q *q)
 }
 
 /*
- * Fixup the pi_state owner with current.
+ * Fixup the pi_state owner with the new owner.
  *
  * Must be called with hash bucket lock held and mm->sem held for non
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-				struct task_struct *curr)
+				struct task_struct *newowner)
 {
-	u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
+	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
 	struct futex_pi_state *pi_state = q->pi_state;
 	u32 uval, curval, newval;
 	int ret;
@@ -1119,12 +1119,12 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 	} else
 		newtid |= FUTEX_OWNER_DIED;
 
-	pi_state->owner = curr;
+	pi_state->owner = newowner;
 
-	spin_lock_irq(&curr->pi_lock);
+	spin_lock_irq(&newowner->pi_lock);
 	WARN_ON(!list_empty(&pi_state->list));
-	list_add(&pi_state->list, &curr->pi_state_list);
-	spin_unlock_irq(&curr->pi_lock);
+	list_add(&pi_state->list, &newowner->pi_state_list);
+	spin_unlock_irq(&newowner->pi_lock);
 
 	/*
 	 * We own it, so we have to replace the pending owner
@@ -1149,9 +1149,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 
 /*
  * In case we must use restart_block to restart a futex_wait,
- * we encode in the 'arg3' shared capability
+ * we encode in the 'flags' shared capability
  */
-#define ARG3_SHARED  1
+#define FLAGS_SHARED  1
 
 static long futex_wait_restart(struct restart_block *restart);
 
@@ -1290,12 +1290,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 		struct restart_block *restart;
 		restart = &current_thread_info()->restart_block;
 		restart->fn = futex_wait_restart;
-		restart->arg0 = (unsigned long)uaddr;
-		restart->arg1 = (unsigned long)val;
-		restart->arg2 = (unsigned long)abs_time;
-		restart->arg3 = 0;
+		restart->futex.uaddr = (u32 *)uaddr;
+		restart->futex.val = val;
+		restart->futex.time = abs_time->tv64;
+		restart->futex.flags = 0;
+
 		if (fshared)
-			restart->arg3 |= ARG3_SHARED;
+			restart->futex.flags |= FLAGS_SHARED;
 		return -ERESTART_RESTARTBLOCK;
 	}
 
@@ -1310,15 +1311,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 static long futex_wait_restart(struct restart_block *restart)
 {
-	u32 __user *uaddr = (u32 __user *)restart->arg0;
-	u32 val = (u32)restart->arg1;
-	ktime_t *abs_time = (ktime_t *)restart->arg2;
+	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
 	struct rw_semaphore *fshared = NULL;
+	ktime_t t;
 
+	t.tv64 = restart->futex.time;
 	restart->fn = do_no_restart_syscall;
-	if (restart->arg3 & ARG3_SHARED)
+	if (restart->futex.flags & FLAGS_SHARED)
 		fshared = &current->mm->mmap_sem;
-	return (long)futex_wait(uaddr, fshared, val, abs_time);
+	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
 }
 
 
@@ -1507,9 +1508,40 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 		 * when we were on the way back before we locked the
 		 * hash bucket.
 		 */
-		if (q.pi_state->owner == curr &&
-		    rt_mutex_trylock(&q.pi_state->pi_mutex)) {
-			ret = 0;
+		if (q.pi_state->owner == curr) {
+			/*
+			 * Try to get the rt_mutex now. This might
+			 * fail as some other task acquired the
+			 * rt_mutex after we removed ourself from the
+			 * rt_mutex waiters list.
+			 */
+			if (rt_mutex_trylock(&q.pi_state->pi_mutex))
+				ret = 0;
+			else {
+				/*
+				 * pi_state is incorrect, some other
+				 * task did a lock steal and we
+				 * returned due to timeout or signal
+				 * without taking the rt_mutex. Too
+				 * late. We can access the
+				 * rt_mutex_owner without locking, as
+				 * the other task is now blocked on
+				 * the hash bucket lock. Fix the state
+				 * up.
+				 */
+				struct task_struct *owner;
+				int res;
+
+				owner = rt_mutex_owner(&q.pi_state->pi_mutex);
+				res = fixup_pi_state_owner(uaddr, &q, owner);
+
+				WARN_ON(rt_mutex_owner(&q.pi_state->pi_mutex) !=
+					owner);
+
+				/* propagate -EFAULT, if the fixup failed */
+				if (res)
+					ret = res;
+			}
 		} else {
 			/*
 			 * Paranoia check. If we did not take the lock
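The restart_block conversion above replaces the untyped arg0..arg3 slots with named per-syscall fields, and it stores the expiry by value instead of keeping a pointer to a stack variable alive across the restart. A simplified, self-contained sketch of that pattern (a hypothetical mirror of the kernel structures, not the real definitions):

#include <stdint.h>

/* Hypothetical, trimmed-down analogue of the kernel's change: one
 * typed struct per restartable syscall instead of four unsigned longs. */
struct restart_block {
	long (*fn)(struct restart_block *);
	union {
		struct {
			uint32_t *uaddr;   /* futex word */
			uint32_t  val;     /* expected value */
			uint64_t  time;    /* absolute expiry, by value */
			uint32_t  flags;   /* FLAGS_SHARED etc. */
		} futex;
	};
};

#define FLAGS_SHARED 1

/* No casts through unsigned long, so a 64-bit expiry survives intact
 * even on 32-bit machines, and nothing dangles once the original
 * stack frame is gone. */
static void save_futex_restart(struct restart_block *rb, uint32_t *uaddr,
			       uint32_t val, uint64_t abs_ns, int shared)
{
	rb->futex.uaddr = uaddr;
	rb->futex.val   = val;
	rb->futex.time  = abs_ns;
	rb->futex.flags = shared ? FLAGS_SHARED : 0;
}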
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 22a25142e4c..e65dd0b47cd 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -850,6 +850,14 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 #ifdef CONFIG_TIME_LOW_RES
 		tim = ktime_add(tim, base->resolution);
 #endif
+		/*
+		 * Careful here: User space might have asked for a
+		 * very long sleep, so the add above might result in a
+		 * negative number, which enqueues the timer in front
+		 * of the queue.
+		 */
+		if (tim.tv64 < 0)
+			tim.tv64 = KTIME_MAX;
 	}
 	timer->expires = tim;
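The hrtimer hunk clamps a wrapped expiry instead of enqueueing it: a negative tv64 would sort before every pending timer and fire immediately. A standalone sketch of the same saturating add (assumed helper name, and the addition done in unsigned arithmetic so the sketch avoids signed-overflow undefined behaviour):

#include <stdint.h>

#define KTIME_MAX INT64_MAX

/* Add an offset to an expiry, saturating at KTIME_MAX rather than
 * wrapping negative. */
static int64_t ktime_add_saturated(int64_t expires, int64_t offset)
{
	int64_t res = (int64_t)((uint64_t)expires + (uint64_t)offset);

	if (res < 0)		/* the add wrapped past INT64_MAX */
		res = KTIME_MAX;
	return res;
}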
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 9b5dff6b3f6..44019ce30a1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -297,18 +297,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
+	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 	kstat_cpu(cpu).irqs[irq]++;
 
 	action = desc->action;
-	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
-		desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-		desc->status |= IRQ_PENDING;
+	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
 		goto out_unlock;
-	}
 
-	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
 	desc->status |= IRQ_INPROGRESS;
 	spin_unlock(&desc->lock);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 474219a4192..2fc25810509 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -32,9 +32,14 @@
 /* These will be re-linked against their real values during the second link stage */
 extern const unsigned long kallsyms_addresses[] __attribute__((weak));
-extern const unsigned long kallsyms_num_syms __attribute__((weak));
 extern const u8 kallsyms_names[] __attribute__((weak));
 
+/* tell the compiler that the count isn't in the small data section if the arch
+ * has one (eg: FRV)
+ */
+extern const unsigned long kallsyms_num_syms
+__attribute__((weak, section(".rodata")));
+
 extern const u8 kallsyms_token_table[] __attribute__((weak));
 extern const u16 kallsyms_token_index[] __attribute__((weak));
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aa74a1ef2da..9a26eec9eb0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1404,6 +1404,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_OFFSET(list_head, next);
 	VMCOREINFO_OFFSET(list_head, prev);
 	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
 	VMCOREINFO_NUMBER(NR_FREE_PAGES);
 
 	arch_crash_save_vmcoreinfo();
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ed38bbfc48a..723bd9f9255 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2654,10 +2654,15 @@ static void check_flags(unsigned long flags)
 	if (!debug_locks)
 		return;
 
-	if (irqs_disabled_flags(flags))
-		DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
-	else
-		DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
+	if (irqs_disabled_flags(flags)) {
+		if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
+			printk("possible reason: unannotated irqs-off.\n");
+		}
+	} else {
+		if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
+			printk("possible reason: unannotated irqs-on.\n");
+		}
+	}
 
 	/*
 	 * We dont accurately track softirq state in e.g.
@@ -3054,11 +3059,6 @@ void __init lockdep_info(void)
 #endif
 }
 
-static inline int in_range(const void *start, const void *addr, const void *end)
-{
-	return addr >= start && addr <= end;
-}
-
 static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 		     const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3080,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	dump_stack();
 }
 
+static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+				const void* lock_from, unsigned long lock_len)
+{
+	return lock_from + lock_len <= mem_from ||
+		mem_from + mem_len <= lock_from;
+}
+
 /*
  * Called when kernel memory is freed (or unmapped), or if a lock
  * is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3094,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
  */
 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 {
-	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
 	struct task_struct *curr = current;
 	struct held_lock *hlock;
 	unsigned long flags;
@@ -3100,14 +3106,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		lock_from = (void *)hlock->instance;
-		lock_to = (void *)(hlock->instance + 1);
-
-		if (!in_range(mem_from, lock_from, mem_to) &&
-		    !in_range(mem_from, lock_to, mem_to))
+		if (not_in_range(mem_from, mem_len, hlock->instance,
+					sizeof(*hlock->instance)))
 			continue;
 
-		print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
 	local_irq_restore(flags);
@@ -3173,6 +3176,13 @@ retry:
 		printk(" locked it.\n");
 
 	do_each_thread(g, p) {
+		/*
+		 * It's not reliable to print a task's held locks
+		 * if it's not sleeping (or if it's not the current
+		 * task):
+		 */
+		if (p->state == TASK_RUNNING && p != current)
+			continue;
 		if (p->lockdep_depth)
 			lockdep_print_held_locks(p);
 		if (!unlock)
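The not_in_range() helper above replaces endpoint probing with a proper interval-disjointness test: the old in_range() checks only asked whether the lock's start or end address fell inside the freed range, so a held lock that strictly contains the freed region went unnoticed. A small standalone demonstration of the difference (hypothetical buffer, not kernel code):

#include <assert.h>

/* Old style: does either endpoint of the lock land in [from, to]? */
static int endpoint_overlap(const char *from, const char *to,
			    const char *lock, unsigned long len)
{
	const char *lock_end = lock + len;
	return (lock >= from && lock <= to) ||
	       (lock_end >= from && lock_end <= to);
}

/* New style: two ranges are disjoint iff one ends before the other
 * begins; negating that catches every overlap, including containment. */
static int ranges_overlap(const char *mem, unsigned long mem_len,
			  const char *lock, unsigned long lock_len)
{
	return !(lock + lock_len <= mem || mem + mem_len <= lock);
}

int main(void)
{
	char buf[64];

	/* lock covers the whole buffer; only its middle gets freed: */
	assert(!endpoint_overlap(buf + 16, buf + 32, buf, sizeof(buf)));
	assert(ranges_overlap(buf + 16, 16, buf, sizeof(buf)));
	return 0;
}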
diff --git a/kernel/module.c b/kernel/module.c
index 3202c995007..91fe6958b6e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -81,7 +81,8 @@ int unregister_module_notifier(struct notifier_block * nb)
 }
 EXPORT_SYMBOL(unregister_module_notifier);
 
-/* We require a truly strong try_module_get() */
+/* We require a truly strong try_module_get(): 0 means failure due to
+   ongoing or failed initialization etc. */
 static inline int strong_try_module_get(struct module *mod)
 {
 	if (mod && mod->state == MODULE_STATE_COMING)
@@ -952,7 +953,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
 	ret = __find_symbol(name, &owner, &crc,
 			!(mod->taints & TAINT_PROPRIETARY_MODULE));
 	if (ret) {
-		/* use_module can fail due to OOM, or module unloading */
+		/* use_module can fail due to OOM,
+		   or module initialization or unloading */
 		if (!check_version(sechdrs, versindex, name, mod, crc) ||
 		    !use_module(mod, owner))
 			ret = 0;
@@ -1369,7 +1371,7 @@ dup:
 	return ret;
 }
 
-/* Change all symbols so that sh_value encodes the pointer directly. */
+/* Change all symbols so that st_value encodes the pointer directly. */
 static int simplify_symbols(Elf_Shdr *sechdrs,
 			    unsigned int symindex,
 			    const char *strtab,
diff --git a/kernel/panic.c b/kernel/panic.c
index 6f6e03e9159..da4d6bac270 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -19,6 +19,7 @@
 #include <linux/nmi.h>
 #include <linux/kexec.h>
 #include <linux/debug_locks.h>
+#include <linux/random.h>
 
 int panic_on_oops;
 int tainted;
@@ -266,12 +267,29 @@ void oops_enter(void)
 }
 
 /*
+ * 64-bit random ID for oopses:
+ */
+static u64 oops_id;
+
+static int init_oops_id(void)
+{
+	if (!oops_id)
+		get_random_bytes(&oops_id, sizeof(oops_id));
+
+	return 0;
+}
+late_initcall(init_oops_id);
+
+/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
 void oops_exit(void)
 {
 	do_oops_enter_exit();
+	init_oops_id();
+	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
+		(unsigned long long)oops_id);
 }
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/kernel/params.c b/kernel/params.c
index 2a4c51487e7..7686417ee00 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -697,8 +697,18 @@ static struct kset_uevent_ops module_uevent_ops = {
 decl_subsys(module, &module_ktype, &module_uevent_ops);
 int module_sysfs_initialized;
 
+static void module_release(struct kobject *kobj)
+{
+	/*
+	 * Stupid empty release function to allow the memory for the kobject to
+	 * be properly cleaned up. This will not need to be present for 2.6.25
+	 * with the upcoming kobject core rework.
+	 */
+}
+
 static struct kobj_type module_ktype = {
 	.sysfs_ops =	&module_sysfs_ops,
+	.release =	module_release,
 };
 
 /*
diff --git a/kernel/printk.c b/kernel/printk.c
index a30fe33de39..89011bf8c10 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -817,7 +817,7 @@ __setup("console=", console_setup);
 * commonly to provide a default console (ie from PROM variables) when
 * the user has not supplied one.
 */
-int __init add_preferred_console(char *name, int idx, char *options)
+int add_preferred_console(char *name, int idx, char *options)
 {
 	struct console_cmdline *c;
 	int i;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 7c76f2ffaea..c25db863081 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -120,7 +120,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
 	return ret;
 }
 
-static int may_attach(struct task_struct *task)
+int __ptrace_may_attach(struct task_struct *task)
 {
 	/* May we inspect the given task?
* This check is used both for attaching with ptrace @@ -154,7 +154,7 @@ int ptrace_may_attach(struct task_struct *task) { int err; task_lock(task); - err = may_attach(task); + err = __ptrace_may_attach(task); task_unlock(task); return !err; } @@ -196,7 +196,7 @@ repeat: /* the same process cannot be attached many times */ if (task->ptrace & PT_PTRACED) goto bad; - retval = may_attach(task); + retval = __ptrace_may_attach(task); if (retval) goto bad; diff --git a/kernel/rwsem.c b/kernel/rwsem.c index 1ec620c0306..cae050b05f5 100644 --- a/kernel/rwsem.c +++ b/kernel/rwsem.c @@ -6,6 +6,7 @@ #include <linux/types.h> #include <linux/kernel.h> +#include <linux/sched.h> #include <linux/module.h> #include <linux/rwsem.h> @@ -15,7 +16,7 @@ /* * lock for reading */ -void down_read(struct rw_semaphore *sem) +void __sched down_read(struct rw_semaphore *sem) { might_sleep(); rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_); @@ -42,7 +43,7 @@ EXPORT_SYMBOL(down_read_trylock); /* * lock for writing */ -void down_write(struct rw_semaphore *sem) +void __sched down_write(struct rw_semaphore *sem) { might_sleep(); rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_); diff --git a/kernel/sched.c b/kernel/sched.c index 38933cafea8..37cf07aa416 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -209,9 +209,8 @@ static inline struct task_group *task_group(struct task_struct *p) tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), struct task_group, css); #else - tg = &init_task_group; + tg = &init_task_group; #endif - return tg; } @@ -249,15 +248,16 @@ struct cfs_rq { #ifdef CONFIG_FAIR_GROUP_SCHED struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ - /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in + /* + * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in * a hierarchy). Non-leaf lrqs hold other higher schedulable entities * (like users, containers etc.) * * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This * list is used during load balance. */ - struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? 
*/ - struct task_group *tg; /* group that "owns" this runqueue */ + struct list_head leaf_cfs_rq_list; + struct task_group *tg; /* group that "owns" this runqueue */ #endif }; @@ -300,7 +300,7 @@ struct rq { /* list of leaf cfs_rq on this cpu: */ struct list_head leaf_cfs_rq_list; #endif - struct rt_rq rt; + struct rt_rq rt; /* * This is part of a global counter where only the total sum @@ -457,8 +457,8 @@ enum { SCHED_FEAT_NEW_FAIR_SLEEPERS = 1, SCHED_FEAT_WAKEUP_PREEMPT = 2, SCHED_FEAT_START_DEBIT = 4, - SCHED_FEAT_TREE_AVG = 8, - SCHED_FEAT_APPROX_AVG = 16, + SCHED_FEAT_TREE_AVG = 8, + SCHED_FEAT_APPROX_AVG = 16, }; const_debug unsigned int sysctl_sched_features = @@ -488,7 +488,12 @@ unsigned long long cpu_clock(int cpu) local_irq_save(flags); rq = cpu_rq(cpu); - update_rq_clock(rq); + /* + * Only call sched_clock() if the scheduler has already been + * initialized (some code might call cpu_clock() very early): + */ + if (rq->idle) + update_rq_clock(rq); now = rq->clock; local_irq_restore(flags); @@ -503,10 +508,15 @@ EXPORT_SYMBOL_GPL(cpu_clock); # define finish_arch_switch(prev) do { } while (0) #endif +static inline int task_current(struct rq *rq, struct task_struct *p) +{ + return rq->curr == p; +} + #ifndef __ARCH_WANT_UNLOCKED_CTXSW static inline int task_running(struct rq *rq, struct task_struct *p) { - return rq->curr == p; + return task_current(rq, p); } static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) @@ -535,7 +545,7 @@ static inline int task_running(struct rq *rq, struct task_struct *p) #ifdef CONFIG_SMP return p->oncpu; #else - return rq->curr == p; + return task_current(rq, p); #endif } @@ -591,7 +601,7 @@ static inline struct rq *__task_rq_lock(struct task_struct *p) /* * task_rq_lock - lock the runqueue a given task resides on and disable - * interrupts. Note the ordering: we can safely lookup the task_rq without + * interrupts. Note the ordering: we can safely lookup the task_rq without * explicitly disabling preemption. */ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) @@ -658,6 +668,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) struct rq *rq = cpu_rq(smp_processor_id()); u64 now = sched_clock(); + touch_softlockup_watchdog(); rq->idle_clock += delta_ns; /* * Override the previous timestamp and ignore all @@ -779,7 +790,7 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec) * To aid in avoiding the subversion of "niceness" due to uneven distribution * of tasks with abnormal "nice" values across CPUs the contribution that * each task makes to its run queue's load is weighted according to its - * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a + * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a * scaled version of the new time slice allocation that they receive on time * slice expiry etc. */ @@ -854,6 +865,12 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, struct rq_iterator *iterator); #endif +#ifdef CONFIG_CGROUP_CPUACCT +static void cpuacct_charge(struct task_struct *tsk, u64 cputime); +#else +static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} +#endif + #include "sched_stats.h" #include "sched_idletask.c" #include "sched_fair.c" @@ -1848,7 +1865,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, * and do any other architecture-specific cleanup actions. * * Note that we may have delayed dropping an mm in context_switch(). 
If - * so, we finish that here outside of the runqueue lock. (Doing it + * so, we finish that here outside of the runqueue lock. (Doing it * with the lock held can cause deadlocks; see schedule() for * details.) */ @@ -2130,7 +2147,7 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest) /* * If dest_cpu is allowed for this process, migrate the task to it. * This is accomplished by forcing the cpu_allowed mask to only - * allow dest_cpu, which will force the cpu onto dest_cpu. Then + * allow dest_cpu, which will force the cpu onto dest_cpu. Then * the cpu_allowed mask is restored. */ static void sched_migrate_task(struct task_struct *p, int dest_cpu) @@ -2575,7 +2592,7 @@ group_next: * tasks around. Thus we look for the minimum possible imbalance. * Negative imbalances (*we* are more loaded than anyone else) will * be counted as no imbalance for these purposes -- we can't fix that - * by pulling tasks to us. Be careful of negative numbers as they'll + * by pulling tasks to us. Be careful of negative numbers as they'll * appear as very large values with unsigned longs. */ if (max_load <= busiest_load_per_task) @@ -3010,7 +3027,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) /* * This condition is "impossible", if it occurs - * we need to fix it. Originally reported by + * we need to fix it. Originally reported by * Bjorn Helgaas on a 128-cpu setup. */ BUG_ON(busiest_rq == target_rq); @@ -3042,7 +3059,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) #ifdef CONFIG_NO_HZ static struct { atomic_t load_balancer; - cpumask_t cpu_mask; + cpumask_t cpu_mask; } nohz ____cacheline_aligned = { .load_balancer = ATOMIC_INIT(-1), .cpu_mask = CPU_MASK_NONE, @@ -3323,7 +3340,7 @@ unsigned long long task_sched_runtime(struct task_struct *p) rq = task_rq_lock(p, &flags); ns = p->se.sum_exec_runtime; - if (rq->curr == p) { + if (task_current(rq, p)) { update_rq_clock(rq); delta_exec = rq->clock - p->se.exec_start; if ((s64)delta_exec > 0) @@ -3546,7 +3563,7 @@ static noinline void __schedule_bug(struct task_struct *prev) static inline void schedule_debug(struct task_struct *prev) { /* - * Test if we are atomic. Since do_exit() needs to call into + * Test if we are atomic. Since do_exit() needs to call into * schedule() atomically, we ignore that path for now. * Otherwise, whine if we are scheduling when we should not be. */ @@ -3668,7 +3685,7 @@ EXPORT_SYMBOL(schedule); #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption - * off of preempt_enable. Kernel preemptions off return from interrupt + * off of preempt_enable. Kernel preemptions off return from interrupt * occur there and call schedule directly. */ asmlinkage void __sched preempt_schedule(void) @@ -3680,7 +3697,7 @@ asmlinkage void __sched preempt_schedule(void) #endif /* * If there is a non-zero preempt_count or interrupts are disabled, - * we do not want to preempt the current task. Just return.. + * we do not want to preempt the current task. Just return.. */ if (likely(ti->preempt_count || irqs_disabled())) return; @@ -3766,12 +3783,12 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, EXPORT_SYMBOL(default_wake_function); /* - * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just - * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve + * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just + * wake everything up. 
If it's an exclusive wakeup (nr_exclusive == small +ve * number) then we wake all the non-exclusive tasks and one exclusive task. * * There are circumstances in which we can try to wake a task which has already - * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns + * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns * zero in this (rare) case, and we handle it by continuing to scan the queue. */ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, @@ -4010,7 +4027,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) oldprio = p->prio; on_rq = p->se.on_rq; - running = task_running(rq, p); + running = task_current(rq, p); if (on_rq) { dequeue_task(rq, p, 0); if (running) @@ -4321,7 +4338,7 @@ recheck: } update_rq_clock(rq); on_rq = p->se.on_rq; - running = task_running(rq, p); + running = task_current(rq, p); if (on_rq) { deactivate_task(rq, p, 0); if (running) @@ -4384,8 +4401,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) * @policy: new policy. * @param: structure containing the new RT priority. */ -asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, - struct sched_param __user *param) +asmlinkage long +sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) { /* negative values for policy are not valid */ if (policy < 0) @@ -4485,7 +4502,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) /* * It is not safe to call set_cpus_allowed with the - * tasklist_lock held. We will bump the task_struct's + * tasklist_lock held. We will bump the task_struct's * usage count and then drop tasklist_lock. */ get_task_struct(p); @@ -4681,7 +4698,7 @@ EXPORT_SYMBOL(cond_resched); * cond_resched_lock() - if a reschedule is pending, drop the given lock, * call schedule, and on return reacquire the lock. * - * This works OK both with and without CONFIG_PREEMPT. We do strange low-level + * This works OK both with and without CONFIG_PREEMPT. We do strange low-level * operations here to prevent schedule() from being called twice (once via * spin_unlock(), once by hand). */ @@ -4735,7 +4752,7 @@ void __sched yield(void) EXPORT_SYMBOL(yield); /* - * This task is about to go to sleep on IO. Increment rq->nr_iowait so + * This task is about to go to sleep on IO. Increment rq->nr_iowait so * that process accounting knows that this is a task in IO wait state. 
* * But don't do that if it is a deliberate, throttling IO wait (this task @@ -4844,17 +4861,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval) if (retval) goto out_unlock; - if (p->policy == SCHED_FIFO) - time_slice = 0; - else if (p->policy == SCHED_RR) + /* + * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER + * tasks that are on an otherwise idle runqueue: + */ + time_slice = 0; + if (p->policy == SCHED_RR) { time_slice = DEF_TIMESLICE; - else { + } else { struct sched_entity *se = &p->se; unsigned long flags; struct rq *rq; rq = task_rq_lock(p, &flags); - time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); + if (rq->cfs.load.weight) + time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); task_rq_unlock(rq, &flags); } read_unlock(&tasklist_lock); @@ -4897,7 +4918,7 @@ static void show_task(struct task_struct *p) } #endif printk(KERN_CONT "%5lu %5d %6d\n", free, - task_pid_nr(p), task_pid_nr(p->parent)); + task_pid_nr(p), task_pid_nr(p->real_parent)); if (state != TASK_RUNNING) show_stack(p, NULL); @@ -5040,7 +5061,7 @@ static inline void sched_init_granularity(void) * is removed from the allowed bitmask. * * NOTE: the caller must have a valid reference to the task, the - * task must not exit() & deallocate itself prematurely. The + * task must not exit() & deallocate itself prematurely. The * call is not atomic; no spinlocks may be held. */ int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) @@ -5077,7 +5098,7 @@ out: EXPORT_SYMBOL_GPL(set_cpus_allowed); /* - * Move (not current) task off this cpu, onto dest cpu. We're doing + * Move (not current) task off this cpu, onto dest cpu. We're doing * this because either it can't run here any more (set_cpus_allowed() * away from this CPU, or CPU going down), or because we're * attempting to rebalance this task on exec (sched_exec). @@ -5222,7 +5243,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) * Try to stay on the same cpuset, where the * current cpuset may be a subset of all cpus. * The cpuset_cpus_allowed_locked() variant of - * cpuset_cpus_allowed() will not block. It must be + * cpuset_cpus_allowed() will not block. It must be * called within calls to cpuset_lock/cpuset_unlock. */ rq = task_rq_lock(p, &flags); @@ -5235,10 +5256,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) * kernel threads (both mm NULL), since they never * leave kernel. */ - if (p->mm && printk_ratelimit()) + if (p->mm && printk_ratelimit()) { printk(KERN_INFO "process %d (%s) no " "longer affine to cpu%d\n", - task_pid_nr(p), p->comm, dead_cpu); + task_pid_nr(p), p->comm, dead_cpu); + } } } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); } @@ -5340,7 +5362,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) /* * Drop lock around migration; if someone else moves it, - * that's OK. No task can be added to this CPU, so iteration is + * that's OK. No task can be added to this CPU, so iteration is * fine. */ spin_unlock_irq(&rq->lock); @@ -5404,7 +5426,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) /* * In the intermediate directories, both the child directory and * procname are dynamically allocated and could fail but the mode - * will always be set. In the lowest directory the names are + * will always be set. In the lowest directory the names are * static strings and all have proc handlers. 
*/ for (entry = *tablep; entry->mode; entry++) { @@ -5466,7 +5488,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) return table; } -static ctl_table * sd_alloc_ctl_cpu_table(int cpu) +static ctl_table *sd_alloc_ctl_cpu_table(int cpu) { struct ctl_table *entry, *table; struct sched_domain *sd; @@ -5575,7 +5597,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_UP_CANCELED_FROZEN: if (!cpu_rq(cpu)->migration_thread) break; - /* Unbind it from offline cpu so it can run. Fall thru. */ + /* Unbind it from offline cpu so it can run. Fall thru. */ kthread_bind(cpu_rq(cpu)->migration_thread, any_online_cpu(cpu_online_map)); kthread_stop(cpu_rq(cpu)->migration_thread); @@ -5602,9 +5624,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); - /* No need to migrate the tasks: it was best-effort if - * they didn't take sched_hotcpu_mutex. Just wake up - * the requestors. */ + /* + * No need to migrate the tasks: it was best-effort if + * they didn't take sched_hotcpu_mutex. Just wake up + * the requestors. + */ spin_lock_irq(&rq->lock); while (!list_empty(&rq->migration_queue)) { struct migration_req *req; @@ -5912,7 +5936,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map, * @node: node whose sched_domain we're building * @used_nodes: nodes already in the sched_domain * - * Find the next node to include in a given scheduling domain. Simply + * Find the next node to include in a given scheduling domain. Simply * finds the closest node not already in the @used_nodes map. * * Should use nodemask_t. @@ -5952,7 +5976,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes) * @node: node whose cpumask we're constructing * @size: number of nodes to include in this span * - * Given a node, construct a good cpumask for its sched_domain to span. It + * Given a node, construct a good cpumask for its sched_domain to span. It * should be one that prevents unnecessary balancing, but also spreads tasks * out optimally. 
*/ @@ -5989,8 +6013,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0; static DEFINE_PER_CPU(struct sched_domain, cpu_domains); static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); -static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, - struct sched_group **sg) +static int +cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { if (sg) *sg = &per_cpu(sched_group_cpus, cpu); @@ -6007,8 +6031,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core); #endif #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) -static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, - struct sched_group **sg) +static int +cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { int group; cpumask_t mask = per_cpu(cpu_sibling_map, cpu); @@ -6019,8 +6043,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, return group; } #elif defined(CONFIG_SCHED_MC) -static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, - struct sched_group **sg) +static int +cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { if (sg) *sg = &per_cpu(sched_group_core, cpu); @@ -6031,8 +6055,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map, static DEFINE_PER_CPU(struct sched_domain, phys_domains); static DEFINE_PER_CPU(struct sched_group, sched_group_phys); -static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, - struct sched_group **sg) +static int +cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg) { int group; #ifdef CONFIG_SCHED_MC @@ -6212,7 +6236,7 @@ static int build_sched_domains(const cpumask_t *cpu_map) * Allocate the per-node list of sched groups */ sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *), - GFP_KERNEL); + GFP_KERNEL); if (!sched_group_nodes) { printk(KERN_WARNING "Can not alloc sched group node list\n"); return -ENOMEM; @@ -6459,7 +6483,7 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */ static cpumask_t fallback_doms; /* - * Set up scheduler domains and groups. Callers must hold the hotplug lock. + * Set up scheduler domains and groups. Callers must hold the hotplug lock. * For now this just excludes isolated cpus, but could be used to * exclude other special cases in the future. */ @@ -6501,19 +6525,19 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) /* * Partition sched domains as specified by the 'ndoms_new' - * cpumasks in the array doms_new[] of cpumasks. This compares + * cpumasks in the array doms_new[] of cpumasks. This compares * doms_new[] to the current sched domain partitioning, doms_cur[]. * It destroys each deleted domain and builds each new domain. * * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. - * The masks don't intersect (don't overlap.) We should setup one - * sched domain for each mask. CPUs not in any of the cpumasks will - * not be load balanced. If the same cpumask appears both in the + * The masks don't intersect (don't overlap.) We should setup one + * sched domain for each mask. CPUs not in any of the cpumasks will + * not be load balanced. If the same cpumask appears both in the * current 'doms_cur' domains and in the new 'doms_new', we can leave * it as it is. * - * The passed in 'doms_new' should be kmalloc'd. This routine takes - * ownership of it and will kfree it when done with it. If the caller + * The passed in 'doms_new' should be kmalloc'd. 
This routine takes + * ownership of it and will kfree it when done with it. If the caller * failed the kmalloc call, then it can pass in doms_new == NULL, * and partition_sched_domains() will fallback to the single partition * 'fallback_doms'. @@ -6643,7 +6667,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) #endif /* - * Force a reinitialization of the sched domains hierarchy. The domains + * Force a reinitialization of the sched domains hierarchy. The domains * and groups cannot be updated in place without racing with the balancing * code, so we temporarily attach all running cpus to the NULL domain * which will prevent rebalancing while the sched domains are recalculated. @@ -6708,9 +6732,6 @@ void __init sched_init_smp(void) int in_sched_functions(unsigned long addr) { - /* Linker adds these: start and end of __sched functions */ - extern char __sched_text_start[], __sched_text_end[]; - return in_lock_functions(addr) || (addr >= (unsigned long)__sched_text_start && addr < (unsigned long)__sched_text_end); @@ -6936,8 +6957,8 @@ struct task_struct *curr_task(int cpu) * @p: the task pointer to set. * * Description: This function must only be used when non-maskable interrupts - * are serviced on a separate stack. It allows the architecture to switch the - * notion of the current task on a cpu in a non-blocking manner. This function + * are serviced on a separate stack. It allows the architecture to switch the + * notion of the current task on a cpu in a non-blocking manner. This function * must be called with all CPU's synchronized, and interrupts disabled, the * and caller must save the original value of the current task (see * curr_task() above) and restore that value before reenabling interrupts and @@ -7086,7 +7107,7 @@ void sched_move_task(struct task_struct *tsk) update_rq_clock(rq); - running = task_running(rq, tsk); + running = task_current(rq, tsk); on_rq = tsk->se.on_rq; if (on_rq) { @@ -7186,16 +7207,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) return &tg->css; } -static void cpu_cgroup_destroy(struct cgroup_subsys *ss, - struct cgroup *cgrp) +static void +cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) { struct task_group *tg = cgroup_tg(cgrp); sched_destroy_group(tg); } -static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, - struct cgroup *cgrp, struct task_struct *tsk) +static int +cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, + struct task_struct *tsk) { /* We don't support RT-tasks being in separate groups */ if (tsk->sched_class != &fair_sched_class) @@ -7224,38 +7246,12 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) return (u64) tg->shares; } -static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft) -{ - struct task_group *tg = cgroup_tg(cgrp); - unsigned long flags; - u64 res = 0; - int i; - - for_each_possible_cpu(i) { - /* - * Lock to prevent races with updating 64-bit counters - * on 32-bit arches. 
- */ - spin_lock_irqsave(&cpu_rq(i)->lock, flags); - res += tg->se[i]->sum_exec_runtime; - spin_unlock_irqrestore(&cpu_rq(i)->lock, flags); - } - /* Convert from ns to ms */ - do_div(res, NSEC_PER_MSEC); - - return res; -} - static struct cftype cpu_files[] = { { .name = "shares", .read_uint = cpu_shares_read_uint, .write_uint = cpu_shares_write_uint, }, - { - .name = "usage", - .read_uint = cpu_usage_read, - }, }; static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) @@ -7275,3 +7271,126 @@ struct cgroup_subsys cpu_cgroup_subsys = { }; #endif /* CONFIG_FAIR_CGROUP_SCHED */ + +#ifdef CONFIG_CGROUP_CPUACCT + +/* + * CPU accounting code for task groups. + * + * Based on the work by Paul Menage (menage@google.com) and Balbir Singh + * (balbir@in.ibm.com). + */ + +/* track cpu usage of a group of tasks */ +struct cpuacct { + struct cgroup_subsys_state css; + /* cpuusage holds pointer to a u64-type object on every cpu */ + u64 *cpuusage; +}; + +struct cgroup_subsys cpuacct_subsys; + +/* return cpu accounting group corresponding to this container */ +static inline struct cpuacct *cgroup_ca(struct cgroup *cont) +{ + return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id), + struct cpuacct, css); +} + +/* return cpu accounting group to which this task belongs */ +static inline struct cpuacct *task_ca(struct task_struct *tsk) +{ + return container_of(task_subsys_state(tsk, cpuacct_subsys_id), + struct cpuacct, css); +} + +/* create a new cpu accounting group */ +static struct cgroup_subsys_state *cpuacct_create( + struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + + if (!ca) + return ERR_PTR(-ENOMEM); + + ca->cpuusage = alloc_percpu(u64); + if (!ca->cpuusage) { + kfree(ca); + return ERR_PTR(-ENOMEM); + } + + return &ca->css; +} + +/* destroy an existing cpu accounting group */ +static void +cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct cpuacct *ca = cgroup_ca(cont); + + free_percpu(ca->cpuusage); + kfree(ca); +} + +/* return total cpu usage (in nanoseconds) of a group */ +static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft) +{ + struct cpuacct *ca = cgroup_ca(cont); + u64 totalcpuusage = 0; + int i; + + for_each_possible_cpu(i) { + u64 *cpuusage = percpu_ptr(ca->cpuusage, i); + + /* + * Take rq->lock to make 64-bit addition safe on 32-bit + * platforms. + */ + spin_lock_irq(&cpu_rq(i)->lock); + totalcpuusage += *cpuusage; + spin_unlock_irq(&cpu_rq(i)->lock); + } + + return totalcpuusage; +} + +static struct cftype files[] = { + { + .name = "usage", + .read_uint = cpuusage_read, + }, +}; + +static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont) +{ + return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); +} + +/* + * charge this task's execution time to its accounting group. + * + * called with rq->lock held. 
+ */
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+{
+	struct cpuacct *ca;
+
+	if (!cpuacct_subsys.active)
+		return;
+
+	ca = task_ca(tsk);
+	if (ca) {
+		u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
+
+		*cpuusage += cputime;
+	}
+}
+
+struct cgroup_subsys cpuacct_subsys = {
+	.name = "cpuacct",
+	.create = cpuacct_create,
+	.destroy = cpuacct_destroy,
+	.populate = cpuacct_populate,
+	.subsys_id = cpuacct_subsys_id,
+};
+#endif	/* CONFIG_CGROUP_CPUACCT */
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ca198a797bf..80fbbfc0429 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -31,9 +31,9 @@
 /*
 * Ease the printing of nsec fields:
 */
-static long long nsec_high(long long nsec)
+static long long nsec_high(unsigned long long nsec)
 {
-	if (nsec < 0) {
+	if ((long long)nsec < 0) {
 		nsec = -nsec;
 		do_div(nsec, 1000000);
 		return -nsec;
@@ -43,9 +43,9 @@ static long long nsec_high(long long nsec)
 	return nsec;
 }
 
-static unsigned long nsec_low(long long nsec)
+static unsigned long nsec_low(unsigned long long nsec)
 {
-	if (nsec < 0)
+	if ((long long)nsec < 0)
 		nsec = -nsec;
 
 	return do_div(nsec, 1000000);
@@ -199,7 +199,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+	SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
 		init_utsname()->release,
 		(int)strcspn(init_utsname()->version, " "),
 		init_utsname()->version);
@@ -327,10 +327,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 		avg_atom = -1LL;
 
 	avg_per_cpu = p->se.sum_exec_runtime;
-	if (p->se.nr_migrations)
-		avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
-	else
+	if (p->se.nr_migrations) {
+		avg_per_cpu = div64_64(avg_per_cpu,
+				       p->se.nr_migrations);
+	} else {
 		avg_per_cpu = -1LL;
+	}
 
 	__PN(avg_atom);
 	__PN(avg_per_cpu);
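The nsec_high()/nsec_low() change above moves the sign handling out of the division helper, so do_div() always sees an unsigned dividend. A plain-C sketch of the same millisecond/remainder split, with stdlib division standing in for do_div():

#include <stdio.h>

/* Split a signed nanosecond count for "%lld.%06lu"-style printing,
 * mirroring how the debug code pairs nsec_high() with nsec_low(). */
static long long nsec_high(unsigned long long nsec)
{
	if ((long long)nsec < 0) {
		nsec = -nsec;
		return -(long long)(nsec / 1000000);
	}
	return nsec / 1000000;
}

static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;
	return nsec % 1000000;
}

int main(void)
{
	long long v = -1234567891LL;
	printf("%lld.%06lu ms\n", nsec_high(v), nsec_low(v)); /* -1234.567891 */
	return 0;
}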
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ee00da284b1..da7c061e720 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,7 +22,7 @@
 
 /*
 * Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms * ilog(ncpus), units: nanoseconds)
+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
@@ -36,14 +36,14 @@ unsigned int sysctl_sched_latency = 20000000ULL;
 
 /*
 * Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 4000000ULL;
 
 /*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
-static unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 5;
 
 /*
 * After fork, child runs first. (default) If set to 0 then
@@ -61,7 +61,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
 
 /*
 * SCHED_BATCH wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
@@ -71,7 +71,7 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
 
 /*
 * SCHED_OTHER wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
@@ -351,6 +351,12 @@ static void update_curr(struct cfs_rq *cfs_rq)
 
 	__update_curr(cfs_rq, curr, delta_exec);
 	curr->exec_start = now;
+
+	if (entity_is_task(curr)) {
+		struct task_struct *curtask = task_of(curr);
+
+		cpuacct_charge(curtask, delta_exec);
+	}
 }
 
 static inline void
@@ -505,8 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 	if (!initial) {
 		/* sleeps upto a single latency don't count. */
-		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
-				task_of(se)->policy != SCHED_BATCH)
+		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
 			vruntime -= sysctl_sched_latency;
 
 		/* ensure we never gain time by being placed backwards. */
@@ -793,8 +798,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 */
 static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-	struct sched_entity *rightmost, *se = &rq->curr->se;
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *rightmost, *se = &curr->se;
 
 	/*
 	 * Are we the only task in the tree?
@@ -802,7 +808,7 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
-	if (likely(!sysctl_sched_compat_yield)) {
+	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		__update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8abd752a0eb..9ba3daa0347 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -23,6 +23,7 @@ static void update_curr_rt(struct rq *rq)
 
 	curr->se.sum_exec_runtime += delta_exec;
 	curr->se.exec_start = rq->clock;
+	cpuacct_charge(curr, delta_exec);
 }
 
 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
@@ -207,6 +208,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
+	update_curr_rt(rq);
+
 	/*
 	 * RR tasks need a special form of timeslice management.
 	 * FIFO tasks have no timeslices.
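With cpuacct_charge() now called from both update_curr() and update_curr_rt(), fair and RT tasks alike feed the new cpuacct controller. A self-contained sketch of the per-CPU accounting scheme it uses, with pthread mutexes standing in for rq->lock and a fixed array for alloc_percpu():

#include <pthread.h>
#include <stdint.h>

#define NR_CPUS 4

/* One u64 counter per CPU: writers touch only their own slot under
 * that CPU's lock; the reader sums all slots, taking each lock in
 * turn so the 64-bit reads are safe even on 32-bit machines. */
static pthread_mutex_t rq_lock[NR_CPUS] = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};
static uint64_t cpuusage[NR_CPUS];

static void charge(int cpu, uint64_t delta_ns)
{
	pthread_mutex_lock(&rq_lock[cpu]);
	cpuusage[cpu] += delta_ns;
	pthread_mutex_unlock(&rq_lock[cpu]);
}

static uint64_t total_usage(void)
{
	uint64_t sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_lock(&rq_lock[cpu]);
		sum += cpuusage[cpu];
		pthread_mutex_unlock(&rq_lock[cpu]);
	}
	return sum;
}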
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 630178e53bb..5b32433e7ee 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -52,7 +52,8 @@ static int show_schedstat(struct seq_file *seq, void *v) sd->lb_nobusyq[itype], sd->lb_nobusyg[itype]); } - seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n", + seq_printf(seq, + " %u %u %u %u %u %u %u %u %u %u %u %u\n", sd->alb_count, sd->alb_failed, sd->alb_pushed, sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed, sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0deed82a615..c68f68dcc60 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -225,10 +225,10 @@ static struct ctl_table root_table[] = { }; #ifdef CONFIG_SCHED_DEBUG -static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */ -static unsigned long max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ -static unsigned long min_wakeup_granularity_ns; /* 0 usecs */ -static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ +static int min_sched_granularity_ns = 100000; /* 100 usecs */ +static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ +static int min_wakeup_granularity_ns; /* 0 usecs */ +static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ #endif static struct ctl_table kern_table[] = { @@ -906,11 +906,11 @@ static struct ctl_table vm_table[] = { }, { .ctl_name = CTL_UNNUMBERED, - .procname = "hugetlb_dynamic_pool", - .data = &hugetlb_dynamic_pool, - .maxlen = sizeof(hugetlb_dynamic_pool), + .procname = "nr_overcommit_hugepages", + .data = &nr_overcommit_huge_pages, + .maxlen = sizeof(nr_overcommit_huge_pages), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = &proc_doulongvec_minmax, }, #endif { @@ -1588,6 +1588,10 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table * table) void unregister_sysctl_table(struct ctl_table_header * header) { might_sleep(); + + if (header == NULL) + return; + spin_lock(&sysctl_lock); start_unregistering(header); spin_unlock(&sysctl_lock); diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c index 4abc6d2306f..a68425a5cc1 100644 --- a/kernel/sysctl_check.c +++ b/kernel/sysctl_check.c @@ -96,7 +96,7 @@ static struct trans_ctl_table trans_kern_table[] = { { KERN_PTY, "pty", trans_pty_table }, { KERN_NGROUPS_MAX, "ngroups_max" }, - { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" }, + { KERN_SPARC_SCONS_PWROFF, "scons-poweroff" }, { KERN_HZ_TIMER, "hz_timer" }, { KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" }, { KERN_BOOTLOADER_TYPE, "bootloader_type" }, @@ -140,9 +140,6 @@ static struct trans_ctl_table trans_vm_table[] = { { VM_PANIC_ON_OOM, "panic_on_oom" }, { VM_VDSO_ENABLED, "vdso_enabled" }, { VM_MIN_SLAB, "min_slab_ratio" }, - { VM_CMM_PAGES, "cmm_pages" }, - { VM_CMM_TIMED_PAGES, "cmm_timed_pages" }, - { VM_CMM_TIMEOUT, "cmm_timeout" }, {} }; @@ -237,36 +234,6 @@ static struct trans_ctl_table trans_net_ipv4_conf_table[] = { {} }; - -static struct trans_ctl_table trans_net_ipv4_vs_table[] = { - { NET_IPV4_VS_AMEMTHRESH, "amemthresh" }, - { NET_IPV4_VS_DEBUG_LEVEL, "debug_level" }, - { NET_IPV4_VS_AMDROPRATE, "am_droprate" }, - { NET_IPV4_VS_DROP_ENTRY, "drop_entry" }, - { NET_IPV4_VS_DROP_PACKET, "drop_packet" }, - { NET_IPV4_VS_SECURE_TCP, "secure_tcp" }, - { NET_IPV4_VS_TO_ES, "timeout_established" }, - { NET_IPV4_VS_TO_SS, "timeout_synsent" }, - { NET_IPV4_VS_TO_SR, "timeout_synrecv" }, - { NET_IPV4_VS_TO_FW, "timeout_finwait" }, - { NET_IPV4_VS_TO_TW, "timeout_timewait" }, - { 
NET_IPV4_VS_TO_CL, "timeout_close" }, - { NET_IPV4_VS_TO_CW, "timeout_closewait" }, - { NET_IPV4_VS_TO_LA, "timeout_lastack" }, - { NET_IPV4_VS_TO_LI, "timeout_listen" }, - { NET_IPV4_VS_TO_SA, "timeout_synack" }, - { NET_IPV4_VS_TO_UDP, "timeout_udp" }, - { NET_IPV4_VS_TO_ICMP, "timeout_icmp" }, - { NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" }, - { NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" }, - { NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" }, - { NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" }, - { NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" }, - { NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" }, - { NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" }, - {} -}; - static struct trans_ctl_table trans_net_neigh_vars_table[] = { { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" }, { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" }, @@ -341,7 +308,6 @@ static struct trans_ctl_table trans_net_ipv4_table[] = { { NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table }, /* NET_IPV4_FIB_HASH unused */ { NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table }, - { NET_IPV4_VS, "vs", trans_net_ipv4_vs_table }, { NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" }, { NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" }, @@ -462,7 +428,7 @@ static struct trans_ctl_table trans_net_netrom_table[] = { {} }; -static struct trans_ctl_table trans_net_ax25_table[] = { +static struct trans_ctl_table trans_net_ax25_param_table[] = { { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" }, { NET_AX25_DEFAULT_MODE, "ax25_default_mode" }, { NET_AX25_BACKOFF_TYPE, "backoff_type" }, @@ -480,6 +446,11 @@ static struct trans_ctl_table trans_net_ax25_table[] = { {} }; +static struct trans_ctl_table trans_net_ax25_table[] = { + { 0, NULL, trans_net_ax25_param_table }, + {} +}; + static struct trans_ctl_table trans_net_bridge_table[] = { { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" }, { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" }, @@ -1219,16 +1190,6 @@ static struct trans_ctl_table trans_arlan_table[] = { {} }; -static struct trans_ctl_table trans_appldata_table[] = { - { CTL_APPLDATA_TIMER, "timer" }, - { CTL_APPLDATA_INTERVAL, "interval" }, - { CTL_APPLDATA_OS, "os" }, - { CTL_APPLDATA_NET_SUM, "net_sum" }, - { CTL_APPLDATA_MEM, "mem" }, - {} - -}; - static struct trans_ctl_table trans_s390dbf_table[] = { { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" }, { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" }, @@ -1273,7 +1234,6 @@ static struct trans_ctl_table trans_root_table[] = { { CTL_ABI, "abi" }, /* CTL_CPU not used */ { CTL_ARLAN, "arlan", trans_arlan_table }, - { CTL_APPLDATA, "appldata", trans_appldata_table }, { CTL_S390DBF, "s390dbf", trans_s390dbf_table }, { CTL_SUNRPC, "sunrpc", trans_sunrpc_table }, { CTL_PM, "pm", trans_pm_table }, diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 822beebe664..5fb139fef9f 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -78,6 +78,11 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires, unsigned long long clc; int64_t delta; + if (unlikely(expires.tv64 < 0)) { + WARN_ON_ONCE(1); + return -ETIME; + } + delta = ktime_to_ns(ktime_sub(expires, now)); if (delta <= 0) diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 14a2ecf2b31..e64efaf957e 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -249,10 +249,12 @@ int do_adjtimex(struct timex *txc) /* Now we validate the data before disabling interrupts */ - if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == 
ADJ_OFFSET_SINGLESHOT)
+	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
 		/* singleshot must not be used with any other mode bits */
-		if (txc->modes != ADJ_OFFSET_SINGLESHOT)
+		if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
+		    txc->modes != ADJ_OFFSET_SS_READ)
 			return -EINVAL;
+	}
 	if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
 	  /* adjustment Offset limited to +- .512 seconds */
@@ -372,7 +374,8 @@ int do_adjtimex(struct timex *txc)
 leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
 		result = TIME_ERROR;
 
-	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
+	    (txc->modes == ADJ_OFFSET_SS_READ))
 		txc->offset = save_adjust;
 	else
 		txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
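The ntp.c change accepts ADJ_OFFSET_SS_READ as a read-only query of the pending singleshot adjustment instead of rejecting it with -EINVAL. A hedged userspace sketch of the call through glibc's adjtimex(); the fallback define matches the value in later linux/timex.h headers and may be unnecessary on current systems:

#include <stdio.h>
#include <sys/timex.h>

#ifndef ADJ_OFFSET_SS_READ
#define ADJ_OFFSET_SS_READ 0xa001	/* value from later linux/timex.h */
#endif

int main(void)
{
	struct timex tx = { .modes = ADJ_OFFSET_SS_READ };

	/* With this kernel change, the pending singleshot adjustment
	 * comes back in tx.offset without applying a new one. */
	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}
	printf("pending singleshot offset: %ld us\n", tx.offset);
	return 0;
}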
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aa82d7bf478..5b86698faa0 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,45 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 }
 
 /*
- * Reprogram the broadcast device:
- *
- * Called with tick_broadcast_lock held and interrupts disabled.
- */
-static int tick_broadcast_reprogram(void)
-{
-	ktime_t expires = { .tv64 = KTIME_MAX };
-	struct tick_device *td;
-	int cpu;
-
-	/*
-	 * Find the event which expires next:
-	 */
-	for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
-	     cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
-		td = &per_cpu(tick_cpu_device, cpu);
-		if (td->evtdev->next_event.tv64 < expires.tv64)
-			expires = td->evtdev->next_event;
-	}
-
-	if (expires.tv64 == KTIME_MAX)
-		return 0;
-
-	return tick_broadcast_set_event(expires, 0);
-}
-
-/*
 * Handle oneshot mode broadcasting
 */
 static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
 {
 	struct tick_device *td;
 	cpumask_t mask;
-	ktime_t now;
+	ktime_t now, next_event;
 	int cpu;
 
 	spin_lock(&tick_broadcast_lock);
 again:
 	dev->next_event.tv64 = KTIME_MAX;
+	next_event.tv64 = KTIME_MAX;
 	mask = CPU_MASK_NONE;
 	now = ktime_get();
 	/* Find all expired events */
@@ -431,19 +405,31 @@ again:
 		td = &per_cpu(tick_cpu_device, cpu);
 		if (td->evtdev->next_event.tv64 <= now.tv64)
 			cpu_set(cpu, mask);
+		else if (td->evtdev->next_event.tv64 < next_event.tv64)
+			next_event.tv64 = td->evtdev->next_event.tv64;
 	}
 
 	/*
-	 * Wakeup the cpus which have an expired event. The broadcast
-	 * device is reprogrammed in the return from idle code.
+	 * Wakeup the cpus which have an expired event.
+	 */
+	tick_do_broadcast(mask);
+
+	/*
+	 * Two reasons for reprogram:
+	 *
+	 * - The global event did not expire any CPU local
+	 * events. This happens in dyntick mode, as the maximum PIT
+	 * delta is quite small.
+	 *
+	 * - There are pending events on sleeping CPUs which were not
+	 * in the event mask
 	 */
-	if (!tick_do_broadcast(mask)) {
+	if (next_event.tv64 != KTIME_MAX) {
 		/*
-		 * The global event did not expire any CPU local
-		 * events. This happens in dyntick mode, as the
-		 * maximum PIT delta is quite small.
+		 * Rearm the broadcast device. If event expired,
+		 * repeat the above
 		 */
-		if (tick_broadcast_reprogram())
+		if (tick_broadcast_set_event(next_event, 0))
 			goto again;
 	}
 	spin_unlock(&tick_broadcast_lock);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 27a2338deb4..cb89fa8db11 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -133,6 +133,8 @@ void tick_nohz_update_jiffies(void)
 	if (!ts->tick_stopped)
 		return;
 
+	touch_softlockup_watchdog();
+
 	cpu_clear(cpu, nohz_cpu_mask);
 	now = ktime_get();
diff --git a/kernel/timer.c b/kernel/timer.c
index a05817c021d..d4527dcef1a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1219,11 +1219,11 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
 */
 static struct lock_class_key base_lock_keys[NR_CPUS];
 
-static int __devinit init_timers_cpu(int cpu)
+static int __cpuinit init_timers_cpu(int cpu)
 {
 	int j;
 	tvec_base_t *base;
-	static char __devinitdata tvec_base_done[NR_CPUS];
+	static char __cpuinitdata tvec_base_done[NR_CPUS];
 
 	if (!tvec_base_done[cpu]) {
 		static char boot_done;
diff --git a/kernel/user.c b/kernel/user.c
index 0f3aa023410..8320a87f3e5 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -337,8 +337,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 		struct user_struct *new;
 
 		new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-		if (!new)
+		if (!new) {
+			uids_mutex_unlock();
 			return NULL;
+		}
+
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
 		atomic_set(&new->processes, 0);
@@ -355,6 +358,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 
 		if (alloc_uid_keyring(new, current) < 0) {
 			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
 			return NULL;
 		}
 
@@ -362,6 +366,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 			key_put(new->uid_keyring);
 			key_put(new->session_keyring);
 			kmem_cache_free(uid_cachep, new);
+			uids_mutex_unlock();
 			return NULL;
 		}
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index c76c06466bf..fe3a56c2256 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -18,6 +18,10 @@
 static void *get_uts(ctl_table *table, int write)
 {
 	char *which = table->data;
+	struct uts_namespace *uts_ns;
+
+	uts_ns = current->nsproxy->uts_ns;
+	which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
 
 	if (!write)
 		down_read(&uts_sem);
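The get_uts() change above maps a sysctl entry's data pointer from the global init_uts_ns template onto the calling task's namespace by reusing the field's byte offset. A standalone sketch of that relocation trick (hypothetical struct, not the kernel's uts_namespace):

#include <stdio.h>
#include <string.h>

struct uts_ns {			/* hypothetical stand-in */
	char sysname[16];
	char nodename[16];
};

static struct uts_ns init_ns = { "Linux", "initial" };

/* A table entry stores a pointer to a field of the template; map it
 * to the same field inside another namespace instance by carrying the
 * byte offset over. */
static char *relocate(char *template_field, struct uts_ns *ns)
{
	return (char *)ns + (template_field - (char *)&init_ns);
}

int main(void)
{
	struct uts_ns container;

	memcpy(&container, &init_ns, sizeof(container));
	strcpy(container.nodename, "inside-ns");

	char *field = init_ns.nodename;		     /* what the table holds */
	printf("%s\n", relocate(field, &container)); /* prints "inside-ns" */
	return 0;
}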