Diffstat (limited to 'kernel')
63 files changed, 8599 insertions, 2496 deletions
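Before the diff itself, a brief illustration of the per-subsystem file interface that kernel/cgroup.c introduces below (struct cftype, cgroup_add_files(), and the read_uint/write_uint handlers). This is a hedged sketch, not code from the patch: the subsystem hook example_populate, the file name "usage" and the returned value are hypothetical, and the populate-callback signature is assumed from the way cgroup_populate_dir() is wired into cgroup_add_file() in the code below.

	/*
	 * Illustrative sketch only -- not part of this commit.  A controller
	 * exposes a read-only u64 file through the cftype interface added by
	 * kernel/cgroup.c in the diff that follows.
	 */
	#include <linux/kernel.h>
	#include <linux/cgroup.h>

	static u64 example_read_uint(struct cgroup *cgrp, struct cftype *cft)
	{
		/* a real controller would return per-cgroup state here */
		return 0;
	}

	static struct cftype example_files[] = {
		{
			.name = "usage",
			.read_uint = example_read_uint,
		},
	};

	/* assumed to be called while the cgroup directory is being populated */
	static int example_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
	{
		return cgroup_add_files(cgrp, ss, example_files,
					ARRAY_SIZE(example_files));
	}

A controller registering such a cftype would get a "&lt;subsys&gt;.usage" file in each cgroup directory, since cgroup_add_file() prefixes the subsystem name unless the hierarchy was mounted with the noprefix option.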
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation new file mode 100644 index 00000000000..f5f2c769d95 --- /dev/null +++ b/kernel/Kconfig.instrumentation @@ -0,0 +1,49 @@ +menuconfig INSTRUMENTATION + bool "Instrumentation Support" + default y + ---help--- + Say Y here to get to see options related to performance measurement, + system-wide debugging, and testing. This option alone does not add any + kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. If you're trying to debug the kernel itself, go see the + Kernel Hacking menu. + +if INSTRUMENTATION + +config PROFILING + bool "Profiling support (EXPERIMENTAL)" + help + Say Y here to enable the extended profiling support mechanisms used + by profilers such as OProfile. + +config OPROFILE + tristate "OProfile system profiling (EXPERIMENTAL)" + depends on PROFILING + depends on ALPHA || ARM || BLACKFIN || X86_32 || IA64 || M32R || MIPS || PARISC || PPC || S390 || SUPERH || SPARC || X86_64 + help + OProfile is a profiling system capable of profiling the + whole system, include the kernel, kernel modules, libraries, + and applications. + + If unsure, say N. + +config KPROBES + bool "Kprobes" + depends on KALLSYMS && MODULES + depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32 + help + Kprobes allows you to trap at almost any kernel address and + execute a callback function. register_kprobe() establishes + a probepoint and specifies the callback. Kprobes is useful + for kernel debugging, non-intrusive instrumentation and testing. + If in doubt, say "N". + +config MARKERS + bool "Activate markers" + help + Place an empty function call at each marker site. Can be + dynamically changed for a probe function. + +endif # INSTRUMENTATION diff --git a/kernel/Makefile b/kernel/Makefile index 2a999836ca1..05c3e6df859 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -8,8 +8,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ signal.o sys.o kmod.o workqueue.o pid.o \ rcupdate.o extable.o params.o posix-timers.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ - hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o \ - utsname.o + hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \ + utsname.o sysctl_check.o notifier.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-y += time/ @@ -36,7 +36,11 @@ obj-$(CONFIG_PM) += power/ obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o obj-$(CONFIG_KEXEC) += kexec.o obj-$(CONFIG_COMPAT) += compat.o +obj-$(CONFIG_CGROUPS) += cgroup.o +obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o obj-$(CONFIG_CPUSETS) += cpuset.o +obj-$(CONFIG_CGROUP_CPUACCT) += cpu_acct.o +obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o obj-$(CONFIG_IKCONFIG) += configs.o obj-$(CONFIG_STOP_MACHINE) += stop_machine.o obj-$(CONFIG_AUDIT) += audit.o auditfilter.o @@ -51,6 +55,7 @@ obj-$(CONFIG_RELAY) += relay.o obj-$(CONFIG_SYSCTL) += utsname_sysctl.o obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o +obj-$(CONFIG_MARKERS) += marker.o ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y) # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is diff --git a/kernel/acct.c b/kernel/acct.c index 24f0f8b2ba7..fce53d8df8a 100644 --- a/kernel/acct.c +++ b/kernel/acct.c @@ -329,16 +329,16 @@ static comp_t encode_comp_t(unsigned long value) } /* - * If we need to round up, do it (and handle overflow correctly). - */ + * If we need to round up, do it (and handle overflow correctly). 
+ */ if (rnd && (++value > MAXFRACT)) { value >>= EXPSIZE; exp++; } /* - * Clean it up and polish it off. - */ + * Clean it up and polish it off. + */ exp <<= MANTSIZE; /* Shift the exponent into place */ exp += value; /* and add on the mantissa. */ return exp; @@ -361,30 +361,30 @@ static comp_t encode_comp_t(unsigned long value) static comp2_t encode_comp2_t(u64 value) { - int exp, rnd; - - exp = (value > (MAXFRACT2>>1)); - rnd = 0; - while (value > MAXFRACT2) { - rnd = value & 1; - value >>= 1; - exp++; - } - - /* - * If we need to round up, do it (and handle overflow correctly). - */ - if (rnd && (++value > MAXFRACT2)) { - value >>= 1; - exp++; - } - - if (exp > MAXEXP2) { - /* Overflow. Return largest representable number instead. */ - return (1ul << (MANTSIZE2+EXPSIZE2-1)) - 1; - } else { - return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1)); - } + int exp, rnd; + + exp = (value > (MAXFRACT2>>1)); + rnd = 0; + while (value > MAXFRACT2) { + rnd = value & 1; + value >>= 1; + exp++; + } + + /* + * If we need to round up, do it (and handle overflow correctly). + */ + if (rnd && (++value > MAXFRACT2)) { + value >>= 1; + exp++; + } + + if (exp > MAXEXP2) { + /* Overflow. Return largest representable number instead. */ + return (1ul << (MANTSIZE2+EXPSIZE2-1)) - 1; + } else { + return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1)); + } } #endif @@ -501,14 +501,14 @@ static void do_acct_process(struct file *file) ac.ac_swaps = encode_comp_t(0); /* - * Kernel segment override to datasegment and write it - * to the accounting file. - */ + * Kernel segment override to datasegment and write it + * to the accounting file. + */ fs = get_fs(); set_fs(KERNEL_DS); /* - * Accounting records are not subject to resource limits. - */ + * Accounting records are not subject to resource limits. + */ flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; file->f_op->write(file, (char *)&ac, diff --git a/kernel/audit.c b/kernel/audit.c index 2924251a654..6977ea57a7e 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -664,11 +664,11 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (sid) { if (selinux_sid_to_string( sid, &ctx, &len)) { - audit_log_format(ab, + audit_log_format(ab, " ssid=%u", sid); /* Maybe call audit_panic? 
*/ } else - audit_log_format(ab, + audit_log_format(ab, " subj=%s", ctx); kfree(ctx); } @@ -769,7 +769,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) sig_data->pid = audit_sig_pid; memcpy(sig_data->ctx, ctx, len); kfree(ctx); - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, + audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO, 0, 0, sig_data, sizeof(*sig_data) + len); kfree(sig_data); break; @@ -1005,7 +1005,7 @@ unsigned int audit_serial(void) return ret; } -static inline void audit_get_stamp(struct audit_context *ctx, +static inline void audit_get_stamp(struct audit_context *ctx, struct timespec *t, unsigned int *serial) { if (ctx) @@ -1056,7 +1056,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, if (gfp_mask & __GFP_WAIT) reserve = 0; else - reserve = 5; /* Allow atomic callers to go up to five + reserve = 5; /* Allow atomic callers to go up to five entries over the normal backlog limit */ while (audit_backlog_limit @@ -1319,7 +1319,7 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix, if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */ /* FIXME: can we save some information here? */ audit_log_format(ab, "<too long>"); - } else + } else audit_log_untrustedstring(ab, p); kfree(path); } @@ -1365,7 +1365,7 @@ void audit_log_end(struct audit_buffer *ab) * audit_log_vformat, and audit_log_end. It may be called * in any context. */ -void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, +void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type, const char *fmt, ...) { struct audit_buffer *ab; diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 359645cff5b..df66a21fb36 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -1498,7 +1498,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data, * auditctl to read from it... which isn't ever going to * happen if we're actually running in the context of auditctl * trying to _send_ the stuff */ - + dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); if (!dest) return -ENOMEM; @@ -1678,7 +1678,7 @@ int audit_filter_type(int type) { struct audit_entry *e; int result = 0; - + rcu_read_lock(); if (list_empty(&audit_filter_list[AUDIT_FILTER_TYPE])) goto unlock_and_return; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 938e60a6188..e19b5a33aed 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -320,7 +320,7 @@ static int audit_filter_rules(struct task_struct *tsk, result = audit_comparator(tsk->personality, f->op, f->val); break; case AUDIT_ARCH: - if (ctx) + if (ctx) result = audit_comparator(ctx->arch, f->op, f->val); break; @@ -898,7 +898,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts if (context->personality != PER_LINUX) audit_log_format(ab, " per=%lx", context->personality); if (context->return_valid) - audit_log_format(ab, " success=%s exit=%ld", + audit_log_format(ab, " success=%s exit=%ld", (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", context->return_code); @@ -1135,8 +1135,8 @@ void audit_free(struct task_struct *tsk) return; /* Check for system calls that do not go through the exit - * function (e.g., exit_group), then free context block. - * We use GFP_ATOMIC here because we might be doing this + * function (e.g., exit_group), then free context block. 
+ * We use GFP_ATOMIC here because we might be doing this * in the context of the idle thread */ /* that can happen only if we are called from do_exit() */ if (context->in_syscall && context->auditable) @@ -1316,7 +1316,7 @@ void __audit_getname(const char *name) context->pwdmnt = mntget(current->fs->pwdmnt); read_unlock(¤t->fs->lock); } - + } /* audit_putname - intercept a putname request diff --git a/kernel/capability.c b/kernel/capability.c index 4e350a36ed6..efbd9cdce13 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -3,20 +3,18 @@ * * Copyright (C) 1997 Andrew Main <zefram@fysh.org> * - * Integrated into 2.1.97+, Andrew G. Morgan <morgan@transmeta.com> + * Integrated into 2.1.97+, Andrew G. Morgan <morgan@kernel.org> * 30 May 2002: Cleanup, Robert M. Love <rml@tech9.net> - */ + */ #include <linux/capability.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/security.h> #include <linux/syscalls.h> +#include <linux/pid_namespace.h> #include <asm/uaccess.h> -unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */ -kernel_cap_t cap_bset = CAP_INIT_EFF_SET; - /* * This lock protects task->cap_* for all tasks including current. * Locking rule: acquire this prior to tasklist_lock. @@ -40,49 +38,49 @@ static DEFINE_SPINLOCK(task_capability_lock); */ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr) { - int ret = 0; - pid_t pid; - __u32 version; - struct task_struct *target; - struct __user_cap_data_struct data; - - if (get_user(version, &header->version)) - return -EFAULT; - - if (version != _LINUX_CAPABILITY_VERSION) { - if (put_user(_LINUX_CAPABILITY_VERSION, &header->version)) - return -EFAULT; - return -EINVAL; - } + int ret = 0; + pid_t pid; + __u32 version; + struct task_struct *target; + struct __user_cap_data_struct data; + + if (get_user(version, &header->version)) + return -EFAULT; + + if (version != _LINUX_CAPABILITY_VERSION) { + if (put_user(_LINUX_CAPABILITY_VERSION, &header->version)) + return -EFAULT; + return -EINVAL; + } - if (get_user(pid, &header->pid)) - return -EFAULT; + if (get_user(pid, &header->pid)) + return -EFAULT; - if (pid < 0) - return -EINVAL; + if (pid < 0) + return -EINVAL; - spin_lock(&task_capability_lock); - read_lock(&tasklist_lock); + spin_lock(&task_capability_lock); + read_lock(&tasklist_lock); - if (pid && pid != current->pid) { - target = find_task_by_pid(pid); - if (!target) { - ret = -ESRCH; - goto out; - } - } else - target = current; + if (pid && pid != task_pid_vnr(current)) { + target = find_task_by_vpid(pid); + if (!target) { + ret = -ESRCH; + goto out; + } + } else + target = current; - ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted); + ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted); out: - read_unlock(&tasklist_lock); - spin_unlock(&task_capability_lock); + read_unlock(&tasklist_lock); + spin_unlock(&task_capability_lock); - if (!ret && copy_to_user(dataptr, &data, sizeof data)) - return -EFAULT; + if (!ret && copy_to_user(dataptr, &data, sizeof data)) + return -EFAULT; - return ret; + return ret; } /* @@ -98,7 +96,7 @@ static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective, int found = 0; struct pid *pgrp; - pgrp = find_pid(pgrp_nr); + pgrp = find_vpid(pgrp_nr); do_each_pid_task(pgrp, PIDTYPE_PGID, g) { target = g; while_each_thread(g, target) { @@ -115,7 +113,7 @@ static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective, } while_each_pid_task(pgrp, PIDTYPE_PGID, g); if 
(!found) - ret = 0; + ret = 0; return ret; } @@ -132,7 +130,7 @@ static inline int cap_set_all(kernel_cap_t *effective, int found = 0; do_each_thread(g, target) { - if (target == current || is_init(target)) + if (target == current || is_container_init(target->group_leader)) continue; found = 1; if (security_capset_check(target, effective, inheritable, @@ -169,68 +167,68 @@ static inline int cap_set_all(kernel_cap_t *effective, */ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data) { - kernel_cap_t inheritable, permitted, effective; - __u32 version; - struct task_struct *target; - int ret; - pid_t pid; - - if (get_user(version, &header->version)) - return -EFAULT; - - if (version != _LINUX_CAPABILITY_VERSION) { - if (put_user(_LINUX_CAPABILITY_VERSION, &header->version)) - return -EFAULT; - return -EINVAL; - } - - if (get_user(pid, &header->pid)) - return -EFAULT; - - if (pid && pid != current->pid && !capable(CAP_SETPCAP)) - return -EPERM; - - if (copy_from_user(&effective, &data->effective, sizeof(effective)) || - copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) || - copy_from_user(&permitted, &data->permitted, sizeof(permitted))) - return -EFAULT; - - spin_lock(&task_capability_lock); - read_lock(&tasklist_lock); - - if (pid > 0 && pid != current->pid) { - target = find_task_by_pid(pid); - if (!target) { - ret = -ESRCH; - goto out; - } - } else - target = current; - - ret = 0; - - /* having verified that the proposed changes are legal, - we now put them into effect. */ - if (pid < 0) { - if (pid == -1) /* all procs other than current and init */ - ret = cap_set_all(&effective, &inheritable, &permitted); - - else /* all procs in process group */ - ret = cap_set_pg(-pid, &effective, &inheritable, - &permitted); - } else { - ret = security_capset_check(target, &effective, &inheritable, - &permitted); - if (!ret) - security_capset_set(target, &effective, &inheritable, - &permitted); - } + kernel_cap_t inheritable, permitted, effective; + __u32 version; + struct task_struct *target; + int ret; + pid_t pid; + + if (get_user(version, &header->version)) + return -EFAULT; + + if (version != _LINUX_CAPABILITY_VERSION) { + if (put_user(_LINUX_CAPABILITY_VERSION, &header->version)) + return -EFAULT; + return -EINVAL; + } + + if (get_user(pid, &header->pid)) + return -EFAULT; + + if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP)) + return -EPERM; + + if (copy_from_user(&effective, &data->effective, sizeof(effective)) || + copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) || + copy_from_user(&permitted, &data->permitted, sizeof(permitted))) + return -EFAULT; + + spin_lock(&task_capability_lock); + read_lock(&tasklist_lock); + + if (pid > 0 && pid != task_pid_vnr(current)) { + target = find_task_by_vpid(pid); + if (!target) { + ret = -ESRCH; + goto out; + } + } else + target = current; + + ret = 0; + + /* having verified that the proposed changes are legal, + we now put them into effect. 
*/ + if (pid < 0) { + if (pid == -1) /* all procs other than current and init */ + ret = cap_set_all(&effective, &inheritable, &permitted); + + else /* all procs in process group */ + ret = cap_set_pg(-pid, &effective, &inheritable, + &permitted); + } else { + ret = security_capset_check(target, &effective, &inheritable, + &permitted); + if (!ret) + security_capset_set(target, &effective, &inheritable, + &permitted); + } out: - read_unlock(&tasklist_lock); - spin_unlock(&task_capability_lock); + read_unlock(&tasklist_lock); + spin_unlock(&task_capability_lock); - return ret; + return ret; } int __capable(struct task_struct *t, int cap) diff --git a/kernel/cgroup.c b/kernel/cgroup.c new file mode 100644 index 00000000000..5987dccdb2a --- /dev/null +++ b/kernel/cgroup.c @@ -0,0 +1,2805 @@ +/* + * kernel/cgroup.c + * + * Generic process-grouping system. + * + * Based originally on the cpuset system, extracted by Paul Menage + * Copyright (C) 2006 Google, Inc + * + * Copyright notices from the original cpuset code: + * -------------------------------------------------- + * Copyright (C) 2003 BULL SA. + * Copyright (C) 2004-2006 Silicon Graphics, Inc. + * + * Portions derived from Patrick Mochel's sysfs code. + * sysfs is Copyright (c) 2001-3 Patrick Mochel + * + * 2003-10-10 Written by Simon Derr. + * 2003-10-22 Updates by Stephen Hemminger. + * 2004 May-July Rework by Paul Jackson. + * --------------------------------------------------- + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of the Linux + * distribution for more details. + */ + +#include <linux/cgroup.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/mount.h> +#include <linux/pagemap.h> +#include <linux/proc_fs.h> +#include <linux/rcupdate.h> +#include <linux/sched.h> +#include <linux/backing-dev.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/magic.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/sort.h> +#include <linux/kmod.h> +#include <linux/delayacct.h> +#include <linux/cgroupstats.h> + +#include <asm/atomic.h> + +static DEFINE_MUTEX(cgroup_mutex); + +/* Generate an array of cgroup subsystem pointers */ +#define SUBSYS(_x) &_x ## _subsys, + +static struct cgroup_subsys *subsys[] = { +#include <linux/cgroup_subsys.h> +}; + +/* + * A cgroupfs_root represents the root of a cgroup hierarchy, + * and may be associated with a superblock to form an active + * hierarchy + */ +struct cgroupfs_root { + struct super_block *sb; + + /* + * The bitmask of subsystems intended to be attached to this + * hierarchy + */ + unsigned long subsys_bits; + + /* The bitmask of subsystems currently attached to this hierarchy */ + unsigned long actual_subsys_bits; + + /* A list running through the attached subsystems */ + struct list_head subsys_list; + + /* The root cgroup for this hierarchy */ + struct cgroup top_cgroup; + + /* Tracks how many cgroups are currently defined in hierarchy.*/ + int number_of_cgroups; + + /* A list running through the mounted hierarchies */ + struct list_head root_list; + + /* Hierarchy-specific flags */ + unsigned long flags; + + /* The path to use for release notifications. No locking + * between setting and use - so if userspace updates this + * while child cgroups exist, you could miss a + * notification. 
We ensure that it's always a valid + * NUL-terminated string */ + char release_agent_path[PATH_MAX]; +}; + + +/* + * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the + * subsystems that are otherwise unattached - it never has more than a + * single cgroup, and all tasks are part of that cgroup. + */ +static struct cgroupfs_root rootnode; + +/* The list of hierarchy roots */ + +static LIST_HEAD(roots); +static int root_count; + +/* dummytop is a shorthand for the dummy hierarchy's top cgroup */ +#define dummytop (&rootnode.top_cgroup) + +/* This flag indicates whether tasks in the fork and exit paths should + * take callback_mutex and check for fork/exit handlers to call. This + * avoids us having to do extra work in the fork/exit path if none of the + * subsystems need to be called. + */ +static int need_forkexit_callback; + +/* bits in struct cgroup flags field */ +enum { + /* Control Group is dead */ + CGRP_REMOVED, + /* Control Group has previously had a child cgroup or a task, + * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ + CGRP_RELEASABLE, + /* Control Group requires release notifications to userspace */ + CGRP_NOTIFY_ON_RELEASE, +}; + +/* convenient tests for these bits */ +inline int cgroup_is_removed(const struct cgroup *cgrp) +{ + return test_bit(CGRP_REMOVED, &cgrp->flags); +} + +/* bits in struct cgroupfs_root flags field */ +enum { + ROOT_NOPREFIX, /* mounted subsystems have no named prefix */ +}; + +inline int cgroup_is_releasable(const struct cgroup *cgrp) +{ + const int bits = + (1 << CGRP_RELEASABLE) | + (1 << CGRP_NOTIFY_ON_RELEASE); + return (cgrp->flags & bits) == bits; +} + +inline int notify_on_release(const struct cgroup *cgrp) +{ + return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); +} + +/* + * for_each_subsys() allows you to iterate on each subsystem attached to + * an active hierarchy + */ +#define for_each_subsys(_root, _ss) \ +list_for_each_entry(_ss, &_root->subsys_list, sibling) + +/* for_each_root() allows you to iterate across the active hierarchies */ +#define for_each_root(_root) \ +list_for_each_entry(_root, &roots, root_list) + +/* the list of cgroups eligible for automatic release. Protected by + * release_list_lock */ +static LIST_HEAD(release_list); +static DEFINE_SPINLOCK(release_list_lock); +static void cgroup_release_agent(struct work_struct *work); +static DECLARE_WORK(release_agent_work, cgroup_release_agent); +static void check_for_release(struct cgroup *cgrp); + +/* Link structure for associating css_set objects with cgroups */ +struct cg_cgroup_link { + /* + * List running through cg_cgroup_links associated with a + * cgroup, anchored on cgroup->css_sets + */ + struct list_head cgrp_link_list; + /* + * List running through cg_cgroup_links pointing at a + * single css_set object, anchored on css_set->cg_links + */ + struct list_head cg_link_list; + struct css_set *cg; +}; + +/* The default css_set - used by init and its children prior to any + * hierarchies being mounted. It contains a pointer to the root state + * for each subsystem. Also used to anchor the list of css_sets. Not + * reference-counted, to improve performance when child cgroups + * haven't been created. + */ + +static struct css_set init_css_set; +static struct cg_cgroup_link init_css_set_link; + +/* css_set_lock protects the list of css_set objects, and the + * chain of tasks off each css_set. 
Nests outside task->alloc_lock + * due to cgroup_iter_start() */ +static DEFINE_RWLOCK(css_set_lock); +static int css_set_count; + +/* We don't maintain the lists running through each css_set to its + * task until after the first call to cgroup_iter_start(). This + * reduces the fork()/exit() overhead for people who have cgroups + * compiled into their kernel but not actually in use */ +static int use_task_css_set_links; + +/* When we create or destroy a css_set, the operation simply + * takes/releases a reference count on all the cgroups referenced + * by subsystems in this css_set. This can end up multiple-counting + * some cgroups, but that's OK - the ref-count is just a + * busy/not-busy indicator; ensuring that we only count each cgroup + * once would require taking a global lock to ensure that no + * subsystems moved between hierarchies while we were doing so. + * + * Possible TODO: decide at boot time based on the number of + * registered subsystems and the number of CPUs or NUMA nodes whether + * it's better for performance to ref-count every subsystem, or to + * take a global lock and only add one ref count to each hierarchy. + */ + +/* + * unlink a css_set from the list and free it + */ +static void unlink_css_set(struct css_set *cg) +{ + write_lock(&css_set_lock); + list_del(&cg->list); + css_set_count--; + while (!list_empty(&cg->cg_links)) { + struct cg_cgroup_link *link; + link = list_entry(cg->cg_links.next, + struct cg_cgroup_link, cg_link_list); + list_del(&link->cg_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } + write_unlock(&css_set_lock); +} + +static void __release_css_set(struct kref *k, int taskexit) +{ + int i; + struct css_set *cg = container_of(k, struct css_set, ref); + + unlink_css_set(cg); + + rcu_read_lock(); + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup *cgrp = cg->subsys[i]->cgroup; + if (atomic_dec_and_test(&cgrp->count) && + notify_on_release(cgrp)) { + if (taskexit) + set_bit(CGRP_RELEASABLE, &cgrp->flags); + check_for_release(cgrp); + } + } + rcu_read_unlock(); + kfree(cg); +} + +static void release_css_set(struct kref *k) +{ + __release_css_set(k, 0); +} + +static void release_css_set_taskexit(struct kref *k) +{ + __release_css_set(k, 1); +} + +/* + * refcounted get/put for css_set objects + */ +static inline void get_css_set(struct css_set *cg) +{ + kref_get(&cg->ref); +} + +static inline void put_css_set(struct css_set *cg) +{ + kref_put(&cg->ref, release_css_set); +} + +static inline void put_css_set_taskexit(struct css_set *cg) +{ + kref_put(&cg->ref, release_css_set_taskexit); +} + +/* + * find_existing_css_set() is a helper for + * find_css_set(), and checks to see whether an existing + * css_set is suitable. 
This currently walks a linked-list for + * simplicity; a later patch will use a hash table for better + * performance + * + * oldcg: the cgroup group that we're using before the cgroup + * transition + * + * cgrp: the cgroup that we're moving into + * + * template: location in which to build the desired set of subsystem + * state objects for the new cgroup group + */ + +static struct css_set *find_existing_css_set( + struct css_set *oldcg, + struct cgroup *cgrp, + struct cgroup_subsys_state *template[]) +{ + int i; + struct cgroupfs_root *root = cgrp->root; + struct list_head *l = &init_css_set.list; + + /* Built the set of subsystem state objects that we want to + * see in the new css_set */ + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + if (root->subsys_bits & (1ull << i)) { + /* Subsystem is in this hierarchy. So we want + * the subsystem state from the new + * cgroup */ + template[i] = cgrp->subsys[i]; + } else { + /* Subsystem is not in this hierarchy, so we + * don't want to change the subsystem state */ + template[i] = oldcg->subsys[i]; + } + } + + /* Look through existing cgroup groups to find one to reuse */ + do { + struct css_set *cg = + list_entry(l, struct css_set, list); + + if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) { + /* All subsystems matched */ + return cg; + } + /* Try the next cgroup group */ + l = l->next; + } while (l != &init_css_set.list); + + /* No existing cgroup group matched */ + return NULL; +} + +/* + * allocate_cg_links() allocates "count" cg_cgroup_link structures + * and chains them on tmp through their cgrp_link_list fields. Returns 0 on + * success or a negative error + */ + +static int allocate_cg_links(int count, struct list_head *tmp) +{ + struct cg_cgroup_link *link; + int i; + INIT_LIST_HEAD(tmp); + for (i = 0; i < count; i++) { + link = kmalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + while (!list_empty(tmp)) { + link = list_entry(tmp->next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } + return -ENOMEM; + } + list_add(&link->cgrp_link_list, tmp); + } + return 0; +} + +static void free_cg_links(struct list_head *tmp) +{ + while (!list_empty(tmp)) { + struct cg_cgroup_link *link; + link = list_entry(tmp->next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } +} + +/* + * find_css_set() takes an existing cgroup group and a + * cgroup object, and returns a css_set object that's + * equivalent to the old group, but with the given cgroup + * substituted into the appropriate hierarchy. 
Must be called with + * cgroup_mutex held + */ + +static struct css_set *find_css_set( + struct css_set *oldcg, struct cgroup *cgrp) +{ + struct css_set *res; + struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; + int i; + + struct list_head tmp_cg_links; + struct cg_cgroup_link *link; + + /* First see if we already have a cgroup group that matches + * the desired set */ + write_lock(&css_set_lock); + res = find_existing_css_set(oldcg, cgrp, template); + if (res) + get_css_set(res); + write_unlock(&css_set_lock); + + if (res) + return res; + + res = kmalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return NULL; + + /* Allocate all the cg_cgroup_link objects that we'll need */ + if (allocate_cg_links(root_count, &tmp_cg_links) < 0) { + kfree(res); + return NULL; + } + + kref_init(&res->ref); + INIT_LIST_HEAD(&res->cg_links); + INIT_LIST_HEAD(&res->tasks); + + /* Copy the set of subsystem state objects generated in + * find_existing_css_set() */ + memcpy(res->subsys, template, sizeof(res->subsys)); + + write_lock(&css_set_lock); + /* Add reference counts and links from the new css_set. */ + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup *cgrp = res->subsys[i]->cgroup; + struct cgroup_subsys *ss = subsys[i]; + atomic_inc(&cgrp->count); + /* + * We want to add a link once per cgroup, so we + * only do it for the first subsystem in each + * hierarchy + */ + if (ss->root->subsys_list.next == &ss->sibling) { + BUG_ON(list_empty(&tmp_cg_links)); + link = list_entry(tmp_cg_links.next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + list_add(&link->cgrp_link_list, &cgrp->css_sets); + link->cg = res; + list_add(&link->cg_link_list, &res->cg_links); + } + } + if (list_empty(&rootnode.subsys_list)) { + link = list_entry(tmp_cg_links.next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + list_add(&link->cgrp_link_list, &dummytop->css_sets); + link->cg = res; + list_add(&link->cg_link_list, &res->cg_links); + } + + BUG_ON(!list_empty(&tmp_cg_links)); + + /* Link this cgroup group into the list */ + list_add(&res->list, &init_css_set.list); + css_set_count++; + INIT_LIST_HEAD(&res->tasks); + write_unlock(&css_set_lock); + + return res; +} + +/* + * There is one global cgroup mutex. We also require taking + * task_lock() when dereferencing a task's cgroup subsys pointers. + * See "The task_lock() exception", at the end of this comment. + * + * A task must hold cgroup_mutex to modify cgroups. + * + * Any task can increment and decrement the count field without lock. + * So in general, code holding cgroup_mutex can't rely on the count + * field not changing. However, if the count goes to zero, then only + * attach_task() can increment it again. Because a count of zero + * means that no tasks are currently attached, therefore there is no + * way a task attached to that cgroup can fork (the other way to + * increment the count). So code holding cgroup_mutex can safely + * assume that if the count is zero, it will stay zero. Similarly, if + * a task holds cgroup_mutex on a cgroup with zero count, it + * knows that the cgroup won't be removed, as cgroup_rmdir() + * needs that mutex. + * + * The cgroup_common_file_write handler for operations that modify + * the cgroup hierarchy holds cgroup_mutex across the entire operation, + * single threading all such cgroup modifications across the system. + * + * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't + * (usually) take cgroup_mutex. 
These are the two most performance + * critical pieces of code here. The exception occurs on cgroup_exit(), + * when a task in a notify_on_release cgroup exits. Then cgroup_mutex + * is taken, and if the cgroup count is zero, a usermode call made + * to /sbin/cgroup_release_agent with the name of the cgroup (path + * relative to the root of cgroup file system) as the argument. + * + * A cgroup can only be deleted if both its 'count' of using tasks + * is zero, and its list of 'children' cgroups is empty. Since all + * tasks in the system use _some_ cgroup, and since there is always at + * least one task in the system (init, pid == 1), therefore, top_cgroup + * always has either children cgroups and/or using tasks. So we don't + * need a special hack to ensure that top_cgroup cannot be deleted. + * + * The task_lock() exception + * + * The need for this exception arises from the action of + * attach_task(), which overwrites one tasks cgroup pointer with + * another. It does so using cgroup_mutexe, however there are + * several performance critical places that need to reference + * task->cgroup without the expense of grabbing a system global + * mutex. Therefore except as noted below, when dereferencing or, as + * in attach_task(), modifying a task'ss cgroup pointer we use + * task_lock(), which acts on a spinlock (task->alloc_lock) already in + * the task_struct routinely used for such matters. + * + * P.S. One more locking exception. RCU is used to guard the + * update of a tasks cgroup pointer by attach_task() + */ + +/** + * cgroup_lock - lock out any changes to cgroup structures + * + */ + +void cgroup_lock(void) +{ + mutex_lock(&cgroup_mutex); +} + +/** + * cgroup_unlock - release lock on cgroup changes + * + * Undo the lock taken in a previous cgroup_lock() call. + */ + +void cgroup_unlock(void) +{ + mutex_unlock(&cgroup_mutex); +} + +/* + * A couple of forward declarations required, due to cyclic reference loop: + * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir -> + * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations + * -> cgroup_mkdir. + */ + +static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode); +static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry); +static int cgroup_populate_dir(struct cgroup *cgrp); +static struct inode_operations cgroup_dir_inode_operations; +static struct file_operations proc_cgroupstats_operations; + +static struct backing_dev_info cgroup_backing_dev_info = { + .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, +}; + +static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) +{ + struct inode *inode = new_inode(sb); + + if (inode) { + inode->i_mode = mode; + inode->i_uid = current->fsuid; + inode->i_gid = current->fsgid; + inode->i_blocks = 0; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info; + } + return inode; +} + +static void cgroup_diput(struct dentry *dentry, struct inode *inode) +{ + /* is dentry a directory ? 
if so, kfree() associated cgroup */ + if (S_ISDIR(inode->i_mode)) { + struct cgroup *cgrp = dentry->d_fsdata; + BUG_ON(!(cgroup_is_removed(cgrp))); + /* It's possible for external users to be holding css + * reference counts on a cgroup; css_put() needs to + * be able to access the cgroup after decrementing + * the reference count in order to know if it needs to + * queue the cgroup to be handled by the release + * agent */ + synchronize_rcu(); + kfree(cgrp); + } + iput(inode); +} + +static void remove_dir(struct dentry *d) +{ + struct dentry *parent = dget(d->d_parent); + + d_delete(d); + simple_rmdir(parent->d_inode, d); + dput(parent); +} + +static void cgroup_clear_directory(struct dentry *dentry) +{ + struct list_head *node; + + BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); + spin_lock(&dcache_lock); + node = dentry->d_subdirs.next; + while (node != &dentry->d_subdirs) { + struct dentry *d = list_entry(node, struct dentry, d_u.d_child); + list_del_init(node); + if (d->d_inode) { + /* This should never be called on a cgroup + * directory with child cgroups */ + BUG_ON(d->d_inode->i_mode & S_IFDIR); + d = dget_locked(d); + spin_unlock(&dcache_lock); + d_delete(d); + simple_unlink(dentry->d_inode, d); + dput(d); + spin_lock(&dcache_lock); + } + node = dentry->d_subdirs.next; + } + spin_unlock(&dcache_lock); +} + +/* + * NOTE : the dentry must have been dget()'ed + */ +static void cgroup_d_remove_dir(struct dentry *dentry) +{ + cgroup_clear_directory(dentry); + + spin_lock(&dcache_lock); + list_del_init(&dentry->d_u.d_child); + spin_unlock(&dcache_lock); + remove_dir(dentry); +} + +static int rebind_subsystems(struct cgroupfs_root *root, + unsigned long final_bits) +{ + unsigned long added_bits, removed_bits; + struct cgroup *cgrp = &root->top_cgroup; + int i; + + removed_bits = root->actual_subsys_bits & ~final_bits; + added_bits = final_bits & ~root->actual_subsys_bits; + /* Check that any added subsystems are currently free */ + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + unsigned long long bit = 1ull << i; + struct cgroup_subsys *ss = subsys[i]; + if (!(bit & added_bits)) + continue; + if (ss->root != &rootnode) { + /* Subsystem isn't free */ + return -EBUSY; + } + } + + /* Currently we don't handle adding/removing subsystems when + * any child cgroups exist. 
This is theoretically supportable + * but involves complex error handling, so it's being left until + * later */ + if (!list_empty(&cgrp->children)) + return -EBUSY; + + /* Process each subsystem */ + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + unsigned long bit = 1UL << i; + if (bit & added_bits) { + /* We're binding this subsystem to this hierarchy */ + BUG_ON(cgrp->subsys[i]); + BUG_ON(!dummytop->subsys[i]); + BUG_ON(dummytop->subsys[i]->cgroup != dummytop); + cgrp->subsys[i] = dummytop->subsys[i]; + cgrp->subsys[i]->cgroup = cgrp; + list_add(&ss->sibling, &root->subsys_list); + rcu_assign_pointer(ss->root, root); + if (ss->bind) + ss->bind(ss, cgrp); + + } else if (bit & removed_bits) { + /* We're removing this subsystem */ + BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]); + BUG_ON(cgrp->subsys[i]->cgroup != cgrp); + if (ss->bind) + ss->bind(ss, dummytop); + dummytop->subsys[i]->cgroup = dummytop; + cgrp->subsys[i] = NULL; + rcu_assign_pointer(subsys[i]->root, &rootnode); + list_del(&ss->sibling); + } else if (bit & final_bits) { + /* Subsystem state should already exist */ + BUG_ON(!cgrp->subsys[i]); + } else { + /* Subsystem state shouldn't exist */ + BUG_ON(cgrp->subsys[i]); + } + } + root->subsys_bits = root->actual_subsys_bits = final_bits; + synchronize_rcu(); + + return 0; +} + +static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs) +{ + struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info; + struct cgroup_subsys *ss; + + mutex_lock(&cgroup_mutex); + for_each_subsys(root, ss) + seq_printf(seq, ",%s", ss->name); + if (test_bit(ROOT_NOPREFIX, &root->flags)) + seq_puts(seq, ",noprefix"); + if (strlen(root->release_agent_path)) + seq_printf(seq, ",release_agent=%s", root->release_agent_path); + mutex_unlock(&cgroup_mutex); + return 0; +} + +struct cgroup_sb_opts { + unsigned long subsys_bits; + unsigned long flags; + char *release_agent; +}; + +/* Convert a hierarchy specifier into a bitmask of subsystems and + * flags. 
*/ +static int parse_cgroupfs_options(char *data, + struct cgroup_sb_opts *opts) +{ + char *token, *o = data ?: "all"; + + opts->subsys_bits = 0; + opts->flags = 0; + opts->release_agent = NULL; + + while ((token = strsep(&o, ",")) != NULL) { + if (!*token) + return -EINVAL; + if (!strcmp(token, "all")) { + opts->subsys_bits = (1 << CGROUP_SUBSYS_COUNT) - 1; + } else if (!strcmp(token, "noprefix")) { + set_bit(ROOT_NOPREFIX, &opts->flags); + } else if (!strncmp(token, "release_agent=", 14)) { + /* Specifying two release agents is forbidden */ + if (opts->release_agent) + return -EINVAL; + opts->release_agent = kzalloc(PATH_MAX, GFP_KERNEL); + if (!opts->release_agent) + return -ENOMEM; + strncpy(opts->release_agent, token + 14, PATH_MAX - 1); + opts->release_agent[PATH_MAX - 1] = 0; + } else { + struct cgroup_subsys *ss; + int i; + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + ss = subsys[i]; + if (!strcmp(token, ss->name)) { + set_bit(i, &opts->subsys_bits); + break; + } + } + if (i == CGROUP_SUBSYS_COUNT) + return -ENOENT; + } + } + + /* We can't have an empty hierarchy */ + if (!opts->subsys_bits) + return -EINVAL; + + return 0; +} + +static int cgroup_remount(struct super_block *sb, int *flags, char *data) +{ + int ret = 0; + struct cgroupfs_root *root = sb->s_fs_info; + struct cgroup *cgrp = &root->top_cgroup; + struct cgroup_sb_opts opts; + + mutex_lock(&cgrp->dentry->d_inode->i_mutex); + mutex_lock(&cgroup_mutex); + + /* See what subsystems are wanted */ + ret = parse_cgroupfs_options(data, &opts); + if (ret) + goto out_unlock; + + /* Don't allow flags to change at remount */ + if (opts.flags != root->flags) { + ret = -EINVAL; + goto out_unlock; + } + + ret = rebind_subsystems(root, opts.subsys_bits); + + /* (re)populate subsystem files */ + if (!ret) + cgroup_populate_dir(cgrp); + + if (opts.release_agent) + strcpy(root->release_agent_path, opts.release_agent); + out_unlock: + if (opts.release_agent) + kfree(opts.release_agent); + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgrp->dentry->d_inode->i_mutex); + return ret; +} + +static struct super_operations cgroup_ops = { + .statfs = simple_statfs, + .drop_inode = generic_delete_inode, + .show_options = cgroup_show_options, + .remount_fs = cgroup_remount, +}; + +static void init_cgroup_root(struct cgroupfs_root *root) +{ + struct cgroup *cgrp = &root->top_cgroup; + INIT_LIST_HEAD(&root->subsys_list); + INIT_LIST_HEAD(&root->root_list); + root->number_of_cgroups = 1; + cgrp->root = root; + cgrp->top_cgroup = cgrp; + INIT_LIST_HEAD(&cgrp->sibling); + INIT_LIST_HEAD(&cgrp->children); + INIT_LIST_HEAD(&cgrp->css_sets); + INIT_LIST_HEAD(&cgrp->release_list); +} + +static int cgroup_test_super(struct super_block *sb, void *data) +{ + struct cgroupfs_root *new = data; + struct cgroupfs_root *root = sb->s_fs_info; + + /* First check subsystems */ + if (new->subsys_bits != root->subsys_bits) + return 0; + + /* Next check flags */ + if (new->flags != root->flags) + return 0; + + return 1; +} + +static int cgroup_set_super(struct super_block *sb, void *data) +{ + int ret; + struct cgroupfs_root *root = data; + + ret = set_anon_super(sb, NULL); + if (ret) + return ret; + + sb->s_fs_info = root; + root->sb = sb; + + sb->s_blocksize = PAGE_CACHE_SIZE; + sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_magic = CGROUP_SUPER_MAGIC; + sb->s_op = &cgroup_ops; + + return 0; +} + +static int cgroup_get_rootdir(struct super_block *sb) +{ + struct inode *inode = + cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb); + struct dentry *dentry; + + 
if (!inode) + return -ENOMEM; + + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + inode->i_op = &cgroup_dir_inode_operations; + /* directories start off with i_nlink == 2 (for "." entry) */ + inc_nlink(inode); + dentry = d_alloc_root(inode); + if (!dentry) { + iput(inode); + return -ENOMEM; + } + sb->s_root = dentry; + return 0; +} + +static int cgroup_get_sb(struct file_system_type *fs_type, + int flags, const char *unused_dev_name, + void *data, struct vfsmount *mnt) +{ + struct cgroup_sb_opts opts; + int ret = 0; + struct super_block *sb; + struct cgroupfs_root *root; + struct list_head tmp_cg_links, *l; + INIT_LIST_HEAD(&tmp_cg_links); + + /* First find the desired set of subsystems */ + ret = parse_cgroupfs_options(data, &opts); + if (ret) { + if (opts.release_agent) + kfree(opts.release_agent); + return ret; + } + + root = kzalloc(sizeof(*root), GFP_KERNEL); + if (!root) + return -ENOMEM; + + init_cgroup_root(root); + root->subsys_bits = opts.subsys_bits; + root->flags = opts.flags; + if (opts.release_agent) { + strcpy(root->release_agent_path, opts.release_agent); + kfree(opts.release_agent); + } + + sb = sget(fs_type, cgroup_test_super, cgroup_set_super, root); + + if (IS_ERR(sb)) { + kfree(root); + return PTR_ERR(sb); + } + + if (sb->s_fs_info != root) { + /* Reusing an existing superblock */ + BUG_ON(sb->s_root == NULL); + kfree(root); + root = NULL; + } else { + /* New superblock */ + struct cgroup *cgrp = &root->top_cgroup; + struct inode *inode; + + BUG_ON(sb->s_root != NULL); + + ret = cgroup_get_rootdir(sb); + if (ret) + goto drop_new_super; + inode = sb->s_root->d_inode; + + mutex_lock(&inode->i_mutex); + mutex_lock(&cgroup_mutex); + + /* + * We're accessing css_set_count without locking + * css_set_lock here, but that's OK - it can only be + * increased by someone holding cgroup_lock, and + * that's us. 
The worst that can happen is that we + * have some link structures left over + */ + ret = allocate_cg_links(css_set_count, &tmp_cg_links); + if (ret) { + mutex_unlock(&cgroup_mutex); + mutex_unlock(&inode->i_mutex); + goto drop_new_super; + } + + ret = rebind_subsystems(root, root->subsys_bits); + if (ret == -EBUSY) { + mutex_unlock(&cgroup_mutex); + mutex_unlock(&inode->i_mutex); + goto drop_new_super; + } + + /* EBUSY should be the only error here */ + BUG_ON(ret); + + list_add(&root->root_list, &roots); + root_count++; + + sb->s_root->d_fsdata = &root->top_cgroup; + root->top_cgroup.dentry = sb->s_root; + + /* Link the top cgroup in this hierarchy into all + * the css_set objects */ + write_lock(&css_set_lock); + l = &init_css_set.list; + do { + struct css_set *cg; + struct cg_cgroup_link *link; + cg = list_entry(l, struct css_set, list); + BUG_ON(list_empty(&tmp_cg_links)); + link = list_entry(tmp_cg_links.next, + struct cg_cgroup_link, + cgrp_link_list); + list_del(&link->cgrp_link_list); + link->cg = cg; + list_add(&link->cgrp_link_list, + &root->top_cgroup.css_sets); + list_add(&link->cg_link_list, &cg->cg_links); + l = l->next; + } while (l != &init_css_set.list); + write_unlock(&css_set_lock); + + free_cg_links(&tmp_cg_links); + + BUG_ON(!list_empty(&cgrp->sibling)); + BUG_ON(!list_empty(&cgrp->children)); + BUG_ON(root->number_of_cgroups != 1); + + cgroup_populate_dir(cgrp); + mutex_unlock(&inode->i_mutex); + mutex_unlock(&cgroup_mutex); + } + + return simple_set_mnt(mnt, sb); + + drop_new_super: + up_write(&sb->s_umount); + deactivate_super(sb); + free_cg_links(&tmp_cg_links); + return ret; +} + +static void cgroup_kill_sb(struct super_block *sb) { + struct cgroupfs_root *root = sb->s_fs_info; + struct cgroup *cgrp = &root->top_cgroup; + int ret; + + BUG_ON(!root); + + BUG_ON(root->number_of_cgroups != 1); + BUG_ON(!list_empty(&cgrp->children)); + BUG_ON(!list_empty(&cgrp->sibling)); + + mutex_lock(&cgroup_mutex); + + /* Rebind all subsystems back to the default hierarchy */ + ret = rebind_subsystems(root, 0); + /* Shouldn't be able to fail ... */ + BUG_ON(ret); + + /* + * Release all the links from css_sets to this hierarchy's + * root cgroup + */ + write_lock(&css_set_lock); + while (!list_empty(&cgrp->css_sets)) { + struct cg_cgroup_link *link; + link = list_entry(cgrp->css_sets.next, + struct cg_cgroup_link, cgrp_link_list); + list_del(&link->cg_link_list); + list_del(&link->cgrp_link_list); + kfree(link); + } + write_unlock(&css_set_lock); + + if (!list_empty(&root->root_list)) { + list_del(&root->root_list); + root_count--; + } + mutex_unlock(&cgroup_mutex); + + kfree(root); + kill_litter_super(sb); +} + +static struct file_system_type cgroup_fs_type = { + .name = "cgroup", + .get_sb = cgroup_get_sb, + .kill_sb = cgroup_kill_sb, +}; + +static inline struct cgroup *__d_cgrp(struct dentry *dentry) +{ + return dentry->d_fsdata; +} + +static inline struct cftype *__d_cft(struct dentry *dentry) +{ + return dentry->d_fsdata; +} + +/* + * Called with cgroup_mutex held. Writes path of cgroup into buf. + * Returns 0 on success, -errno on error. 
+ */ +int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) +{ + char *start; + + if (cgrp == dummytop) { + /* + * Inactive subsystems have no dentry for their root + * cgroup + */ + strcpy(buf, "/"); + return 0; + } + + start = buf + buflen; + + *--start = '\0'; + for (;;) { + int len = cgrp->dentry->d_name.len; + if ((start -= len) < buf) + return -ENAMETOOLONG; + memcpy(start, cgrp->dentry->d_name.name, len); + cgrp = cgrp->parent; + if (!cgrp) + break; + if (!cgrp->parent) + continue; + if (--start < buf) + return -ENAMETOOLONG; + *start = '/'; + } + memmove(buf, start, buf + buflen - start); + return 0; +} + +/* + * Return the first subsystem attached to a cgroup's hierarchy, and + * its subsystem id. + */ + +static void get_first_subsys(const struct cgroup *cgrp, + struct cgroup_subsys_state **css, int *subsys_id) +{ + const struct cgroupfs_root *root = cgrp->root; + const struct cgroup_subsys *test_ss; + BUG_ON(list_empty(&root->subsys_list)); + test_ss = list_entry(root->subsys_list.next, + struct cgroup_subsys, sibling); + if (css) { + *css = cgrp->subsys[test_ss->subsys_id]; + BUG_ON(!*css); + } + if (subsys_id) + *subsys_id = test_ss->subsys_id; +} + +/* + * Attach task 'tsk' to cgroup 'cgrp' + * + * Call holding cgroup_mutex. May take task_lock of + * the task 'pid' during call. + */ +static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) +{ + int retval = 0; + struct cgroup_subsys *ss; + struct cgroup *oldcgrp; + struct css_set *cg = tsk->cgroups; + struct css_set *newcg; + struct cgroupfs_root *root = cgrp->root; + int subsys_id; + + get_first_subsys(cgrp, NULL, &subsys_id); + + /* Nothing to do if the task is already in that cgroup */ + oldcgrp = task_cgroup(tsk, subsys_id); + if (cgrp == oldcgrp) + return 0; + + for_each_subsys(root, ss) { + if (ss->can_attach) { + retval = ss->can_attach(ss, cgrp, tsk); + if (retval) { + return retval; + } + } + } + + /* + * Locate or allocate a new css_set for this task, + * based on its final set of cgroups + */ + newcg = find_css_set(cg, cgrp); + if (!newcg) { + return -ENOMEM; + } + + task_lock(tsk); + if (tsk->flags & PF_EXITING) { + task_unlock(tsk); + put_css_set(newcg); + return -ESRCH; + } + rcu_assign_pointer(tsk->cgroups, newcg); + task_unlock(tsk); + + /* Update the css_set linked lists if we're using them */ + write_lock(&css_set_lock); + if (!list_empty(&tsk->cg_list)) { + list_del(&tsk->cg_list); + list_add(&tsk->cg_list, &newcg->tasks); + } + write_unlock(&css_set_lock); + + for_each_subsys(root, ss) { + if (ss->attach) { + ss->attach(ss, cgrp, oldcgrp, tsk); + } + } + set_bit(CGRP_RELEASABLE, &oldcgrp->flags); + synchronize_rcu(); + put_css_set(cg); + return 0; +} + +/* + * Attach task with pid 'pid' to cgroup 'cgrp'. 
Call with + * cgroup_mutex, may take task_lock of task + */ +static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) +{ + pid_t pid; + struct task_struct *tsk; + int ret; + + if (sscanf(pidbuf, "%d", &pid) != 1) + return -EIO; + + if (pid) { + rcu_read_lock(); + tsk = find_task_by_pid(pid); + if (!tsk || tsk->flags & PF_EXITING) { + rcu_read_unlock(); + return -ESRCH; + } + get_task_struct(tsk); + rcu_read_unlock(); + + if ((current->euid) && (current->euid != tsk->uid) + && (current->euid != tsk->suid)) { + put_task_struct(tsk); + return -EACCES; + } + } else { + tsk = current; + get_task_struct(tsk); + } + + ret = attach_task(cgrp, tsk); + put_task_struct(tsk); + return ret; +} + +/* The various types of files and directories in a cgroup file system */ + +enum cgroup_filetype { + FILE_ROOT, + FILE_DIR, + FILE_TASKLIST, + FILE_NOTIFY_ON_RELEASE, + FILE_RELEASABLE, + FILE_RELEASE_AGENT, +}; + +static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + const char __user *userbuf, + size_t nbytes, loff_t *unused_ppos) +{ + char buffer[64]; + int retval = 0; + u64 val; + char *end; + + if (!nbytes) + return -EINVAL; + if (nbytes >= sizeof(buffer)) + return -E2BIG; + if (copy_from_user(buffer, userbuf, nbytes)) + return -EFAULT; + + buffer[nbytes] = 0; /* nul-terminate */ + + /* strip newline if necessary */ + if (nbytes && (buffer[nbytes-1] == '\n')) + buffer[nbytes-1] = 0; + val = simple_strtoull(buffer, &end, 0); + if (*end) + return -EINVAL; + + /* Pass to subsystem */ + retval = cft->write_uint(cgrp, cft, val); + if (!retval) + retval = nbytes; + return retval; +} + +static ssize_t cgroup_common_file_write(struct cgroup *cgrp, + struct cftype *cft, + struct file *file, + const char __user *userbuf, + size_t nbytes, loff_t *unused_ppos) +{ + enum cgroup_filetype type = cft->private; + char *buffer; + int retval = 0; + + if (nbytes >= PATH_MAX) + return -E2BIG; + + /* +1 for nul-terminator */ + buffer = kmalloc(nbytes + 1, GFP_KERNEL); + if (buffer == NULL) + return -ENOMEM; + + if (copy_from_user(buffer, userbuf, nbytes)) { + retval = -EFAULT; + goto out1; + } + buffer[nbytes] = 0; /* nul-terminate */ + + mutex_lock(&cgroup_mutex); + + if (cgroup_is_removed(cgrp)) { + retval = -ENODEV; + goto out2; + } + + switch (type) { + case FILE_TASKLIST: + retval = attach_task_by_pid(cgrp, buffer); + break; + case FILE_NOTIFY_ON_RELEASE: + clear_bit(CGRP_RELEASABLE, &cgrp->flags); + if (simple_strtoul(buffer, NULL, 10) != 0) + set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); + else + clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); + break; + case FILE_RELEASE_AGENT: + { + struct cgroupfs_root *root = cgrp->root; + /* Strip trailing newline */ + if (nbytes && (buffer[nbytes-1] == '\n')) { + buffer[nbytes-1] = 0; + } + if (nbytes < sizeof(root->release_agent_path)) { + /* We never write anything other than '\0' + * into the last char of release_agent_path, + * so it always remains a NUL-terminated + * string */ + strncpy(root->release_agent_path, buffer, nbytes); + root->release_agent_path[nbytes] = 0; + } else { + retval = -ENOSPC; + } + break; + } + default: + retval = -EINVAL; + goto out2; + } + + if (retval == 0) + retval = nbytes; +out2: + mutex_unlock(&cgroup_mutex); +out1: + kfree(buffer); + return retval; +} + +static ssize_t cgroup_file_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct cftype *cft = __d_cft(file->f_dentry); + struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); + + if (!cft) + return 
-ENODEV; + if (cft->write) + return cft->write(cgrp, cft, file, buf, nbytes, ppos); + if (cft->write_uint) + return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos); + return -EINVAL; +} + +static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft, + struct file *file, + char __user *buf, size_t nbytes, + loff_t *ppos) +{ + char tmp[64]; + u64 val = cft->read_uint(cgrp, cft); + int len = sprintf(tmp, "%llu\n", (unsigned long long) val); + + return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); +} + +static ssize_t cgroup_common_file_read(struct cgroup *cgrp, + struct cftype *cft, + struct file *file, + char __user *buf, + size_t nbytes, loff_t *ppos) +{ + enum cgroup_filetype type = cft->private; + char *page; + ssize_t retval = 0; + char *s; + + if (!(page = (char *)__get_free_page(GFP_KERNEL))) + return -ENOMEM; + + s = page; + + switch (type) { + case FILE_RELEASE_AGENT: + { + struct cgroupfs_root *root; + size_t n; + mutex_lock(&cgroup_mutex); + root = cgrp->root; + n = strnlen(root->release_agent_path, + sizeof(root->release_agent_path)); + n = min(n, (size_t) PAGE_SIZE); + strncpy(s, root->release_agent_path, n); + mutex_unlock(&cgroup_mutex); + s += n; + break; + } + default: + retval = -EINVAL; + goto out; + } + *s++ = '\n'; + + retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page); +out: + free_page((unsigned long)page); + return retval; +} + +static ssize_t cgroup_file_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct cftype *cft = __d_cft(file->f_dentry); + struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); + + if (!cft) + return -ENODEV; + + if (cft->read) + return cft->read(cgrp, cft, file, buf, nbytes, ppos); + if (cft->read_uint) + return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos); + return -EINVAL; +} + +static int cgroup_file_open(struct inode *inode, struct file *file) +{ + int err; + struct cftype *cft; + + err = generic_file_open(inode, file); + if (err) + return err; + + cft = __d_cft(file->f_dentry); + if (!cft) + return -ENODEV; + if (cft->open) + err = cft->open(inode, file); + else + err = 0; + + return err; +} + +static int cgroup_file_release(struct inode *inode, struct file *file) +{ + struct cftype *cft = __d_cft(file->f_dentry); + if (cft->release) + return cft->release(inode, file); + return 0; +} + +/* + * cgroup_rename - Only allow simple rename of directories in place. 
+ */ +static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + if (!S_ISDIR(old_dentry->d_inode->i_mode)) + return -ENOTDIR; + if (new_dentry->d_inode) + return -EEXIST; + if (old_dir != new_dir) + return -EIO; + return simple_rename(old_dir, old_dentry, new_dir, new_dentry); +} + +static struct file_operations cgroup_file_operations = { + .read = cgroup_file_read, + .write = cgroup_file_write, + .llseek = generic_file_llseek, + .open = cgroup_file_open, + .release = cgroup_file_release, +}; + +static struct inode_operations cgroup_dir_inode_operations = { + .lookup = simple_lookup, + .mkdir = cgroup_mkdir, + .rmdir = cgroup_rmdir, + .rename = cgroup_rename, +}; + +static int cgroup_create_file(struct dentry *dentry, int mode, + struct super_block *sb) +{ + static struct dentry_operations cgroup_dops = { + .d_iput = cgroup_diput, + }; + + struct inode *inode; + + if (!dentry) + return -ENOENT; + if (dentry->d_inode) + return -EEXIST; + + inode = cgroup_new_inode(mode, sb); + if (!inode) + return -ENOMEM; + + if (S_ISDIR(mode)) { + inode->i_op = &cgroup_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + + /* start off with i_nlink == 2 (for "." entry) */ + inc_nlink(inode); + + /* start with the directory inode held, so that we can + * populate it without racing with another mkdir */ + mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); + } else if (S_ISREG(mode)) { + inode->i_size = 0; + inode->i_fop = &cgroup_file_operations; + } + dentry->d_op = &cgroup_dops; + d_instantiate(dentry, inode); + dget(dentry); /* Extra count - pin the dentry in core */ + return 0; +} + +/* + * cgroup_create_dir - create a directory for an object. + * cgrp: the cgroup we create the directory for. + * It must have a valid ->parent field + * And we are going to fill its ->dentry field. + * dentry: dentry of the new cgroup + * mode: mode to set on new directory. + */ +static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, + int mode) +{ + struct dentry *parent; + int error = 0; + + parent = cgrp->parent->dentry; + error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb); + if (!error) { + dentry->d_fsdata = cgrp; + inc_nlink(parent->d_inode); + cgrp->dentry = dentry; + dget(dentry); + } + dput(dentry); + + return error; +} + +int cgroup_add_file(struct cgroup *cgrp, + struct cgroup_subsys *subsys, + const struct cftype *cft) +{ + struct dentry *dir = cgrp->dentry; + struct dentry *dentry; + int error; + + char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; + if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { + strcpy(name, subsys->name); + strcat(name, "."); + } + strcat(name, cft->name); + BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); + dentry = lookup_one_len(name, dir, strlen(name)); + if (!IS_ERR(dentry)) { + error = cgroup_create_file(dentry, 0644 | S_IFREG, + cgrp->root->sb); + if (!error) + dentry->d_fsdata = (void *)cft; + dput(dentry); + } else + error = PTR_ERR(dentry); + return error; +} + +int cgroup_add_files(struct cgroup *cgrp, + struct cgroup_subsys *subsys, + const struct cftype cft[], + int count) +{ + int i, err; + for (i = 0; i < count; i++) { + err = cgroup_add_file(cgrp, subsys, &cft[i]); + if (err) + return err; + } + return 0; +} + +/* Count the number of tasks in a cgroup. 
*/ + +int cgroup_task_count(const struct cgroup *cgrp) +{ + int count = 0; + struct list_head *l; + + read_lock(&css_set_lock); + l = cgrp->css_sets.next; + while (l != &cgrp->css_sets) { + struct cg_cgroup_link *link = + list_entry(l, struct cg_cgroup_link, cgrp_link_list); + count += atomic_read(&link->cg->ref.refcount); + l = l->next; + } + read_unlock(&css_set_lock); + return count; +} + +/* + * Advance a list_head iterator. The iterator should be positioned at + * the start of a css_set + */ +static void cgroup_advance_iter(struct cgroup *cgrp, + struct cgroup_iter *it) +{ + struct list_head *l = it->cg_link; + struct cg_cgroup_link *link; + struct css_set *cg; + + /* Advance to the next non-empty css_set */ + do { + l = l->next; + if (l == &cgrp->css_sets) { + it->cg_link = NULL; + return; + } + link = list_entry(l, struct cg_cgroup_link, cgrp_link_list); + cg = link->cg; + } while (list_empty(&cg->tasks)); + it->cg_link = l; + it->task = cg->tasks.next; +} + +void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) +{ + /* + * The first time anyone tries to iterate across a cgroup, + * we need to enable the list linking each css_set to its + * tasks, and fix up all existing tasks. + */ + if (!use_task_css_set_links) { + struct task_struct *p, *g; + write_lock(&css_set_lock); + use_task_css_set_links = 1; + do_each_thread(g, p) { + task_lock(p); + if (list_empty(&p->cg_list)) + list_add(&p->cg_list, &p->cgroups->tasks); + task_unlock(p); + } while_each_thread(g, p); + write_unlock(&css_set_lock); + } + read_lock(&css_set_lock); + it->cg_link = &cgrp->css_sets; + cgroup_advance_iter(cgrp, it); +} + +struct task_struct *cgroup_iter_next(struct cgroup *cgrp, + struct cgroup_iter *it) +{ + struct task_struct *res; + struct list_head *l = it->task; + + /* If the iterator cg is NULL, we have no tasks */ + if (!it->cg_link) + return NULL; + res = list_entry(l, struct task_struct, cg_list); + /* Advance iterator to find next entry */ + l = l->next; + if (l == &res->cgroups->tasks) { + /* We reached the end of this task list - move on to + * the next cg_cgroup_link */ + cgroup_advance_iter(cgrp, it); + } else { + it->task = l; + } + return res; +} + +void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it) +{ + read_unlock(&css_set_lock); +} + +/* + * Stuff for reading the 'tasks' file. + * + * Reading this file can return large amounts of data if a cgroup has + * *lots* of attached tasks. So it may need several calls to read(), + * but we cannot guarantee that the information we produce is correct + * unless we produce it entirely atomically. + * + * Upon tasks file open(), a struct ctr_struct is allocated, that + * will have a pointer to an array (also allocated here). The struct + * ctr_struct * is stored in file->private_data. Its resources will + * be freed by release() when the file is closed. The array is used + * to sprintf the PIDs and then used by read(). + */ +struct ctr_struct { + char *buf; + int bufsz; +}; + +/* + * Load into 'pidarray' up to 'npids' of the tasks using cgroup + * 'cgrp'. Return actual number of pids loaded. No need to + * task_lock(p) when reading out p->cgroup, since we're in an RCU + * read section, so the css_set can't go away, and is + * immutable after creation. 
+ */ +static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) +{ + int n = 0; + struct cgroup_iter it; + struct task_struct *tsk; + cgroup_iter_start(cgrp, &it); + while ((tsk = cgroup_iter_next(cgrp, &it))) { + if (unlikely(n == npids)) + break; + pidarray[n++] = task_pid_nr(tsk); + } + cgroup_iter_end(cgrp, &it); + return n; +} + +/** + * Build and fill cgroupstats so that taskstats can export it to user + * space. + * + * @stats: cgroupstats to fill information into + * @dentry: A dentry entry belonging to the cgroup for which stats have + * been requested. + */ +int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry) +{ + int ret = -EINVAL; + struct cgroup *cgrp; + struct cgroup_iter it; + struct task_struct *tsk; + /* + * Validate dentry by checking the superblock operations + */ + if (dentry->d_sb->s_op != &cgroup_ops) + goto err; + + ret = 0; + cgrp = dentry->d_fsdata; + rcu_read_lock(); + + cgroup_iter_start(cgrp, &it); + while ((tsk = cgroup_iter_next(cgrp, &it))) { + switch (tsk->state) { + case TASK_RUNNING: + stats->nr_running++; + break; + case TASK_INTERRUPTIBLE: + stats->nr_sleeping++; + break; + case TASK_UNINTERRUPTIBLE: + stats->nr_uninterruptible++; + break; + case TASK_STOPPED: + stats->nr_stopped++; + break; + default: + if (delayacct_is_task_waiting_on_io(tsk)) + stats->nr_io_wait++; + break; + } + } + cgroup_iter_end(cgrp, &it); + + rcu_read_unlock(); +err: + return ret; +} + +static int cmppid(const void *a, const void *b) +{ + return *(pid_t *)a - *(pid_t *)b; +} + +/* + * Convert array 'a' of 'npids' pid_t's to a string of newline separated + * decimal pids in 'buf'. Don't write more than 'sz' chars, but return + * count 'cnt' of how many chars would be written if buf were large enough. + */ +static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) +{ + int cnt = 0; + int i; + + for (i = 0; i < npids; i++) + cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); + return cnt; +} + +/* + * Handle an open on 'tasks' file. Prepare a buffer listing the + * process id's of tasks currently attached to the cgroup being opened. + * + * Does not require any specific cgroup mutexes, and does not take any. + */ +static int cgroup_tasks_open(struct inode *unused, struct file *file) +{ + struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); + struct ctr_struct *ctr; + pid_t *pidarray; + int npids; + char c; + + if (!(file->f_mode & FMODE_READ)) + return 0; + + ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); + if (!ctr) + goto err0; + + /* + * If cgroup gets more users after we read count, we won't have + * enough space - tough. This race is indistinguishable to the + * caller from the case that the additional cgroup users didn't + * show up until sometime later on. 
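cgroup_tasks_open() sizes its buffer with the usual snprintf() idiom: since snprintf() reports how many characters it would have written, pid_array_to_buf() is run once against a one-byte scratch buffer purely to learn the required size, then a second time to fill the real allocation. A worked illustration with invented pid values:

/*
 * pids = {7, 42, 128}  ->  "7\n42\n128\n", i.e. 2 + 3 + 4 = 9 characters
 *
 * sizing pass:   pid_array_to_buf(&c, sizeof(c), pids, 3) returns 9
 *                (only a terminating NUL lands in the scratch byte c)
 * allocation:    bufsz = 9 + 1
 * filling pass:  pid_array_to_buf(buf, bufsz, pids, 3) writes all 9 characters
 */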
+ */ + npids = cgroup_task_count(cgrp); + if (npids) { + pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); + if (!pidarray) + goto err1; + + npids = pid_array_load(pidarray, npids, cgrp); + sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); + + /* Call pid_array_to_buf() twice, first just to get bufsz */ + ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; + ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); + if (!ctr->buf) + goto err2; + ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); + + kfree(pidarray); + } else { + ctr->buf = 0; + ctr->bufsz = 0; + } + file->private_data = ctr; + return 0; + +err2: + kfree(pidarray); +err1: + kfree(ctr); +err0: + return -ENOMEM; +} + +static ssize_t cgroup_tasks_read(struct cgroup *cgrp, + struct cftype *cft, + struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct ctr_struct *ctr = file->private_data; + + return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz); +} + +static int cgroup_tasks_release(struct inode *unused_inode, + struct file *file) +{ + struct ctr_struct *ctr; + + if (file->f_mode & FMODE_READ) { + ctr = file->private_data; + kfree(ctr->buf); + kfree(ctr); + } + return 0; +} + +static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, + struct cftype *cft) +{ + return notify_on_release(cgrp); +} + +static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft) +{ + return test_bit(CGRP_RELEASABLE, &cgrp->flags); +} + +/* + * for the common functions, 'private' gives the type of file + */ +static struct cftype files[] = { + { + .name = "tasks", + .open = cgroup_tasks_open, + .read = cgroup_tasks_read, + .write = cgroup_common_file_write, + .release = cgroup_tasks_release, + .private = FILE_TASKLIST, + }, + + { + .name = "notify_on_release", + .read_uint = cgroup_read_notify_on_release, + .write = cgroup_common_file_write, + .private = FILE_NOTIFY_ON_RELEASE, + }, + + { + .name = "releasable", + .read_uint = cgroup_read_releasable, + .private = FILE_RELEASABLE, + } +}; + +static struct cftype cft_release_agent = { + .name = "release_agent", + .read = cgroup_common_file_read, + .write = cgroup_common_file_write, + .private = FILE_RELEASE_AGENT, +}; + +static int cgroup_populate_dir(struct cgroup *cgrp) +{ + int err; + struct cgroup_subsys *ss; + + /* First clear out any existing files */ + cgroup_clear_directory(cgrp->dentry); + + err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files)); + if (err < 0) + return err; + + if (cgrp == cgrp->top_cgroup) { + if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0) + return err; + } + + for_each_subsys(cgrp->root, ss) { + if (ss->populate && (err = ss->populate(ss, cgrp)) < 0) + return err; + } + + return 0; +} + +static void init_cgroup_css(struct cgroup_subsys_state *css, + struct cgroup_subsys *ss, + struct cgroup *cgrp) +{ + css->cgroup = cgrp; + atomic_set(&css->refcnt, 0); + css->flags = 0; + if (cgrp == dummytop) + set_bit(CSS_ROOT, &css->flags); + BUG_ON(cgrp->subsys[ss->subsys_id]); + cgrp->subsys[ss->subsys_id] = css; +} + +/* + * cgroup_create - create a cgroup + * parent: cgroup that will be parent of the new cgroup. + * name: name of the new cgroup. Will be strcpy'ed. 
+ * mode: mode to set on new inode + * + * Must be called with the mutex on the parent inode held + */ + +static long cgroup_create(struct cgroup *parent, struct dentry *dentry, + int mode) +{ + struct cgroup *cgrp; + struct cgroupfs_root *root = parent->root; + int err = 0; + struct cgroup_subsys *ss; + struct super_block *sb = root->sb; + + cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL); + if (!cgrp) + return -ENOMEM; + + /* Grab a reference on the superblock so the hierarchy doesn't + * get deleted on unmount if there are child cgroups. This + * can be done outside cgroup_mutex, since the sb can't + * disappear while someone has an open control file on the + * fs */ + atomic_inc(&sb->s_active); + + mutex_lock(&cgroup_mutex); + + cgrp->flags = 0; + INIT_LIST_HEAD(&cgrp->sibling); + INIT_LIST_HEAD(&cgrp->children); + INIT_LIST_HEAD(&cgrp->css_sets); + INIT_LIST_HEAD(&cgrp->release_list); + + cgrp->parent = parent; + cgrp->root = parent->root; + cgrp->top_cgroup = parent->top_cgroup; + + for_each_subsys(root, ss) { + struct cgroup_subsys_state *css = ss->create(ss, cgrp); + if (IS_ERR(css)) { + err = PTR_ERR(css); + goto err_destroy; + } + init_cgroup_css(css, ss, cgrp); + } + + list_add(&cgrp->sibling, &cgrp->parent->children); + root->number_of_cgroups++; + + err = cgroup_create_dir(cgrp, dentry, mode); + if (err < 0) + goto err_remove; + + /* The cgroup directory was pre-locked for us */ + BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex)); + + err = cgroup_populate_dir(cgrp); + /* If err < 0, we have a half-filled directory - oh well ;) */ + + mutex_unlock(&cgroup_mutex); + mutex_unlock(&cgrp->dentry->d_inode->i_mutex); + + return 0; + + err_remove: + + list_del(&cgrp->sibling); + root->number_of_cgroups--; + + err_destroy: + + for_each_subsys(root, ss) { + if (cgrp->subsys[ss->subsys_id]) + ss->destroy(ss, cgrp); + } + + mutex_unlock(&cgroup_mutex); + + /* Release the reference count that we took on the superblock */ + deactivate_super(sb); + + kfree(cgrp); + return err; +} + +static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode) +{ + struct cgroup *c_parent = dentry->d_parent->d_fsdata; + + /* the vfs holds inode->i_mutex already */ + return cgroup_create(c_parent, dentry, mode | S_IFDIR); +} + +static inline int cgroup_has_css_refs(struct cgroup *cgrp) +{ + /* Check the reference count on each subsystem. Since we + * already established that there are no tasks in the + * cgroup, if the css refcount is also 0, then there should + * be no outstanding references, so the subsystem is safe to + * destroy. We scan across all subsystems rather than using + * the per-hierarchy linked list of mounted subsystems since + * we can be called via check_for_release() with no + * synchronization other than RCU, and the subsystem linked + * list isn't RCU-safe */ + int i; + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + struct cgroup_subsys_state *css; + /* Skip subsystems not in this hierarchy */ + if (ss->root != cgrp->root) + continue; + css = cgrp->subsys[ss->subsys_id]; + /* When called from check_for_release() it's possible + * that by this point the cgroup has been removed + * and the css deleted. But a false-positive doesn't + * matter, since it can only happen if the cgroup + * has been deleted and hence no longer needs the + * release agent to be called anyway. 
*/ + if (css && atomic_read(&css->refcnt)) { + return 1; + } + } + return 0; +} + +static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) +{ + struct cgroup *cgrp = dentry->d_fsdata; + struct dentry *d; + struct cgroup *parent; + struct cgroup_subsys *ss; + struct super_block *sb; + struct cgroupfs_root *root; + + /* the vfs holds both inode->i_mutex already */ + + mutex_lock(&cgroup_mutex); + if (atomic_read(&cgrp->count) != 0) { + mutex_unlock(&cgroup_mutex); + return -EBUSY; + } + if (!list_empty(&cgrp->children)) { + mutex_unlock(&cgroup_mutex); + return -EBUSY; + } + + parent = cgrp->parent; + root = cgrp->root; + sb = root->sb; + + if (cgroup_has_css_refs(cgrp)) { + mutex_unlock(&cgroup_mutex); + return -EBUSY; + } + + for_each_subsys(root, ss) { + if (cgrp->subsys[ss->subsys_id]) + ss->destroy(ss, cgrp); + } + + spin_lock(&release_list_lock); + set_bit(CGRP_REMOVED, &cgrp->flags); + if (!list_empty(&cgrp->release_list)) + list_del(&cgrp->release_list); + spin_unlock(&release_list_lock); + /* delete my sibling from parent->children */ + list_del(&cgrp->sibling); + spin_lock(&cgrp->dentry->d_lock); + d = dget(cgrp->dentry); + cgrp->dentry = NULL; + spin_unlock(&d->d_lock); + + cgroup_d_remove_dir(d); + dput(d); + root->number_of_cgroups--; + + set_bit(CGRP_RELEASABLE, &parent->flags); + check_for_release(parent); + + mutex_unlock(&cgroup_mutex); + /* Drop the active superblock reference that we took when we + * created the cgroup */ + deactivate_super(sb); + return 0; +} + +static void cgroup_init_subsys(struct cgroup_subsys *ss) +{ + struct cgroup_subsys_state *css; + struct list_head *l; + printk(KERN_ERR "Initializing cgroup subsys %s\n", ss->name); + + /* Create the top cgroup state for this subsystem */ + ss->root = &rootnode; + css = ss->create(ss, dummytop); + /* We don't handle early failures gracefully */ + BUG_ON(IS_ERR(css)); + init_cgroup_css(css, ss, dummytop); + + /* Update all cgroup groups to contain a subsys + * pointer to this state - since the subsystem is + * newly registered, all tasks and hence all cgroup + * groups are in the subsystem's top cgroup. */ + write_lock(&css_set_lock); + l = &init_css_set.list; + do { + struct css_set *cg = + list_entry(l, struct css_set, list); + cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id]; + l = l->next; + } while (l != &init_css_set.list); + write_unlock(&css_set_lock); + + /* If this subsystem requested that it be notified with fork + * events, we should send it one now for every process in the + * system */ + if (ss->fork) { + struct task_struct *g, *p; + + read_lock(&tasklist_lock); + do_each_thread(g, p) { + ss->fork(ss, p); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + } + + need_forkexit_callback |= ss->fork || ss->exit; + + ss->active = 1; +} + +/** + * cgroup_init_early - initialize cgroups at system boot, and + * initialize any subsystems that request early init. 
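cgroup_init_early() and cgroup_init() below spell out what every entry of subsys[] must provide: a name no longer than MAX_CGROUP_TYPE_NAMELEN, create and destroy callbacks, and a subsys_id equal to its slot in the array; fork/exit callbacks, a populate hook and early_init are optional (need_forkexit_callback above is derived from fork/exit). Continuing the hypothetical "foo" controller sketched after cgroup_add_files() earlier, the registration half would look roughly like the debug and cpuacct subsystems added elsewhere in this patch (foo_subsys_id is assumed to be defined alongside the real ids such as debug_subsys_id):

#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/slab.h>

static struct cgroup_subsys_state *foo_create(struct cgroup_subsys *ss,
					      struct cgroup *cont)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	return css ? css : ERR_PTR(-ENOMEM);
}

static void foo_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	kfree(cont->subsys[foo_subsys_id]);
}

struct cgroup_subsys foo_subsys = {
	.name		= "foo",
	.create		= foo_create,
	.destroy	= foo_destroy,
	.populate	= foo_populate,		/* from the cftype sketch above */
	.subsys_id	= foo_subsys_id,	/* must match this entry's index in subsys[] */
};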
+ */ +int __init cgroup_init_early(void) +{ + int i; + kref_init(&init_css_set.ref); + kref_get(&init_css_set.ref); + INIT_LIST_HEAD(&init_css_set.list); + INIT_LIST_HEAD(&init_css_set.cg_links); + INIT_LIST_HEAD(&init_css_set.tasks); + css_set_count = 1; + init_cgroup_root(&rootnode); + list_add(&rootnode.root_list, &roots); + root_count = 1; + init_task.cgroups = &init_css_set; + + init_css_set_link.cg = &init_css_set; + list_add(&init_css_set_link.cgrp_link_list, + &rootnode.top_cgroup.css_sets); + list_add(&init_css_set_link.cg_link_list, + &init_css_set.cg_links); + + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + + BUG_ON(!ss->name); + BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN); + BUG_ON(!ss->create); + BUG_ON(!ss->destroy); + if (ss->subsys_id != i) { + printk(KERN_ERR "Subsys %s id == %d\n", + ss->name, ss->subsys_id); + BUG(); + } + + if (ss->early_init) + cgroup_init_subsys(ss); + } + return 0; +} + +/** + * cgroup_init - register cgroup filesystem and /proc file, and + * initialize any subsystems that didn't request early init. + */ +int __init cgroup_init(void) +{ + int err; + int i; + struct proc_dir_entry *entry; + + err = bdi_init(&cgroup_backing_dev_info); + if (err) + return err; + + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + if (!ss->early_init) + cgroup_init_subsys(ss); + } + + err = register_filesystem(&cgroup_fs_type); + if (err < 0) + goto out; + + entry = create_proc_entry("cgroups", 0, NULL); + if (entry) + entry->proc_fops = &proc_cgroupstats_operations; + +out: + if (err) + bdi_destroy(&cgroup_backing_dev_info); + + return err; +} + +/* + * proc_cgroup_show() + * - Print task's cgroup paths into seq_file, one line for each hierarchy + * - Used for /proc/<pid>/cgroup. + * - No need to task_lock(tsk) on this tsk->cgroup reference, as it + * doesn't really matter if tsk->cgroup changes after we read it, + * and we take cgroup_mutex, keeping attach_task() from changing it + * anyway. No need to check that tsk->cgroup != NULL, thanks to + * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks + * cgroup to top_cgroup. + */ + +/* TODO: Use a proper seq_file iterator */ +static int proc_cgroup_show(struct seq_file *m, void *v) +{ + struct pid *pid; + struct task_struct *tsk; + char *buf; + int retval; + struct cgroupfs_root *root; + + retval = -ENOMEM; + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!buf) + goto out; + + retval = -ESRCH; + pid = m->private; + tsk = get_pid_task(pid, PIDTYPE_PID); + if (!tsk) + goto out_free; + + retval = 0; + + mutex_lock(&cgroup_mutex); + + for_each_root(root) { + struct cgroup_subsys *ss; + struct cgroup *cgrp; + int subsys_id; + int count = 0; + + /* Skip this hierarchy if it has no active subsystems */ + if (!root->actual_subsys_bits) + continue; + for_each_subsys(root, ss) + seq_printf(m, "%s%s", count++ ? 
"," : "", ss->name); + seq_putc(m, ':'); + get_first_subsys(&root->top_cgroup, NULL, &subsys_id); + cgrp = task_cgroup(tsk, subsys_id); + retval = cgroup_path(cgrp, buf, PAGE_SIZE); + if (retval < 0) + goto out_unlock; + seq_puts(m, buf); + seq_putc(m, '\n'); + } + +out_unlock: + mutex_unlock(&cgroup_mutex); + put_task_struct(tsk); +out_free: + kfree(buf); +out: + return retval; +} + +static int cgroup_open(struct inode *inode, struct file *file) +{ + struct pid *pid = PROC_I(inode)->pid; + return single_open(file, proc_cgroup_show, pid); +} + +struct file_operations proc_cgroup_operations = { + .open = cgroup_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* Display information about each subsystem and each hierarchy */ +static int proc_cgroupstats_show(struct seq_file *m, void *v) +{ + int i; + struct cgroupfs_root *root; + + seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\n"); + mutex_lock(&cgroup_mutex); + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + seq_printf(m, "%s\t%lu\t%d\n", + ss->name, ss->root->subsys_bits, + ss->root->number_of_cgroups); + } + mutex_unlock(&cgroup_mutex); + return 0; +} + +static int cgroupstats_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_cgroupstats_show, 0); +} + +static struct file_operations proc_cgroupstats_operations = { + .open = cgroupstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/** + * cgroup_fork - attach newly forked task to its parents cgroup. + * @tsk: pointer to task_struct of forking parent process. + * + * Description: A task inherits its parent's cgroup at fork(). + * + * A pointer to the shared css_set was automatically copied in + * fork.c by dup_task_struct(). However, we ignore that copy, since + * it was not made under the protection of RCU or cgroup_mutex, so + * might no longer be a valid cgroup pointer. attach_task() might + * have already changed current->cgroups, allowing the previously + * referenced cgroup group to be removed and freed. + * + * At the point that cgroup_fork() is called, 'current' is the parent + * task, and the passed argument 'child' points to the child task. + */ +void cgroup_fork(struct task_struct *child) +{ + task_lock(current); + child->cgroups = current->cgroups; + get_css_set(child->cgroups); + task_unlock(current); + INIT_LIST_HEAD(&child->cg_list); +} + +/** + * cgroup_fork_callbacks - called on a new task very soon before + * adding it to the tasklist. No need to take any locks since no-one + * can be operating on this task + */ +void cgroup_fork_callbacks(struct task_struct *child) +{ + if (need_forkexit_callback) { + int i; + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + if (ss->fork) + ss->fork(ss, child); + } + } +} + +/** + * cgroup_post_fork - called on a new task after adding it to the + * task list. Adds the task to the list running through its css_set + * if necessary. Has to be after the task is visible on the task list + * in case we race with the first call to cgroup_iter_start() - to + * guarantee that the new task ends up on its list. 
*/ +void cgroup_post_fork(struct task_struct *child) +{ + if (use_task_css_set_links) { + write_lock(&css_set_lock); + if (list_empty(&child->cg_list)) + list_add(&child->cg_list, &child->cgroups->tasks); + write_unlock(&css_set_lock); + } +} +/** + * cgroup_exit - detach cgroup from exiting task + * @tsk: pointer to task_struct of exiting process + * + * Description: Detach cgroup from @tsk and release it. + * + * Note that cgroups marked notify_on_release force every task in + * them to take the global cgroup_mutex mutex when exiting. + * This could impact scaling on very large systems. Be reluctant to + * use notify_on_release cgroups where very high task exit scaling + * is required on large systems. + * + * the_top_cgroup_hack: + * + * Set the exiting tasks cgroup to the root cgroup (top_cgroup). + * + * We call cgroup_exit() while the task is still competent to + * handle notify_on_release(), then leave the task attached to the + * root cgroup in each hierarchy for the remainder of its exit. + * + * To do this properly, we would increment the reference count on + * top_cgroup, and near the very end of the kernel/exit.c do_exit() + * code we would add a second cgroup function call, to drop that + * reference. This would just create an unnecessary hot spot on + * the top_cgroup reference count, to no avail. + * + * Normally, holding a reference to a cgroup without bumping its + * count is unsafe. The cgroup could go away, or someone could + * attach us to a different cgroup, decrementing the count on + * the first cgroup that we never incremented. But in this case, + * top_cgroup isn't going away, and either task has PF_EXITING set, + * which wards off any attach_task() attempts, or task is a failed + * fork, never visible to attach_task. + * + */ +void cgroup_exit(struct task_struct *tsk, int run_callbacks) +{ + int i; + struct css_set *cg; + + if (run_callbacks && need_forkexit_callback) { + for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { + struct cgroup_subsys *ss = subsys[i]; + if (ss->exit) + ss->exit(ss, tsk); + } + } + + /* + * Unlink from the css_set task list if necessary. + * Optimistically check cg_list before taking + * css_set_lock + */ + if (!list_empty(&tsk->cg_list)) { + write_lock(&css_set_lock); + if (!list_empty(&tsk->cg_list)) + list_del(&tsk->cg_list); + write_unlock(&css_set_lock); + } + + /* Reassign the task to the init_css_set. 
*/ + task_lock(tsk); + cg = tsk->cgroups; + tsk->cgroups = &init_css_set; + task_unlock(tsk); + if (cg) + put_css_set_taskexit(cg); +} + +/** + * cgroup_clone - duplicate the current cgroup in the hierarchy + * that the given subsystem is attached to, and move this task into + * the new child + */ +int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) +{ + struct dentry *dentry; + int ret = 0; + char nodename[MAX_CGROUP_TYPE_NAMELEN]; + struct cgroup *parent, *child; + struct inode *inode; + struct css_set *cg; + struct cgroupfs_root *root; + struct cgroup_subsys *ss; + + /* We shouldn't be called by an unregistered subsystem */ + BUG_ON(!subsys->active); + + /* First figure out what hierarchy and cgroup we're dealing + * with, and pin them so we can drop cgroup_mutex */ + mutex_lock(&cgroup_mutex); + again: + root = subsys->root; + if (root == &rootnode) { + printk(KERN_INFO + "Not cloning cgroup for unused subsystem %s\n", + subsys->name); + mutex_unlock(&cgroup_mutex); + return 0; + } + cg = tsk->cgroups; + parent = task_cgroup(tsk, subsys->subsys_id); + + snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid); + + /* Pin the hierarchy */ + atomic_inc(&parent->root->sb->s_active); + + /* Keep the cgroup alive */ + get_css_set(cg); + mutex_unlock(&cgroup_mutex); + + /* Now do the VFS work to create a cgroup */ + inode = parent->dentry->d_inode; + + /* Hold the parent directory mutex across this operation to + * stop anyone else deleting the new cgroup */ + mutex_lock(&inode->i_mutex); + dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename)); + if (IS_ERR(dentry)) { + printk(KERN_INFO + "Couldn't allocate dentry for %s: %ld\n", nodename, + PTR_ERR(dentry)); + ret = PTR_ERR(dentry); + goto out_release; + } + + /* Create the cgroup directory, which also creates the cgroup */ + ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755); + child = __d_cgrp(dentry); + dput(dentry); + if (ret) { + printk(KERN_INFO + "Failed to create cgroup %s: %d\n", nodename, + ret); + goto out_release; + } + + if (!child) { + printk(KERN_INFO + "Couldn't find new cgroup %s\n", nodename); + ret = -ENOMEM; + goto out_release; + } + + /* The cgroup now exists. Retake cgroup_mutex and check + * that we're still in the same state that we thought we + * were. */ + mutex_lock(&cgroup_mutex); + if ((root != subsys->root) || + (parent != task_cgroup(tsk, subsys->subsys_id))) { + /* Aargh, we raced ... */ + mutex_unlock(&inode->i_mutex); + put_css_set(cg); + + deactivate_super(parent->root->sb); + /* The cgroup is still accessible in the VFS, but + * we're not going to try to rmdir() it at this + * point. */ + printk(KERN_INFO + "Race in cgroup_clone() - leaking cgroup %s\n", + nodename); + goto again; + } + + /* do any required auto-setup */ + for_each_subsys(root, ss) { + if (ss->post_clone) + ss->post_clone(ss, child); + } + + /* All seems fine. Finish by moving the task into the new cgroup */ + ret = attach_task(child, tsk); + mutex_unlock(&cgroup_mutex); + + out_release: + mutex_unlock(&inode->i_mutex); + + mutex_lock(&cgroup_mutex); + put_css_set(cg); + mutex_unlock(&cgroup_mutex); + deactivate_super(parent->root->sb); + return ret; +} + +/* + * See if "cgrp" is a descendant of the current task's cgroup in + * the appropriate hierarchy + * + * If we are sending in dummytop, then presumably we are creating + * the top cgroup in the subsystem. + * + * Called only by the ns (nsproxy) cgroup. 
+ */ +int cgroup_is_descendant(const struct cgroup *cgrp) +{ + int ret; + struct cgroup *target; + int subsys_id; + + if (cgrp == dummytop) + return 1; + + get_first_subsys(cgrp, NULL, &subsys_id); + target = task_cgroup(current, subsys_id); + while (cgrp != target && cgrp!= cgrp->top_cgroup) + cgrp = cgrp->parent; + ret = (cgrp == target); + return ret; +} + +static void check_for_release(struct cgroup *cgrp) +{ + /* All of these checks rely on RCU to keep the cgroup + * structure alive */ + if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count) + && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) { + /* Control Group is currently removeable. If it's not + * already queued for a userspace notification, queue + * it now */ + int need_schedule_work = 0; + spin_lock(&release_list_lock); + if (!cgroup_is_removed(cgrp) && + list_empty(&cgrp->release_list)) { + list_add(&cgrp->release_list, &release_list); + need_schedule_work = 1; + } + spin_unlock(&release_list_lock); + if (need_schedule_work) + schedule_work(&release_agent_work); + } +} + +void __css_put(struct cgroup_subsys_state *css) +{ + struct cgroup *cgrp = css->cgroup; + rcu_read_lock(); + if (atomic_dec_and_test(&css->refcnt) && notify_on_release(cgrp)) { + set_bit(CGRP_RELEASABLE, &cgrp->flags); + check_for_release(cgrp); + } + rcu_read_unlock(); +} + +/* + * Notify userspace when a cgroup is released, by running the + * configured release agent with the name of the cgroup (path + * relative to the root of cgroup file system) as the argument. + * + * Most likely, this user command will try to rmdir this cgroup. + * + * This races with the possibility that some other task will be + * attached to this cgroup before it is removed, or that some other + * user task will 'mkdir' a child cgroup of this cgroup. That's ok. + * The presumed 'rmdir' will fail quietly if this cgroup is no longer + * unused, and this cgroup will be reprieved from its death sentence, + * to continue to serve a useful existence. Next time it's released, + * we will get notified again, if it still has 'notify_on_release' set. + * + * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which + * means only wait until the task is successfully execve()'d. The + * separate release agent task is forked by call_usermodehelper(), + * then control in this thread returns here, without waiting for the + * release agent task. We don't bother to wait because the caller of + * this routine has no use for the exit status of the release agent + * task, so no sense holding our caller up for that. 
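The helper spawned here receives exactly one argument, the released cgroup's path relative to the hierarchy root, and runs with only the minimal HOME/PATH environment built in the function that follows. As a sketch of the userspace side, assuming the hierarchy is mounted at /dev/cgroup (the mount point is the administrator's choice, nothing in the kernel fixes it), such an agent might be no more than:

#include <stdio.h>
#include <unistd.h>

int main(int argc, char *argv[])
{
	char path[4096];

	if (argc != 2)
		return 1;
	/* argv[1] is the cgroup path, e.g. "/mygroup"; prepend the chosen mount point. */
	snprintf(path, sizeof(path), "/dev/cgroup%s", argv[1]);
	/* The typical response, as noted above, is to remove the now-empty directory. */
	if (rmdir(path) != 0)
		perror("rmdir");
	return 0;
}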
+ * + */ + +static void cgroup_release_agent(struct work_struct *work) +{ + BUG_ON(work != &release_agent_work); + mutex_lock(&cgroup_mutex); + spin_lock(&release_list_lock); + while (!list_empty(&release_list)) { + char *argv[3], *envp[3]; + int i; + char *pathbuf; + struct cgroup *cgrp = list_entry(release_list.next, + struct cgroup, + release_list); + list_del_init(&cgrp->release_list); + spin_unlock(&release_list_lock); + pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pathbuf) { + spin_lock(&release_list_lock); + continue; + } + + if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) { + kfree(pathbuf); + spin_lock(&release_list_lock); + continue; + } + + i = 0; + argv[i++] = cgrp->root->release_agent_path; + argv[i++] = (char *)pathbuf; + argv[i] = NULL; + + i = 0; + /* minimal command environment */ + envp[i++] = "HOME=/"; + envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; + envp[i] = NULL; + + /* Drop the lock while we invoke the usermode helper, + * since the exec could involve hitting disk and hence + * be a slow process */ + mutex_unlock(&cgroup_mutex); + call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); + kfree(pathbuf); + mutex_lock(&cgroup_mutex); + spin_lock(&release_list_lock); + } + spin_unlock(&release_list_lock); + mutex_unlock(&cgroup_mutex); +} diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c new file mode 100644 index 00000000000..37301e877cb --- /dev/null +++ b/kernel/cgroup_debug.c @@ -0,0 +1,97 @@ +/* + * kernel/cgroup_debug.c - Example cgroup subsystem that + * exposes debug info + * + * Copyright (C) Google Inc, 2007 + * + * Developed by Paul Menage (menage@google.com) + * + */ + +#include <linux/cgroup.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/rcupdate.h> + +#include <asm/atomic.h> + +static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss, + struct cgroup *cont) +{ + struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL); + + if (!css) + return ERR_PTR(-ENOMEM); + + return css; +} + +static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont) +{ + kfree(cont->subsys[debug_subsys_id]); +} + +static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft) +{ + return atomic_read(&cont->count); +} + +static u64 taskcount_read(struct cgroup *cont, struct cftype *cft) +{ + u64 count; + + cgroup_lock(); + count = cgroup_task_count(cont); + cgroup_unlock(); + return count; +} + +static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft) +{ + return (u64)(long)current->cgroups; +} + +static u64 current_css_set_refcount_read(struct cgroup *cont, + struct cftype *cft) +{ + u64 count; + + rcu_read_lock(); + count = atomic_read(&current->cgroups->ref.refcount); + rcu_read_unlock(); + return count; +} + +static struct cftype files[] = { + { + .name = "cgroup_refcount", + .read_uint = cgroup_refcount_read, + }, + { + .name = "taskcount", + .read_uint = taskcount_read, + }, + + { + .name = "current_css_set", + .read_uint = current_css_set_read, + }, + + { + .name = "current_css_set_refcount", + .read_uint = current_css_set_refcount_read, + }, +}; + +static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont) +{ + return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); +} + +struct cgroup_subsys debug_subsys = { + .name = "debug", + .create = debug_create, + .destroy = debug_destroy, + .populate = debug_populate, + .subsys_id = debug_subsys_id, +}; diff --git a/kernel/compat.c b/kernel/compat.c index 3bae3742c2a..42a1ed4b61b 100644 --- a/kernel/compat.c +++
b/kernel/compat.c @@ -40,62 +40,27 @@ int put_compat_timespec(const struct timespec *ts, struct compat_timespec __user __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0; } -static long compat_nanosleep_restart(struct restart_block *restart) -{ - unsigned long expire = restart->arg0, now = jiffies; - struct compat_timespec __user *rmtp; - - /* Did it expire while we handled signals? */ - if (!time_after(expire, now)) - return 0; - - expire = schedule_timeout_interruptible(expire - now); - if (expire == 0) - return 0; - - rmtp = (struct compat_timespec __user *)restart->arg1; - if (rmtp) { - struct compat_timespec ct; - struct timespec t; - - jiffies_to_timespec(expire, &t); - ct.tv_sec = t.tv_sec; - ct.tv_nsec = t.tv_nsec; - if (copy_to_user(rmtp, &ct, sizeof(ct))) - return -EFAULT; - } - /* The 'restart' block is already filled in */ - return -ERESTART_RESTARTBLOCK; -} - asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, - struct compat_timespec __user *rmtp) + struct compat_timespec __user *rmtp) { - struct timespec t; - struct restart_block *restart; - unsigned long expire; + struct timespec tu, rmt; + long ret; - if (get_compat_timespec(&t, rqtp)) + if (get_compat_timespec(&tu, rqtp)) return -EFAULT; - if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0)) + if (!timespec_valid(&tu)) return -EINVAL; - expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec); - expire = schedule_timeout_interruptible(expire); - if (expire == 0) - return 0; + ret = hrtimer_nanosleep(&tu, rmtp ? &rmt : NULL, HRTIMER_MODE_REL, + CLOCK_MONOTONIC); - if (rmtp) { - jiffies_to_timespec(expire, &t); - if (put_compat_timespec(&t, rmtp)) + if (ret && rmtp) { + if (put_compat_timespec(&rmt, rmtp)) return -EFAULT; } - restart = ¤t_thread_info()->restart_block; - restart->fn = compat_nanosleep_restart; - restart->arg0 = jiffies + expire; - restart->arg1 = (unsigned long) rmtp; - return -ERESTART_RESTARTBLOCK; + + return ret; } static inline long get_compat_itimerval(struct itimerval *o, @@ -247,8 +212,8 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource, int ret; mm_segment_t old_fs = get_fs (); - if (resource >= RLIM_NLIMITS) - return -EINVAL; + if (resource >= RLIM_NLIMITS) + return -EINVAL; if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) || __get_user(r.rlim_cur, &rlim->rlim_cur) || @@ -477,21 +442,21 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, int get_compat_itimerspec(struct itimerspec *dst, const struct compat_itimerspec __user *src) -{ +{ if (get_compat_timespec(&dst->it_interval, &src->it_interval) || get_compat_timespec(&dst->it_value, &src->it_value)) return -EFAULT; return 0; -} +} int put_compat_itimerspec(struct compat_itimerspec __user *dst, const struct itimerspec *src) -{ +{ if (put_compat_timespec(&src->it_interval, &dst->it_interval) || put_compat_timespec(&src->it_value, &dst->it_value)) return -EFAULT; return 0; -} +} long compat_sys_timer_create(clockid_t which_clock, struct compat_sigevent __user *timer_event_spec, @@ -512,9 +477,9 @@ long compat_sys_timer_create(clockid_t which_clock, } long compat_sys_timer_settime(timer_t timer_id, int flags, - struct compat_itimerspec __user *new, + struct compat_itimerspec __user *new, struct compat_itimerspec __user *old) -{ +{ long err; mm_segment_t oldfs; struct itimerspec newts, oldts; @@ -522,58 +487,58 @@ long compat_sys_timer_settime(timer_t timer_id, int flags, if (!new) return -EINVAL; if (get_compat_itimerspec(&newts, new)) - return -EFAULT; + return 
-EFAULT; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_timer_settime(timer_id, flags, (struct itimerspec __user *) &newts, (struct itimerspec __user *) &oldts); - set_fs(oldfs); + set_fs(oldfs); if (!err && old && put_compat_itimerspec(old, &oldts)) return -EFAULT; return err; -} +} long compat_sys_timer_gettime(timer_t timer_id, struct compat_itimerspec __user *setting) -{ +{ long err; mm_segment_t oldfs; - struct itimerspec ts; + struct itimerspec ts; oldfs = get_fs(); set_fs(KERNEL_DS); err = sys_timer_gettime(timer_id, - (struct itimerspec __user *) &ts); - set_fs(oldfs); + (struct itimerspec __user *) &ts); + set_fs(oldfs); if (!err && put_compat_itimerspec(setting, &ts)) return -EFAULT; return err; -} +} long compat_sys_clock_settime(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; - struct timespec ts; + struct timespec ts; if (get_compat_timespec(&ts, tp)) - return -EFAULT; + return -EFAULT; oldfs = get_fs(); - set_fs(KERNEL_DS); + set_fs(KERNEL_DS); err = sys_clock_settime(which_clock, (struct timespec __user *) &ts); set_fs(oldfs); return err; -} +} long compat_sys_clock_gettime(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; - struct timespec ts; + struct timespec ts; oldfs = get_fs(); set_fs(KERNEL_DS); @@ -581,16 +546,16 @@ long compat_sys_clock_gettime(clockid_t which_clock, (struct timespec __user *) &ts); set_fs(oldfs); if (!err && put_compat_timespec(&ts, tp)) - return -EFAULT; + return -EFAULT; return err; -} +} long compat_sys_clock_getres(clockid_t which_clock, struct compat_timespec __user *tp) { long err; mm_segment_t oldfs; - struct timespec ts; + struct timespec ts; oldfs = get_fs(); set_fs(KERNEL_DS); @@ -598,9 +563,9 @@ long compat_sys_clock_getres(clockid_t which_clock, (struct timespec __user *) &ts); set_fs(oldfs); if (!err && tp && put_compat_timespec(&ts, tp)) - return -EFAULT; + return -EFAULT; return err; -} +} static long compat_clock_nanosleep_restart(struct restart_block *restart) { @@ -632,10 +597,10 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, { long err; mm_segment_t oldfs; - struct timespec in, out; + struct timespec in, out; struct restart_block *restart; - if (get_compat_timespec(&in, rqtp)) + if (get_compat_timespec(&in, rqtp)) return -EFAULT; oldfs = get_fs(); @@ -654,8 +619,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, restart->fn = compat_clock_nanosleep_restart; restart->arg1 = (unsigned long) rmtp; } - return err; -} + return err; +} /* * We currently only need the following fields from the sigevent diff --git a/kernel/cpu.c b/kernel/cpu.c index 38033db8d8e..6b3a0c15144 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -98,7 +98,8 @@ static inline void check_for_tasks(int cpu) !cputime_eq(p->stime, cputime_zero))) printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\ (state = %ld, flags = %x) \n", - p->comm, p->pid, cpu, p->state, p->flags); + p->comm, task_pid_nr(p), cpu, + p->state, p->flags); } write_unlock_irq(&tasklist_lock); } @@ -150,6 +151,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen) err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); if (err == NOTIFY_BAD) { + nr_calls--; __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL); printk("%s: attempt to take down CPU %u failed\n", @@ -233,6 +235,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | 
mod, hcpu, -1, &nr_calls); if (ret == NOTIFY_BAD) { + nr_calls--; printk("%s: attempt to bring up CPU %u failed\n", __FUNCTION__, cpu); ret = -EINVAL; @@ -262,6 +265,15 @@ out_notify: int __cpuinit cpu_up(unsigned int cpu) { int err = 0; + if (!cpu_isset(cpu, cpu_possible_map)) { + printk(KERN_ERR "can't online cpu %d because it is not " + "configured as may-hotadd at boot time\n", cpu); +#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390) + printk(KERN_ERR "please check additional_cpus= boot " + "parameter\n"); +#endif + return -EINVAL; + } mutex_lock(&cpu_add_remove_lock); if (cpu_hotplug_disabled) diff --git a/kernel/cpu_acct.c b/kernel/cpu_acct.c new file mode 100644 index 00000000000..731e47e7f16 --- /dev/null +++ b/kernel/cpu_acct.c @@ -0,0 +1,186 @@ +/* + * kernel/cpu_acct.c - CPU accounting cgroup subsystem + * + * Copyright (C) Google Inc, 2006 + * + * Developed by Paul Menage (menage@google.com) and Balbir Singh + * (balbir@in.ibm.com) + * + */ + +/* + * Example cgroup subsystem for reporting total CPU usage of tasks in a + * cgroup, along with percentage load over a time interval + */ + +#include <linux/module.h> +#include <linux/cgroup.h> +#include <linux/fs.h> +#include <linux/rcupdate.h> + +#include <asm/div64.h> + +struct cpuacct { + struct cgroup_subsys_state css; + spinlock_t lock; + /* total time used by this class */ + cputime64_t time; + + /* time when next load calculation occurs */ + u64 next_interval_check; + + /* time used in current period */ + cputime64_t current_interval_time; + + /* time used in last period */ + cputime64_t last_interval_time; +}; + +struct cgroup_subsys cpuacct_subsys; + +static inline struct cpuacct *cgroup_ca(struct cgroup *cont) +{ + return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id), + struct cpuacct, css); +} + +static inline struct cpuacct *task_ca(struct task_struct *task) +{ + return container_of(task_subsys_state(task, cpuacct_subsys_id), + struct cpuacct, css); +} + +#define INTERVAL (HZ * 10) + +static inline u64 next_interval_boundary(u64 now) +{ + /* calculate the next interval boundary beyond the + * current time */ + do_div(now, INTERVAL); + return (now + 1) * INTERVAL; +} + +static struct cgroup_subsys_state *cpuacct_create( + struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL); + + if (!ca) + return ERR_PTR(-ENOMEM); + spin_lock_init(&ca->lock); + ca->next_interval_check = next_interval_boundary(get_jiffies_64()); + return &ca->css; +} + +static void cpuacct_destroy(struct cgroup_subsys *ss, + struct cgroup *cont) +{ + kfree(cgroup_ca(cont)); +} + +/* Lazily update the load calculation if necessary. 
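To make the interval bookkeeping concrete: with an assumed HZ of 1000, INTERVAL above works out to 10000 jiffies (ten seconds), next_interval_boundary() rounds up to the next multiple of it, and load_read() further down converts the previous interval's usage into a percentage. A worked example with invented numbers:

/*
 * HZ = 1000 (assumed)  =>  INTERVAL = HZ * 10 = 10000 jiffies = 10 s
 *
 * next_interval_boundary(26400):
 *	do_div(now, 10000) leaves now = 2 (remainder 6400),
 *	so the function returns (2 + 1) * 10000 = 30000.
 *
 * load_read() with last_interval_time = 2500 jiffies:
 *	2500 * 100 / 10000 = 25, i.e. CPU time equal to 25% of the interval.
 */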
Called with ca locked */ +static void cpuusage_update(struct cpuacct *ca) +{ + u64 now = get_jiffies_64(); + + /* If we're not due for an update, return */ + if (ca->next_interval_check > now) + return; + + if (ca->next_interval_check <= (now - INTERVAL)) { + /* If it's been more than an interval since the last + * check, then catch up - the last interval must have + * been zero load */ + ca->last_interval_time = 0; + ca->next_interval_check = next_interval_boundary(now); + } else { + /* If a steal takes the last interval time negative, + * then we just ignore it */ + if ((s64)ca->current_interval_time > 0) + ca->last_interval_time = ca->current_interval_time; + else + ca->last_interval_time = 0; + ca->next_interval_check += INTERVAL; + } + ca->current_interval_time = 0; +} + +static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft) +{ + struct cpuacct *ca = cgroup_ca(cont); + u64 time; + + spin_lock_irq(&ca->lock); + cpuusage_update(ca); + time = cputime64_to_jiffies64(ca->time); + spin_unlock_irq(&ca->lock); + + /* Convert 64-bit jiffies to seconds */ + time *= 1000; + do_div(time, HZ); + return time; +} + +static u64 load_read(struct cgroup *cont, struct cftype *cft) +{ + struct cpuacct *ca = cgroup_ca(cont); + u64 time; + + /* Find the time used in the previous interval */ + spin_lock_irq(&ca->lock); + cpuusage_update(ca); + time = cputime64_to_jiffies64(ca->last_interval_time); + spin_unlock_irq(&ca->lock); + + /* Convert time to a percentage, to give the load in the + * previous period */ + time *= 100; + do_div(time, INTERVAL); + + return time; +} + +static struct cftype files[] = { + { + .name = "usage", + .read_uint = cpuusage_read, + }, + { + .name = "load", + .read_uint = load_read, + } +}; + +static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont) +{ + return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); +} + +void cpuacct_charge(struct task_struct *task, cputime_t cputime) +{ + + struct cpuacct *ca; + unsigned long flags; + + if (!cpuacct_subsys.active) + return; + rcu_read_lock(); + ca = task_ca(task); + if (ca) { + spin_lock_irqsave(&ca->lock, flags); + cpuusage_update(ca); + ca->time = cputime64_add(ca->time, cputime); + ca->current_interval_time = + cputime64_add(ca->current_interval_time, cputime); + spin_unlock_irqrestore(&ca->lock, flags); + } + rcu_read_unlock(); +} + +struct cgroup_subsys cpuacct_subsys = { + .name = "cpuacct", + .create = cpuacct_create, + .destroy = cpuacct_destroy, + .populate = cpuacct_populate, + .subsys_id = cpuacct_subsys_id, +}; diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 2eb2e50db0d..50f5dc46368 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -4,7 +4,8 @@ * Processor and Memory placement constraints for sets of tasks. * * Copyright (C) 2003 BULL SA. - * Copyright (C) 2004-2006 Silicon Graphics, Inc. + * Copyright (C) 2004-2007 Silicon Graphics, Inc. + * Copyright (C) 2006 Google, Inc * * Portions derived from Patrick Mochel's sysfs code. * sysfs is Copyright (c) 2001-3 Patrick Mochel @@ -12,6 +13,7 @@ * 2003-10-10 Written by Simon Derr. * 2003-10-22 Updates by Stephen Hemminger. * 2004 May-July Rework by Paul Jackson. + * 2006 Rework by Paul Menage to use generic cgroups * * This file is subject to the terms and conditions of the GNU General Public * License. 
See the file COPYING in the main directory of the Linux @@ -36,6 +38,7 @@ #include <linux/mount.h> #include <linux/namei.h> #include <linux/pagemap.h> +#include <linux/prio_heap.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/sched.h> @@ -52,8 +55,7 @@ #include <asm/uaccess.h> #include <asm/atomic.h> #include <linux/mutex.h> - -#define CPUSET_SUPER_MAGIC 0x27e0eb +#include <linux/kfifo.h> /* * Tracks how many cpusets are currently defined in system. @@ -62,6 +64,10 @@ */ int number_of_cpusets __read_mostly; +/* Retrieve the cpuset from a cgroup */ +struct cgroup_subsys cpuset_subsys; +struct cpuset; + /* See "Frequency meter" comments, below. */ struct fmeter { @@ -72,24 +78,13 @@ struct fmeter { }; struct cpuset { + struct cgroup_subsys_state css; + unsigned long flags; /* "unsigned long" so bitops work */ cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */ nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */ - /* - * Count is atomic so can incr (fork) or decr (exit) without a lock. - */ - atomic_t count; /* count tasks using this cpuset */ - - /* - * We link our 'sibling' struct into our parents 'children'. - * Our children link their 'sibling' into our 'children'. - */ - struct list_head sibling; /* my parents children */ - struct list_head children; /* my children */ - struct cpuset *parent; /* my parent */ - struct dentry *dentry; /* cpuset fs entry */ /* * Copy of global cpuset_mems_generation as of the most @@ -98,15 +93,32 @@ struct cpuset { int mems_generation; struct fmeter fmeter; /* memory_pressure filter */ + + /* partition number for rebuild_sched_domains() */ + int pn; }; +/* Retrieve the cpuset for a cgroup */ +static inline struct cpuset *cgroup_cs(struct cgroup *cont) +{ + return container_of(cgroup_subsys_state(cont, cpuset_subsys_id), + struct cpuset, css); +} + +/* Retrieve the cpuset for a task */ +static inline struct cpuset *task_cs(struct task_struct *task) +{ + return container_of(task_subsys_state(task, cpuset_subsys_id), + struct cpuset, css); +} + + /* bits in struct cpuset flags field */ typedef enum { CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, CS_MEMORY_MIGRATE, - CS_REMOVED, - CS_NOTIFY_ON_RELEASE, + CS_SCHED_LOAD_BALANCE, CS_SPREAD_PAGE, CS_SPREAD_SLAB, } cpuset_flagbits_t; @@ -122,14 +134,9 @@ static inline int is_mem_exclusive(const struct cpuset *cs) return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); } -static inline int is_removed(const struct cpuset *cs) +static inline int is_sched_load_balance(const struct cpuset *cs) { - return test_bit(CS_REMOVED, &cs->flags); -} - -static inline int notify_on_release(const struct cpuset *cs) -{ - return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); + return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); } static inline int is_memory_migrate(const struct cpuset *cs) @@ -172,14 +179,8 @@ static struct cpuset top_cpuset = { .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)), .cpus_allowed = CPU_MASK_ALL, .mems_allowed = NODE_MASK_ALL, - .count = ATOMIC_INIT(0), - .sibling = LIST_HEAD_INIT(top_cpuset.sibling), - .children = LIST_HEAD_INIT(top_cpuset.children), }; -static struct vfsmount *cpuset_mount; -static struct super_block *cpuset_sb; - /* * We have two global cpuset mutexes below. They can nest. * It is ok to first take manage_mutex, then nest callback_mutex. We also @@ -263,297 +264,33 @@ static struct super_block *cpuset_sb; * the routine cpuset_update_task_memory_state(). 
*/ -static DEFINE_MUTEX(manage_mutex); static DEFINE_MUTEX(callback_mutex); -/* - * A couple of forward declarations required, due to cyclic reference loop: - * cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file - * -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir. - */ - -static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode); -static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry); - -static struct backing_dev_info cpuset_backing_dev_info = { - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK, -}; - -static struct inode *cpuset_new_inode(mode_t mode) -{ - struct inode *inode = new_inode(cpuset_sb); - - if (inode) { - inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; - inode->i_blocks = 0; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info; - } - return inode; -} - -static void cpuset_diput(struct dentry *dentry, struct inode *inode) -{ - /* is dentry a directory ? if so, kfree() associated cpuset */ - if (S_ISDIR(inode->i_mode)) { - struct cpuset *cs = dentry->d_fsdata; - BUG_ON(!(is_removed(cs))); - kfree(cs); - } - iput(inode); -} - -static struct dentry_operations cpuset_dops = { - .d_iput = cpuset_diput, -}; - -static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name) -{ - struct dentry *d = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(d)) - d->d_op = &cpuset_dops; - return d; -} - -static void remove_dir(struct dentry *d) -{ - struct dentry *parent = dget(d->d_parent); - - d_delete(d); - simple_rmdir(parent->d_inode, d); - dput(parent); -} - -/* - * NOTE : the dentry must have been dget()'ed - */ -static void cpuset_d_remove_dir(struct dentry *dentry) -{ - struct list_head *node; - - spin_lock(&dcache_lock); - node = dentry->d_subdirs.next; - while (node != &dentry->d_subdirs) { - struct dentry *d = list_entry(node, struct dentry, d_u.d_child); - list_del_init(node); - if (d->d_inode) { - d = dget_locked(d); - spin_unlock(&dcache_lock); - d_delete(d); - simple_unlink(dentry->d_inode, d); - dput(d); - spin_lock(&dcache_lock); - } - node = dentry->d_subdirs.next; - } - list_del_init(&dentry->d_u.d_child); - spin_unlock(&dcache_lock); - remove_dir(dentry); -} - -static struct super_operations cpuset_ops = { - .statfs = simple_statfs, - .drop_inode = generic_delete_inode, -}; - -static int cpuset_fill_super(struct super_block *sb, void *unused_data, - int unused_silent) -{ - struct inode *inode; - struct dentry *root; - - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; - sb->s_magic = CPUSET_SUPER_MAGIC; - sb->s_op = &cpuset_ops; - cpuset_sb = sb; - - inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR); - if (inode) { - inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - /* directories start off with i_nlink == 2 (for "." entry) */ - inc_nlink(inode); - } else { - return -ENOMEM; - } - - root = d_alloc_root(inode); - if (!root) { - iput(inode); - return -ENOMEM; - } - sb->s_root = root; - return 0; -} - +/* This is ugly, but preserves the userspace API for existing cpuset + * users. 
If someone tries to mount the "cpuset" filesystem, we + * silently switch it to mount "cgroup" instead */ static int cpuset_get_sb(struct file_system_type *fs_type, int flags, const char *unused_dev_name, void *data, struct vfsmount *mnt) { - return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt); + struct file_system_type *cgroup_fs = get_fs_type("cgroup"); + int ret = -ENODEV; + if (cgroup_fs) { + char mountopts[] = + "cpuset,noprefix," + "release_agent=/sbin/cpuset_release_agent"; + ret = cgroup_fs->get_sb(cgroup_fs, flags, + unused_dev_name, mountopts, mnt); + put_filesystem(cgroup_fs); + } + return ret; } static struct file_system_type cpuset_fs_type = { .name = "cpuset", .get_sb = cpuset_get_sb, - .kill_sb = kill_litter_super, }; -/* struct cftype: - * - * The files in the cpuset filesystem mostly have a very simple read/write - * handling, some common function will take care of it. Nevertheless some cases - * (read tasks) are special and therefore I define this structure for every - * kind of file. - * - * - * When reading/writing to a file: - * - the cpuset to use in file->f_path.dentry->d_parent->d_fsdata - * - the 'cftype' of the file is file->f_path.dentry->d_fsdata - */ - -struct cftype { - char *name; - int private; - int (*open) (struct inode *inode, struct file *file); - ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos); - int (*write) (struct file *file, const char __user *buf, size_t nbytes, - loff_t *ppos); - int (*release) (struct inode *inode, struct file *file); -}; - -static inline struct cpuset *__d_cs(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - -static inline struct cftype *__d_cft(struct dentry *dentry) -{ - return dentry->d_fsdata; -} - -/* - * Call with manage_mutex held. Writes path of cpuset into buf. - * Returns 0 on success, -errno on error. - */ - -static int cpuset_path(const struct cpuset *cs, char *buf, int buflen) -{ - char *start; - - start = buf + buflen; - - *--start = '\0'; - for (;;) { - int len = cs->dentry->d_name.len; - if ((start -= len) < buf) - return -ENAMETOOLONG; - memcpy(start, cs->dentry->d_name.name, len); - cs = cs->parent; - if (!cs) - break; - if (!cs->parent) - continue; - if (--start < buf) - return -ENAMETOOLONG; - *start = '/'; - } - memmove(buf, start, buf + buflen - start); - return 0; -} - -/* - * Notify userspace when a cpuset is released, by running - * /sbin/cpuset_release_agent with the name of the cpuset (path - * relative to the root of cpuset file system) as the argument. - * - * Most likely, this user command will try to rmdir this cpuset. - * - * This races with the possibility that some other task will be - * attached to this cpuset before it is removed, or that some other - * user task will 'mkdir' a child cpuset of this cpuset. That's ok. - * The presumed 'rmdir' will fail quietly if this cpuset is no longer - * unused, and this cpuset will be reprieved from its death sentence, - * to continue to serve a useful existence. Next time it's released, - * we will get notified again, if it still has 'notify_on_release' set. - * - * The final arg to call_usermodehelper() is 0, which means don't - * wait. The separate /sbin/cpuset_release_agent task is forked by - * call_usermodehelper(), then control in this thread returns here, - * without waiting for the release agent task. We don't bother to - * wait because the caller of this routine has no use for the exit - * status of the /sbin/cpuset_release_agent task, so no sense holding - * our caller up for that. 
- * - * When we had only one cpuset mutex, we had to call this - * without holding it, to avoid deadlock when call_usermodehelper() - * allocated memory. With two locks, we could now call this while - * holding manage_mutex, but we still don't, so as to minimize - * the time manage_mutex is held. - */ - -static void cpuset_release_agent(const char *pathbuf) -{ - char *argv[3], *envp[3]; - int i; - - if (!pathbuf) - return; - - i = 0; - argv[i++] = "/sbin/cpuset_release_agent"; - argv[i++] = (char *)pathbuf; - argv[i] = NULL; - - i = 0; - /* minimal command environment */ - envp[i++] = "HOME=/"; - envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; - envp[i] = NULL; - - call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); - kfree(pathbuf); -} - -/* - * Either cs->count of using tasks transitioned to zero, or the - * cs->children list of child cpusets just became empty. If this - * cs is notify_on_release() and now both the user count is zero and - * the list of children is empty, prepare cpuset path in a kmalloc'd - * buffer, to be returned via ppathbuf, so that the caller can invoke - * cpuset_release_agent() with it later on, once manage_mutex is dropped. - * Call here with manage_mutex held. - * - * This check_for_release() routine is responsible for kmalloc'ing - * pathbuf. The above cpuset_release_agent() is responsible for - * kfree'ing pathbuf. The caller of these routines is responsible - * for providing a pathbuf pointer, initialized to NULL, then - * calling check_for_release() with manage_mutex held and the address - * of the pathbuf pointer, then dropping manage_mutex, then calling - * cpuset_release_agent() with pathbuf, as set by check_for_release(). - */ - -static void check_for_release(struct cpuset *cs, char **ppathbuf) -{ - if (notify_on_release(cs) && atomic_read(&cs->count) == 0 && - list_empty(&cs->children)) { - char *buf; - - buf = kmalloc(PAGE_SIZE, GFP_KERNEL); - if (!buf) - return; - if (cpuset_path(cs, buf, PAGE_SIZE) < 0) - kfree(buf); - else - *ppathbuf = buf; - } -} - /* * Return in *pmask the portion of a cpusets's cpus_allowed that * are online. If none are online, walk up the cpuset hierarchy @@ -653,20 +390,19 @@ void cpuset_update_task_memory_state(void) struct task_struct *tsk = current; struct cpuset *cs; - if (tsk->cpuset == &top_cpuset) { + if (task_cs(tsk) == &top_cpuset) { /* Don't need rcu for top_cpuset. It's never freed. 
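
A note on the hunk above: it is the first of many in this patch that replace direct tsk->cpuset dereferences with task_cs() (and, for cgroup pointers, cgroup_cs()). Neither helper is visible in this excerpt; presumably they are the usual container_of() wrappers over the cgroup subsystem state, roughly as sketched below (the exact definitions are an assumption based on the cpuset_subsys_id / css naming used elsewhere in this diff):

	/* Sketch only: map a cgroup to its cpuset via the embedded css. */
	static inline struct cpuset *cgroup_cs(struct cgroup *cont)
	{
		return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
				    struct cpuset, css);
	}

	/* Sketch only: map a task to its cpuset; callers hold rcu_read_lock()
	 * or task_lock(), as the surrounding hunk shows. */
	static inline struct cpuset *task_cs(struct task_struct *task)
	{
		return container_of(task_subsys_state(task, cpuset_subsys_id),
				    struct cpuset, css);
	}
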
*/ my_cpusets_mem_gen = top_cpuset.mems_generation; } else { rcu_read_lock(); - cs = rcu_dereference(tsk->cpuset); - my_cpusets_mem_gen = cs->mems_generation; + my_cpusets_mem_gen = task_cs(current)->mems_generation; rcu_read_unlock(); } if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) { mutex_lock(&callback_mutex); task_lock(tsk); - cs = tsk->cpuset; /* Maybe changed when task not locked */ + cs = task_cs(tsk); /* Maybe changed when task not locked */ guarantee_online_mems(cs, &tsk->mems_allowed); tsk->cpuset_mems_generation = cs->mems_generation; if (is_spread_page(cs)) @@ -721,11 +457,12 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) static int validate_change(const struct cpuset *cur, const struct cpuset *trial) { + struct cgroup *cont; struct cpuset *c, *par; /* Each of our child cpusets must be a subset of us */ - list_for_each_entry(c, &cur->children, sibling) { - if (!is_cpuset_subset(c, trial)) + list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { + if (!is_cpuset_subset(cgroup_cs(cont), trial)) return -EBUSY; } @@ -740,7 +477,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) return -EACCES; /* If either I or some sibling (!= me) is exclusive, we can't overlap */ - list_for_each_entry(c, &par->children, sibling) { + list_for_each_entry(cont, &par->css.cgroup->children, sibling) { + c = cgroup_cs(cont); if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && c != cur && cpus_intersects(trial->cpus_allowed, c->cpus_allowed)) @@ -751,17 +489,265 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) return -EINVAL; } + /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */ + if (cgroup_task_count(cur->css.cgroup)) { + if (cpus_empty(trial->cpus_allowed) || + nodes_empty(trial->mems_allowed)) { + return -ENOSPC; + } + } + return 0; } /* + * Helper routine for rebuild_sched_domains(). + * Do cpusets a, b have overlapping cpus_allowed masks? + */ + +static int cpusets_overlap(struct cpuset *a, struct cpuset *b) +{ + return cpus_intersects(a->cpus_allowed, b->cpus_allowed); +} + +/* + * rebuild_sched_domains() + * + * If the flag 'sched_load_balance' of any cpuset with non-empty + * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset + * which has that flag enabled, or if any cpuset with a non-empty + * 'cpus' is removed, then call this routine to rebuild the + * scheduler's dynamic sched domains. + * + * This routine builds a partial partition of the systems CPUs + * (the set of non-overlappping cpumask_t's in the array 'part' + * below), and passes that partial partition to the kernel/sched.c + * partition_sched_domains() routine, which will rebuild the + * schedulers load balancing domains (sched domains) as specified + * by that partial partition. A 'partial partition' is a set of + * non-overlapping subsets whose union is a subset of that set. + * + * See "What is sched_load_balance" in Documentation/cpusets.txt + * for a background explanation of this. + * + * Does not return errors, on the theory that the callers of this + * routine would rather not worry about failures to rebuild sched + * domains when operating in the severe memory shortage situations + * that could cause allocation failures below. + * + * Call with cgroup_mutex held. May take callback_mutex during + * call due to the kfifo_alloc() and kmalloc() calls. May nest + * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair. 
+ * Must not be called holding callback_mutex, because we must not + * call lock_cpu_hotplug() while holding callback_mutex. Elsewhere + * the kernel nests callback_mutex inside lock_cpu_hotplug() calls. + * So the reverse nesting would risk an ABBA deadlock. + * + * The three key local variables below are: + * q - a kfifo queue of cpuset pointers, used to implement a + * top-down scan of all cpusets. This scan loads a pointer + * to each cpuset marked is_sched_load_balance into the + * array 'csa'. For our purposes, rebuilding the schedulers + * sched domains, we can ignore !is_sched_load_balance cpusets. + * csa - (for CpuSet Array) Array of pointers to all the cpusets + * that need to be load balanced, for convenient iterative + * access by the subsequent code that finds the best partition, + * i.e the set of domains (subsets) of CPUs such that the + * cpus_allowed of every cpuset marked is_sched_load_balance + * is a subset of one of these domains, while there are as + * many such domains as possible, each as small as possible. + * doms - Conversion of 'csa' to an array of cpumasks, for passing to + * the kernel/sched.c routine partition_sched_domains() in a + * convenient format, that can be easily compared to the prior + * value to determine what partition elements (sched domains) + * were changed (added or removed.) + * + * Finding the best partition (set of domains): + * The triple nested loops below over i, j, k scan over the + * load balanced cpusets (using the array of cpuset pointers in + * csa[]) looking for pairs of cpusets that have overlapping + * cpus_allowed, but which don't have the same 'pn' partition + * number and gives them in the same partition number. It keeps + * looping on the 'restart' label until it can no longer find + * any such pairs. + * + * The union of the cpus_allowed masks from the set of + * all cpusets having the same 'pn' value then form the one + * element of the partition (one sched domain) to be passed to + * partition_sched_domains(). + */ + +static void rebuild_sched_domains(void) +{ + struct kfifo *q; /* queue of cpusets to be scanned */ + struct cpuset *cp; /* scans q */ + struct cpuset **csa; /* array of all cpuset ptrs */ + int csn; /* how many cpuset ptrs in csa so far */ + int i, j, k; /* indices for partition finding loops */ + cpumask_t *doms; /* resulting partition; i.e. 
sched domains */ + int ndoms; /* number of sched domains in result */ + int nslot; /* next empty doms[] cpumask_t slot */ + + q = NULL; + csa = NULL; + doms = NULL; + + /* Special case for the 99% of systems with one, full, sched domain */ + if (is_sched_load_balance(&top_cpuset)) { + ndoms = 1; + doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!doms) + goto rebuild; + *doms = top_cpuset.cpus_allowed; + goto rebuild; + } + + q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL); + if (IS_ERR(q)) + goto done; + csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL); + if (!csa) + goto done; + csn = 0; + + cp = &top_cpuset; + __kfifo_put(q, (void *)&cp, sizeof(cp)); + while (__kfifo_get(q, (void *)&cp, sizeof(cp))) { + struct cgroup *cont; + struct cpuset *child; /* scans child cpusets of cp */ + if (is_sched_load_balance(cp)) + csa[csn++] = cp; + list_for_each_entry(cont, &cp->css.cgroup->children, sibling) { + child = cgroup_cs(cont); + __kfifo_put(q, (void *)&child, sizeof(cp)); + } + } + + for (i = 0; i < csn; i++) + csa[i]->pn = i; + ndoms = csn; + +restart: + /* Find the best partition (set of sched domains) */ + for (i = 0; i < csn; i++) { + struct cpuset *a = csa[i]; + int apn = a->pn; + + for (j = 0; j < csn; j++) { + struct cpuset *b = csa[j]; + int bpn = b->pn; + + if (apn != bpn && cpusets_overlap(a, b)) { + for (k = 0; k < csn; k++) { + struct cpuset *c = csa[k]; + + if (c->pn == bpn) + c->pn = apn; + } + ndoms--; /* one less element */ + goto restart; + } + } + } + + /* Convert <csn, csa> to <ndoms, doms> */ + doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL); + if (!doms) + goto rebuild; + + for (nslot = 0, i = 0; i < csn; i++) { + struct cpuset *a = csa[i]; + int apn = a->pn; + + if (apn >= 0) { + cpumask_t *dp = doms + nslot; + + if (nslot == ndoms) { + static int warnings = 10; + if (warnings) { + printk(KERN_WARNING + "rebuild_sched_domains confused:" + " nslot %d, ndoms %d, csn %d, i %d," + " apn %d\n", + nslot, ndoms, csn, i, apn); + warnings--; + } + continue; + } + + cpus_clear(*dp); + for (j = i; j < csn; j++) { + struct cpuset *b = csa[j]; + + if (apn == b->pn) { + cpus_or(*dp, *dp, b->cpus_allowed); + b->pn = -1; + } + } + nslot++; + } + } + BUG_ON(nslot != ndoms); + +rebuild: + /* Have scheduler rebuild sched domains */ + lock_cpu_hotplug(); + partition_sched_domains(ndoms, doms); + unlock_cpu_hotplug(); + +done: + if (q && !IS_ERR(q)) + kfifo_free(q); + kfree(csa); + /* Don't kfree(doms) -- partition_sched_domains() does that. */ +} + +static inline int started_after_time(struct task_struct *t1, + struct timespec *time, + struct task_struct *t2) +{ + int start_diff = timespec_compare(&t1->start_time, time); + if (start_diff > 0) { + return 1; + } else if (start_diff < 0) { + return 0; + } else { + /* + * Arbitrarily, if two processes started at the same + * time, we'll say that the lower pointer value + * started first. Note that t2 may have exited by now + * so this may not be a valid pointer any longer, but + * that's fine - it still serves to distinguish + * between two tasks started (effectively) + * simultaneously. + */ + return t1 > t2; + } +} + +static inline int started_after(void *p1, void *p2) +{ + struct task_struct *t1 = p1; + struct task_struct *t2 = p2; + return started_after_time(t1, &t2->start_time, t2); +} + +/* * Call with manage_mutex held. May take callback_mutex during call. 
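
The partition step in rebuild_sched_domains() above is easier to see on a toy input. The stand-alone sketch below uses plain bitmasks in place of cpumask_t and made-up values; it is only meant to show the "merge partition numbers while any two groups overlap" idea, not kernel code. Three cpusets with CPUs {0,1}, {1,2} and {4,5} collapse into two sched domains, {0,1,2} and {4,5}:

	#include <stdio.h>

	int main(void)
	{
		unsigned long cpus[] = { 0x3, 0x6, 0x30 };	/* {0,1} {1,2} {4,5} */
		int pn[] = { 0, 1, 2 };				/* partition numbers */
		int n = 3, i, j, k, changed = 1;

		while (changed) {
			changed = 0;
			for (i = 0; i < n; i++)
				for (j = 0; j < n; j++)
					if (pn[i] != pn[j] && (cpus[i] & cpus[j])) {
						/* overlapping groups must share a domain */
						for (k = 0; k < n; k++)
							if (pn[k] == pn[j])
								pn[k] = pn[i];
						changed = 1;
					}
		}
		for (i = 0; i < n; i++)
			printf("cpuset %d (mask 0x%lx) -> domain %d\n",
			       i, cpus[i], pn[i]);
		return 0;
	}

The kernel version does the same merging with the 'pn' fields of the csa[] array, then ORs each surviving group's cpus_allowed into one doms[] entry for partition_sched_domains().
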
*/ static int update_cpumask(struct cpuset *cs, char *buf) { struct cpuset trialcs; - int retval; + int retval, i; + int is_load_balanced; + struct cgroup_iter it; + struct cgroup *cgrp = cs->css.cgroup; + struct task_struct *p, *dropped; + /* Never dereference latest_task, since it's not refcounted */ + struct task_struct *latest_task = NULL; + struct ptr_heap heap; + struct timespec latest_time = { 0, 0 }; /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ if (cs == &top_cpuset) @@ -770,11 +756,13 @@ static int update_cpumask(struct cpuset *cs, char *buf) trialcs = *cs; /* - * We allow a cpuset's cpus_allowed to be empty; if it has attached - * tasks, we'll catch it later when we validate the change and return - * -ENOSPC. + * An empty cpus_allowed is ok iff there are no tasks in the cpuset. + * Since cpulist_parse() fails on an empty mask, we special case + * that parsing. The validate_change() call ensures that cpusets + * with tasks have cpus. */ - if (!buf[0] || (buf[0] == '\n' && !buf[1])) { + buf = strstrip(buf); + if (!*buf) { cpus_clear(trialcs.cpus_allowed); } else { retval = cpulist_parse(buf, trialcs.cpus_allowed); @@ -782,15 +770,79 @@ static int update_cpumask(struct cpuset *cs, char *buf) return retval; } cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map); - /* cpus_allowed cannot be empty for a cpuset with attached tasks. */ - if (atomic_read(&cs->count) && cpus_empty(trialcs.cpus_allowed)) - return -ENOSPC; retval = validate_change(cs, &trialcs); if (retval < 0) return retval; + + /* Nothing to do if the cpus didn't change */ + if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) + return 0; + retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after); + if (retval) + return retval; + + is_load_balanced = is_sched_load_balance(&trialcs); + mutex_lock(&callback_mutex); cs->cpus_allowed = trialcs.cpus_allowed; mutex_unlock(&callback_mutex); + + again: + /* + * Scan tasks in the cpuset, and update the cpumasks of any + * that need an update. Since we can't call set_cpus_allowed() + * while holding tasklist_lock, gather tasks to be processed + * in a heap structure. If the statically-sized heap fills up, + * overflow tasks that started later, and in future iterations + * only consider tasks that started after the latest task in + * the previous pass. This guarantees forward progress and + * that we don't miss any tasks + */ + heap.size = 0; + cgroup_iter_start(cgrp, &it); + while ((p = cgroup_iter_next(cgrp, &it))) { + /* Only affect tasks that don't have the right cpus_allowed */ + if (cpus_equal(p->cpus_allowed, cs->cpus_allowed)) + continue; + /* + * Only process tasks that started after the last task + * we processed + */ + if (!started_after_time(p, &latest_time, latest_task)) + continue; + dropped = heap_insert(&heap, p); + if (dropped == NULL) { + get_task_struct(p); + } else if (dropped != p) { + get_task_struct(p); + put_task_struct(dropped); + } + } + cgroup_iter_end(cgrp, &it); + if (heap.size) { + for (i = 0; i < heap.size; i++) { + struct task_struct *p = heap.ptrs[i]; + if (i == 0) { + latest_time = p->start_time; + latest_task = p; + } + set_cpus_allowed(p, cs->cpus_allowed); + put_task_struct(p); + } + /* + * If we had to process any tasks at all, scan again + * in case some of them were in the middle of forking + * children that didn't notice the new cpumask + * restriction. 
Not the most efficient way to do it, + * but it avoids having to take callback_mutex in the + * fork path + */ + goto again; + } + heap_free(&heap); + if (is_load_balanced) + rebuild_sched_domains(); + return 0; } @@ -839,7 +891,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); mutex_lock(&callback_mutex); - guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed); + guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed); mutex_unlock(&callback_mutex); } @@ -857,16 +909,19 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, * their mempolicies to the cpusets new mems_allowed. */ +static void *cpuset_being_rebound; + static int update_nodemask(struct cpuset *cs, char *buf) { struct cpuset trialcs; nodemask_t oldmem; - struct task_struct *g, *p; + struct task_struct *p; struct mm_struct **mmarray; int i, n, ntasks; int migrate; int fudge; int retval; + struct cgroup_iter it; /* * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; @@ -878,29 +933,19 @@ static int update_nodemask(struct cpuset *cs, char *buf) trialcs = *cs; /* - * We allow a cpuset's mems_allowed to be empty; if it has attached - * tasks, we'll catch it later when we validate the change and return - * -ENOSPC. + * An empty mems_allowed is ok iff there are no tasks in the cpuset. + * Since nodelist_parse() fails on an empty mask, we special case + * that parsing. The validate_change() call ensures that cpusets + * with tasks have memory. */ - if (!buf[0] || (buf[0] == '\n' && !buf[1])) { + buf = strstrip(buf); + if (!*buf) { nodes_clear(trialcs.mems_allowed); } else { retval = nodelist_parse(buf, trialcs.mems_allowed); if (retval < 0) goto done; - if (!nodes_intersects(trialcs.mems_allowed, - node_states[N_HIGH_MEMORY])) { - /* - * error if only memoryless nodes specified. - */ - retval = -ENOSPC; - goto done; - } } - /* - * Exclude memoryless nodes. We know that trialcs.mems_allowed - * contains at least one node with memory. - */ nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_states[N_HIGH_MEMORY]); oldmem = cs->mems_allowed; @@ -908,11 +953,6 @@ static int update_nodemask(struct cpuset *cs, char *buf) retval = 0; /* Too easy - nothing to do */ goto done; } - /* mems_allowed cannot be empty for a cpuset with attached tasks. */ - if (atomic_read(&cs->count) && nodes_empty(trialcs.mems_allowed)) { - retval = -ENOSPC; - goto done; - } retval = validate_change(cs, &trialcs); if (retval < 0) goto done; @@ -922,7 +962,7 @@ static int update_nodemask(struct cpuset *cs, char *buf) cs->mems_generation = cpuset_mems_generation++; mutex_unlock(&callback_mutex); - set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */ + cpuset_being_rebound = cs; /* causes mpol_copy() rebind */ fudge = 10; /* spare mmarray[] slots */ fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */ @@ -936,13 +976,13 @@ static int update_nodemask(struct cpuset *cs, char *buf) * enough mmarray[] w/o using GFP_ATOMIC. 
*/ while (1) { - ntasks = atomic_read(&cs->count); /* guess */ + ntasks = cgroup_task_count(cs->css.cgroup); /* guess */ ntasks += fudge; mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL); if (!mmarray) goto done; read_lock(&tasklist_lock); /* block fork */ - if (atomic_read(&cs->count) <= ntasks) + if (cgroup_task_count(cs->css.cgroup) <= ntasks) break; /* got enough */ read_unlock(&tasklist_lock); /* try again */ kfree(mmarray); @@ -951,21 +991,21 @@ static int update_nodemask(struct cpuset *cs, char *buf) n = 0; /* Load up mmarray[] with mm reference for each task in cpuset. */ - do_each_thread(g, p) { + cgroup_iter_start(cs->css.cgroup, &it); + while ((p = cgroup_iter_next(cs->css.cgroup, &it))) { struct mm_struct *mm; if (n >= ntasks) { printk(KERN_WARNING "Cpuset mempolicy rebind incomplete.\n"); - continue; + break; } - if (p->cpuset != cs) - continue; mm = get_task_mm(p); if (!mm) continue; mmarray[n++] = mm; - } while_each_thread(g, p); + } + cgroup_iter_end(cs->css.cgroup, &it); read_unlock(&tasklist_lock); /* @@ -993,12 +1033,17 @@ static int update_nodemask(struct cpuset *cs, char *buf) /* We're done rebinding vma's to this cpusets new mems_allowed. */ kfree(mmarray); - set_cpuset_being_rebound(NULL); + cpuset_being_rebound = NULL; retval = 0; done: return retval; } +int current_cpuset_is_being_rebound(void) +{ + return task_cs(current) == cpuset_being_rebound; +} + /* * Call with manage_mutex held. */ @@ -1015,6 +1060,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) /* * update_flag - read a 0 or a 1 in a file and update associated flag * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, + * CS_SCHED_LOAD_BALANCE, * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE, * CS_SPREAD_PAGE, CS_SPREAD_SLAB) * cs: the cpuset to update @@ -1028,6 +1074,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) int turning_on; struct cpuset trialcs; int err; + int cpus_nonempty, balance_flag_changed; turning_on = (simple_strtoul(buf, NULL, 10) != 0); @@ -1040,10 +1087,18 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) err = validate_change(cs, &trialcs); if (err < 0) return err; + + cpus_nonempty = !cpus_empty(trialcs.cpus_allowed); + balance_flag_changed = (is_sched_load_balance(cs) != + is_sched_load_balance(&trialcs)); + mutex_lock(&callback_mutex); cs->flags = trialcs.flags; mutex_unlock(&callback_mutex); + if (cpus_nonempty && balance_flag_changed) + rebuild_sched_domains(); + return 0; } @@ -1145,85 +1200,34 @@ static int fmeter_getrate(struct fmeter *fmp) return val; } -/* - * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly - * writing the path of the old cpuset in 'ppathbuf' if it needs to be - * notified on release. - * - * Call holding manage_mutex. May take callback_mutex and task_lock of - * the task 'pid' during call. 
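
attach_task() and its pid parsing are deleted in the hunk that follows; writing a pid to the "tasks" file is now handled by the cgroup core, which is expected to invoke the subsystem's can_attach() and attach() hooks roughly as sketched here. The real sequencing lives in kernel/cgroup.c and is not part of this diff; attach_flow_sketch() is illustrative only, but the callback signatures match the new cpuset_can_attach()/cpuset_attach() below:

	static int attach_flow_sketch(struct cgroup_subsys *ss, struct cgroup *cgrp,
				      struct cgroup *oldcgrp, struct task_struct *tsk)
	{
		int ret;

		if (ss->can_attach) {
			ret = ss->can_attach(ss, cgrp, tsk);	/* cpuset_can_attach() */
			if (ret)
				return ret;
		}
		/* ...the cgroup core re-points tsk's css_set from oldcgrp to cgrp... */
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk);	/* cpuset_attach() */
		return 0;
	}
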
- */ - -static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) +static int cpuset_can_attach(struct cgroup_subsys *ss, + struct cgroup *cont, struct task_struct *tsk) { - pid_t pid; - struct task_struct *tsk; - struct cpuset *oldcs; - cpumask_t cpus; - nodemask_t from, to; - struct mm_struct *mm; - int retval; + struct cpuset *cs = cgroup_cs(cont); - if (sscanf(pidbuf, "%d", &pid) != 1) - return -EIO; if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) return -ENOSPC; - if (pid) { - read_lock(&tasklist_lock); - - tsk = find_task_by_pid(pid); - if (!tsk || tsk->flags & PF_EXITING) { - read_unlock(&tasklist_lock); - return -ESRCH; - } - - get_task_struct(tsk); - read_unlock(&tasklist_lock); - - if ((current->euid) && (current->euid != tsk->uid) - && (current->euid != tsk->suid)) { - put_task_struct(tsk); - return -EACCES; - } - } else { - tsk = current; - get_task_struct(tsk); - } + return security_task_setscheduler(tsk, 0, NULL); +} - retval = security_task_setscheduler(tsk, 0, NULL); - if (retval) { - put_task_struct(tsk); - return retval; - } +static void cpuset_attach(struct cgroup_subsys *ss, + struct cgroup *cont, struct cgroup *oldcont, + struct task_struct *tsk) +{ + cpumask_t cpus; + nodemask_t from, to; + struct mm_struct *mm; + struct cpuset *cs = cgroup_cs(cont); + struct cpuset *oldcs = cgroup_cs(oldcont); mutex_lock(&callback_mutex); - - task_lock(tsk); - oldcs = tsk->cpuset; - /* - * After getting 'oldcs' cpuset ptr, be sure still not exiting. - * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack - * then fail this attach_task(), to avoid breaking top_cpuset.count. - */ - if (tsk->flags & PF_EXITING) { - task_unlock(tsk); - mutex_unlock(&callback_mutex); - put_task_struct(tsk); - return -ESRCH; - } - atomic_inc(&cs->count); - rcu_assign_pointer(tsk->cpuset, cs); - task_unlock(tsk); - guarantee_online_cpus(cs, &cpus); set_cpus_allowed(tsk, cpus); + mutex_unlock(&callback_mutex); from = oldcs->mems_allowed; to = cs->mems_allowed; - - mutex_unlock(&callback_mutex); - mm = get_task_mm(tsk); if (mm) { mpol_rebind_mm(mm, &to); @@ -1232,44 +1236,36 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf) mmput(mm); } - put_task_struct(tsk); - synchronize_rcu(); - if (atomic_dec_and_test(&oldcs->count)) - check_for_release(oldcs, ppathbuf); - return 0; } /* The various types of files and directories in a cpuset file system */ typedef enum { - FILE_ROOT, - FILE_DIR, FILE_MEMORY_MIGRATE, FILE_CPULIST, FILE_MEMLIST, FILE_CPU_EXCLUSIVE, FILE_MEM_EXCLUSIVE, - FILE_NOTIFY_ON_RELEASE, + FILE_SCHED_LOAD_BALANCE, FILE_MEMORY_PRESSURE_ENABLED, FILE_MEMORY_PRESSURE, FILE_SPREAD_PAGE, FILE_SPREAD_SLAB, - FILE_TASKLIST, } cpuset_filetype_t; -static ssize_t cpuset_common_file_write(struct file *file, +static ssize_t cpuset_common_file_write(struct cgroup *cont, + struct cftype *cft, + struct file *file, const char __user *userbuf, size_t nbytes, loff_t *unused_ppos) { - struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); - struct cftype *cft = __d_cft(file->f_path.dentry); + struct cpuset *cs = cgroup_cs(cont); cpuset_filetype_t type = cft->private; char *buffer; - char *pathbuf = NULL; int retval = 0; /* Crude upper limit on largest legitimate cpulist user might write. 
*/ - if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES)) + if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES)) return -E2BIG; /* +1 for nul-terminator */ @@ -1282,9 +1278,9 @@ static ssize_t cpuset_common_file_write(struct file *file, } buffer[nbytes] = 0; /* nul-terminate */ - mutex_lock(&manage_mutex); + cgroup_lock(); - if (is_removed(cs)) { + if (cgroup_is_removed(cont)) { retval = -ENODEV; goto out2; } @@ -1302,8 +1298,8 @@ static ssize_t cpuset_common_file_write(struct file *file, case FILE_MEM_EXCLUSIVE: retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer); break; - case FILE_NOTIFY_ON_RELEASE: - retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer); + case FILE_SCHED_LOAD_BALANCE: + retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer); break; case FILE_MEMORY_MIGRATE: retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer); @@ -1322,9 +1318,6 @@ static ssize_t cpuset_common_file_write(struct file *file, retval = update_flag(CS_SPREAD_SLAB, cs, buffer); cs->mems_generation = cpuset_mems_generation++; break; - case FILE_TASKLIST: - retval = attach_task(cs, buffer, &pathbuf); - break; default: retval = -EINVAL; goto out2; @@ -1333,30 +1326,12 @@ static ssize_t cpuset_common_file_write(struct file *file, if (retval == 0) retval = nbytes; out2: - mutex_unlock(&manage_mutex); - cpuset_release_agent(pathbuf); + cgroup_unlock(); out1: kfree(buffer); return retval; } -static ssize_t cpuset_file_write(struct file *file, const char __user *buf, - size_t nbytes, loff_t *ppos) -{ - ssize_t retval = 0; - struct cftype *cft = __d_cft(file->f_path.dentry); - if (!cft) - return -ENODEV; - - /* special function ? */ - if (cft->write) - retval = cft->write(file, buf, nbytes, ppos); - else - retval = cpuset_common_file_write(file, buf, nbytes, ppos); - - return retval; -} - /* * These ascii lists should be read in a single call, by using a user * buffer large enough to hold the entire map. If read in smaller @@ -1391,11 +1366,13 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) return nodelist_scnprintf(page, PAGE_SIZE, mask); } -static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) +static ssize_t cpuset_common_file_read(struct cgroup *cont, + struct cftype *cft, + struct file *file, + char __user *buf, + size_t nbytes, loff_t *ppos) { - struct cftype *cft = __d_cft(file->f_path.dentry); - struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); + struct cpuset *cs = cgroup_cs(cont); cpuset_filetype_t type = cft->private; char *page; ssize_t retval = 0; @@ -1419,8 +1396,8 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, case FILE_MEM_EXCLUSIVE: *s++ = is_mem_exclusive(cs) ? '1' : '0'; break; - case FILE_NOTIFY_ON_RELEASE: - *s++ = notify_on_release(cs) ? '1' : '0'; + case FILE_SCHED_LOAD_BALANCE: + *s++ = is_sched_load_balance(cs) ? '1' : '0'; break; case FILE_MEMORY_MIGRATE: *s++ = is_memory_migrate(cs) ? '1' : '0'; @@ -1449,390 +1426,150 @@ out: return retval; } -static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes, - loff_t *ppos) -{ - ssize_t retval = 0; - struct cftype *cft = __d_cft(file->f_path.dentry); - if (!cft) - return -ENODEV; - - /* special function ? 
*/ - if (cft->read) - retval = cft->read(file, buf, nbytes, ppos); - else - retval = cpuset_common_file_read(file, buf, nbytes, ppos); - - return retval; -} -static int cpuset_file_open(struct inode *inode, struct file *file) -{ - int err; - struct cftype *cft; - err = generic_file_open(inode, file); - if (err) - return err; - cft = __d_cft(file->f_path.dentry); - if (!cft) - return -ENODEV; - if (cft->open) - err = cft->open(inode, file); - else - err = 0; - - return err; -} - -static int cpuset_file_release(struct inode *inode, struct file *file) -{ - struct cftype *cft = __d_cft(file->f_path.dentry); - if (cft->release) - return cft->release(inode, file); - return 0; -} - -/* - * cpuset_rename - Only allow simple rename of directories in place. - */ -static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) -{ - if (!S_ISDIR(old_dentry->d_inode->i_mode)) - return -ENOTDIR; - if (new_dentry->d_inode) - return -EEXIST; - if (old_dir != new_dir) - return -EIO; - return simple_rename(old_dir, old_dentry, new_dir, new_dentry); -} - -static const struct file_operations cpuset_file_operations = { - .read = cpuset_file_read, - .write = cpuset_file_write, - .llseek = generic_file_llseek, - .open = cpuset_file_open, - .release = cpuset_file_release, -}; - -static const struct inode_operations cpuset_dir_inode_operations = { - .lookup = simple_lookup, - .mkdir = cpuset_mkdir, - .rmdir = cpuset_rmdir, - .rename = cpuset_rename, -}; - -static int cpuset_create_file(struct dentry *dentry, int mode) -{ - struct inode *inode; - - if (!dentry) - return -ENOENT; - if (dentry->d_inode) - return -EEXIST; - - inode = cpuset_new_inode(mode); - if (!inode) - return -ENOMEM; - - if (S_ISDIR(mode)) { - inode->i_op = &cpuset_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - - /* start off with i_nlink == 2 (for "." entry) */ - inc_nlink(inode); - } else if (S_ISREG(mode)) { - inode->i_size = 0; - inode->i_fop = &cpuset_file_operations; - } - - d_instantiate(dentry, inode); - dget(dentry); /* Extra count - pin the dentry in core */ - return 0; -} - -/* - * cpuset_create_dir - create a directory for an object. - * cs: the cpuset we create the directory for. - * It must have a valid ->parent field - * And we are going to fill its ->dentry field. - * name: The name to give to the cpuset directory. Will be copied. - * mode: mode to set on new directory. - */ - -static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode) -{ - struct dentry *dentry = NULL; - struct dentry *parent; - int error = 0; - - parent = cs->parent->dentry; - dentry = cpuset_get_dentry(parent, name); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - error = cpuset_create_file(dentry, S_IFDIR | mode); - if (!error) { - dentry->d_fsdata = cs; - inc_nlink(parent->d_inode); - cs->dentry = dentry; - } - dput(dentry); - - return error; -} - -static int cpuset_add_file(struct dentry *dir, const struct cftype *cft) -{ - struct dentry *dentry; - int error; - - mutex_lock(&dir->d_inode->i_mutex); - dentry = cpuset_get_dentry(dir, cft->name); - if (!IS_ERR(dentry)) { - error = cpuset_create_file(dentry, 0644 | S_IFREG); - if (!error) - dentry->d_fsdata = (void *)cft; - dput(dentry); - } else - error = PTR_ERR(dentry); - mutex_unlock(&dir->d_inode->i_mutex); - return error; -} - -/* - * Stuff for reading the 'tasks' file. - * - * Reading this file can return large amounts of data if a cpuset has - * *lots* of attached tasks. 
So it may need several calls to read(), - * but we cannot guarantee that the information we produce is correct - * unless we produce it entirely atomically. - * - * Upon tasks file open(), a struct ctr_struct is allocated, that - * will have a pointer to an array (also allocated here). The struct - * ctr_struct * is stored in file->private_data. Its resources will - * be freed by release() when the file is closed. The array is used - * to sprintf the PIDs and then used by read(). - */ - -/* cpusets_tasks_read array */ - -struct ctr_struct { - char *buf; - int bufsz; -}; - -/* - * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'. - * Return actual number of pids loaded. No need to task_lock(p) - * when reading out p->cpuset, as we don't really care if it changes - * on the next cycle, and we are not going to try to dereference it. - */ -static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) -{ - int n = 0; - struct task_struct *g, *p; - - read_lock(&tasklist_lock); - - do_each_thread(g, p) { - if (p->cpuset == cs) { - if (unlikely(n == npids)) - goto array_full; - pidarray[n++] = p->pid; - } - } while_each_thread(g, p); - -array_full: - read_unlock(&tasklist_lock); - return n; -} - -static int cmppid(const void *a, const void *b) -{ - return *(pid_t *)a - *(pid_t *)b; -} - -/* - * Convert array 'a' of 'npids' pid_t's to a string of newline separated - * decimal pids in 'buf'. Don't write more than 'sz' chars, but return - * count 'cnt' of how many chars would be written if buf were large enough. - */ -static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) -{ - int cnt = 0; - int i; - - for (i = 0; i < npids; i++) - cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); - return cnt; -} - -/* - * Handle an open on 'tasks' file. Prepare a buffer listing the - * process id's of tasks currently attached to the cpuset being opened. - * - * Does not require any specific cpuset mutexes, and does not take any. - */ -static int cpuset_tasks_open(struct inode *unused, struct file *file) -{ - struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); - struct ctr_struct *ctr; - pid_t *pidarray; - int npids; - char c; - - if (!(file->f_mode & FMODE_READ)) - return 0; - - ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); - if (!ctr) - goto err0; - - /* - * If cpuset gets more users after we read count, we won't have - * enough space - tough. This race is indistinguishable to the - * caller from the case that the additional cpuset users didn't - * show up until sometime later on. 
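
All of this "tasks" file plumbing (ctr_struct, pid_array_load() and friends) is being removed because the cgroup core now provides the tasks file generically. The per-cgroup task walk that replaces pid_array_load() is the cgroup_iter API already used by update_cpumask() and update_nodemask() above; a minimal illustration, where collect_pids() and the fixed-size array are made up for the example:

	static int collect_pids(struct cgroup *cgrp, pid_t *pids, int max)
	{
		struct cgroup_iter it;
		struct task_struct *p;
		int n = 0;

		cgroup_iter_start(cgrp, &it);
		while ((p = cgroup_iter_next(cgrp, &it))) {
			if (n == max)
				break;		/* caller resizes and retries */
			pids[n++] = p->pid;
		}
		cgroup_iter_end(cgrp, &it);
		return n;
	}
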
- */ - npids = atomic_read(&cs->count); - pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); - if (!pidarray) - goto err1; - - npids = pid_array_load(pidarray, npids, cs); - sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); - - /* Call pid_array_to_buf() twice, first just to get bufsz */ - ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; - ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); - if (!ctr->buf) - goto err2; - ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); - - kfree(pidarray); - file->private_data = ctr; - return 0; - -err2: - kfree(pidarray); -err1: - kfree(ctr); -err0: - return -ENOMEM; -} - -static ssize_t cpuset_tasks_read(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) -{ - struct ctr_struct *ctr = file->private_data; - - return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz); -} - -static int cpuset_tasks_release(struct inode *unused_inode, struct file *file) -{ - struct ctr_struct *ctr; - - if (file->f_mode & FMODE_READ) { - ctr = file->private_data; - kfree(ctr->buf); - kfree(ctr); - } - return 0; -} /* * for the common functions, 'private' gives the type of file */ -static struct cftype cft_tasks = { - .name = "tasks", - .open = cpuset_tasks_open, - .read = cpuset_tasks_read, - .release = cpuset_tasks_release, - .private = FILE_TASKLIST, -}; - static struct cftype cft_cpus = { .name = "cpus", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_CPULIST, }; static struct cftype cft_mems = { .name = "mems", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_MEMLIST, }; static struct cftype cft_cpu_exclusive = { .name = "cpu_exclusive", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_CPU_EXCLUSIVE, }; static struct cftype cft_mem_exclusive = { .name = "mem_exclusive", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_MEM_EXCLUSIVE, }; -static struct cftype cft_notify_on_release = { - .name = "notify_on_release", - .private = FILE_NOTIFY_ON_RELEASE, +static struct cftype cft_sched_load_balance = { + .name = "sched_load_balance", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, + .private = FILE_SCHED_LOAD_BALANCE, }; static struct cftype cft_memory_migrate = { .name = "memory_migrate", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_MEMORY_MIGRATE, }; static struct cftype cft_memory_pressure_enabled = { .name = "memory_pressure_enabled", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_MEMORY_PRESSURE_ENABLED, }; static struct cftype cft_memory_pressure = { .name = "memory_pressure", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_MEMORY_PRESSURE, }; static struct cftype cft_spread_page = { .name = "memory_spread_page", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_SPREAD_PAGE, }; static struct cftype cft_spread_slab = { .name = "memory_spread_slab", + .read = cpuset_common_file_read, + .write = cpuset_common_file_write, .private = FILE_SPREAD_SLAB, }; -static int cpuset_populate_dir(struct dentry *cs_dentry) +static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) { int err; - if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0) - return err; - if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 
0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0) return err; - if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0) + if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0) return err; + /* memory_pressure_enabled is in root cpuset only */ + if (err == 0 && !cont->parent) + err = cgroup_add_file(cont, ss, + &cft_memory_pressure_enabled); return 0; } /* + * post_clone() is called at the end of cgroup_clone(). + * 'cgroup' was just created automatically as a result of + * a cgroup_clone(), and the current task is about to + * be moved into 'cgroup'. + * + * Currently we refuse to set up the cgroup - thereby + * refusing the task to be entered, and as a result refusing + * the sys_unshare() or clone() which initiated it - if any + * sibling cpusets have exclusive cpus or mem. + * + * If this becomes a problem for some users who wish to + * allow that scenario, then cpuset_post_clone() could be + * changed to grant parent->cpus_allowed-sibling_cpus_exclusive + * (and likewise for mems) to the new cgroup. + */ +static void cpuset_post_clone(struct cgroup_subsys *ss, + struct cgroup *cgroup) +{ + struct cgroup *parent, *child; + struct cpuset *cs, *parent_cs; + + parent = cgroup->parent; + list_for_each_entry(child, &parent->children, sibling) { + cs = cgroup_cs(child); + if (is_mem_exclusive(cs) || is_cpu_exclusive(cs)) + return; + } + cs = cgroup_cs(cgroup); + parent_cs = cgroup_cs(parent); + + cs->mems_allowed = parent_cs->mems_allowed; + cs->cpus_allowed = parent_cs->cpus_allowed; + return; +} + +/* * cpuset_create - create a cpuset * parent: cpuset that will be parent of the new cpuset. * name: name of the new cpuset. Will be strcpy'ed. 
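
Taken together with the cpuset_get_sb() redirect earlier in this file, the create/populate/attach conversion above keeps the traditional userspace workflow working unchanged. A minimal userspace illustration follows; the /dev/cpuset mount point, the "demo" group name and the values written are examples only, and error handling is abbreviated:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/stat.h>
	#include <sys/mount.h>

	static int write_str(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, val, strlen(val));
		close(fd);
		return n < 0 ? -1 : 0;
	}

	int main(void)
	{
		char pid[32];

		/* The legacy mount is silently redirected to the cgroup
		 * filesystem with the cpuset subsystem and "noprefix", so the
		 * old file names (cpus, mems, tasks, ...) still appear. */
		mkdir("/dev/cpuset", 0755);
		if (mount("cpuset", "/dev/cpuset", "cpuset", 0, NULL)) {
			perror("mount");
			return 1;
		}

		/* mkdir now goes through the cgroup core's mkdir path into
		 * cpuset_create() + cpuset_populate() instead of the removed
		 * cpuset_mkdir(). */
		mkdir("/dev/cpuset/demo", 0755);
		write_str("/dev/cpuset/demo/cpus", "0");
		write_str("/dev/cpuset/demo/mems", "0");

		/* Writing a pid to "tasks" is handled by the cgroup core,
		 * which calls cpuset_can_attach()/cpuset_attach(). */
		snprintf(pid, sizeof(pid), "%d", getpid());
		write_str("/dev/cpuset/demo/tasks", pid);
		return 0;
	}
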
@@ -1841,106 +1578,77 @@ static int cpuset_populate_dir(struct dentry *cs_dentry) * Must be called with the mutex on the parent inode held */ -static long cpuset_create(struct cpuset *parent, const char *name, int mode) +static struct cgroup_subsys_state *cpuset_create( + struct cgroup_subsys *ss, + struct cgroup *cont) { struct cpuset *cs; - int err; + struct cpuset *parent; + if (!cont->parent) { + /* This is early initialization for the top cgroup */ + top_cpuset.mems_generation = cpuset_mems_generation++; + return &top_cpuset.css; + } + parent = cgroup_cs(cont->parent); cs = kmalloc(sizeof(*cs), GFP_KERNEL); if (!cs) - return -ENOMEM; + return ERR_PTR(-ENOMEM); - mutex_lock(&manage_mutex); cpuset_update_task_memory_state(); cs->flags = 0; - if (notify_on_release(parent)) - set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); if (is_spread_page(parent)) set_bit(CS_SPREAD_PAGE, &cs->flags); if (is_spread_slab(parent)) set_bit(CS_SPREAD_SLAB, &cs->flags); + set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cs->cpus_allowed = CPU_MASK_NONE; cs->mems_allowed = NODE_MASK_NONE; - atomic_set(&cs->count, 0); - INIT_LIST_HEAD(&cs->sibling); - INIT_LIST_HEAD(&cs->children); cs->mems_generation = cpuset_mems_generation++; fmeter_init(&cs->fmeter); cs->parent = parent; - - mutex_lock(&callback_mutex); - list_add(&cs->sibling, &cs->parent->children); number_of_cpusets++; - mutex_unlock(&callback_mutex); - - err = cpuset_create_dir(cs, name, mode); - if (err < 0) - goto err; - - /* - * Release manage_mutex before cpuset_populate_dir() because it - * will down() this new directory's i_mutex and if we race with - * another mkdir, we might deadlock. - */ - mutex_unlock(&manage_mutex); - - err = cpuset_populate_dir(cs->dentry); - /* If err < 0, we have a half-filled directory - oh well ;) */ - return 0; -err: - list_del(&cs->sibling); - mutex_unlock(&manage_mutex); - kfree(cs); - return err; + return &cs->css ; } -static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode) -{ - struct cpuset *c_parent = dentry->d_parent->d_fsdata; - - /* the vfs holds inode->i_mutex already */ - return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); -} +/* + * Locking note on the strange update_flag() call below: + * + * If the cpuset being removed has its flag 'sched_load_balance' + * enabled, then simulate turning sched_load_balance off, which + * will call rebuild_sched_domains(). The lock_cpu_hotplug() + * call in rebuild_sched_domains() must not be made while holding + * callback_mutex. Elsewhere the kernel nests callback_mutex inside + * lock_cpu_hotplug() calls. So the reverse nesting would risk an + * ABBA deadlock. 
+ */ -static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) +static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) { - struct cpuset *cs = dentry->d_fsdata; - struct dentry *d; - struct cpuset *parent; - char *pathbuf = NULL; + struct cpuset *cs = cgroup_cs(cont); - /* the vfs holds both inode->i_mutex already */ - - mutex_lock(&manage_mutex); cpuset_update_task_memory_state(); - if (atomic_read(&cs->count) > 0) { - mutex_unlock(&manage_mutex); - return -EBUSY; - } - if (!list_empty(&cs->children)) { - mutex_unlock(&manage_mutex); - return -EBUSY; - } - parent = cs->parent; - mutex_lock(&callback_mutex); - set_bit(CS_REMOVED, &cs->flags); - list_del(&cs->sibling); /* delete my sibling from parent->children */ - spin_lock(&cs->dentry->d_lock); - d = dget(cs->dentry); - cs->dentry = NULL; - spin_unlock(&d->d_lock); - cpuset_d_remove_dir(d); - dput(d); + + if (is_sched_load_balance(cs)) + update_flag(CS_SCHED_LOAD_BALANCE, cs, "0"); + number_of_cpusets--; - mutex_unlock(&callback_mutex); - if (list_empty(&parent->children)) - check_for_release(parent, &pathbuf); - mutex_unlock(&manage_mutex); - cpuset_release_agent(pathbuf); - return 0; + kfree(cs); } +struct cgroup_subsys cpuset_subsys = { + .name = "cpuset", + .create = cpuset_create, + .destroy = cpuset_destroy, + .can_attach = cpuset_can_attach, + .attach = cpuset_attach, + .populate = cpuset_populate, + .post_clone = cpuset_post_clone, + .subsys_id = cpuset_subsys_id, + .early_init = 1, +}; + /* * cpuset_init_early - just enough so that the calls to * cpuset_update_task_memory_state() in early init code @@ -1949,13 +1657,11 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) int __init cpuset_init_early(void) { - struct task_struct *tsk = current; - - tsk->cpuset = &top_cpuset; - tsk->cpuset->mems_generation = cpuset_mems_generation++; + top_cpuset.mems_generation = cpuset_mems_generation++; return 0; } + /** * cpuset_init - initialize cpusets at system boot * @@ -1964,39 +1670,21 @@ int __init cpuset_init_early(void) int __init cpuset_init(void) { - struct dentry *root; - int err; + int err = 0; top_cpuset.cpus_allowed = CPU_MASK_ALL; top_cpuset.mems_allowed = NODE_MASK_ALL; fmeter_init(&top_cpuset.fmeter); top_cpuset.mems_generation = cpuset_mems_generation++; - - init_task.cpuset = &top_cpuset; + set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); err = register_filesystem(&cpuset_fs_type); if (err < 0) - goto out; - cpuset_mount = kern_mount(&cpuset_fs_type); - if (IS_ERR(cpuset_mount)) { - printk(KERN_ERR "cpuset: could not mount!\n"); - err = PTR_ERR(cpuset_mount); - cpuset_mount = NULL; - goto out; - } - root = cpuset_mount->mnt_sb->s_root; - root->d_fsdata = &top_cpuset; - inc_nlink(root->d_inode); - top_cpuset.dentry = root; - root->d_inode->i_op = &cpuset_dir_inode_operations; + return err; + number_of_cpusets = 1; - err = cpuset_populate_dir(root); - /* memory_pressure_enabled is in root cpuset only */ - if (err == 0) - err = cpuset_add_file(root, &cft_memory_pressure_enabled); -out: - return err; + return 0; } /* @@ -2022,10 +1710,12 @@ out: static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) { + struct cgroup *cont; struct cpuset *c; /* Each of our child cpusets mems must be online */ - list_for_each_entry(c, &cur->children, sibling) { + list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { + c = cgroup_cs(cont); guarantee_online_cpus_mems_in_subtree(c); if (!cpus_empty(c->cpus_allowed)) guarantee_online_cpus(c, 
&c->cpus_allowed); @@ -2053,7 +1743,7 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) static void common_cpu_mem_hotplug_unplug(void) { - mutex_lock(&manage_mutex); + cgroup_lock(); mutex_lock(&callback_mutex); guarantee_online_cpus_mems_in_subtree(&top_cpuset); @@ -2061,7 +1751,7 @@ static void common_cpu_mem_hotplug_unplug(void) top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; mutex_unlock(&callback_mutex); - mutex_unlock(&manage_mutex); + cgroup_unlock(); } /* @@ -2074,8 +1764,8 @@ static void common_cpu_mem_hotplug_unplug(void) * cpu_online_map on each CPU hotplug (cpuhp) event. */ -static int cpuset_handle_cpuhp(struct notifier_block *nb, - unsigned long phase, void *cpu) +static int cpuset_handle_cpuhp(struct notifier_block *unused_nb, + unsigned long phase, void *unused_cpu) { if (phase == CPU_DYING || phase == CPU_DYING_FROZEN) return NOTIFY_DONE; @@ -2113,109 +1803,7 @@ void __init cpuset_init_smp(void) } /** - * cpuset_fork - attach newly forked task to its parents cpuset. - * @tsk: pointer to task_struct of forking parent process. - * - * Description: A task inherits its parent's cpuset at fork(). - * - * A pointer to the shared cpuset was automatically copied in fork.c - * by dup_task_struct(). However, we ignore that copy, since it was - * not made under the protection of task_lock(), so might no longer be - * a valid cpuset pointer. attach_task() might have already changed - * current->cpuset, allowing the previously referenced cpuset to - * be removed and freed. Instead, we task_lock(current) and copy - * its present value of current->cpuset for our freshly forked child. - * - * At the point that cpuset_fork() is called, 'current' is the parent - * task, and the passed argument 'child' points to the child task. - **/ - -void cpuset_fork(struct task_struct *child) -{ - task_lock(current); - child->cpuset = current->cpuset; - atomic_inc(&child->cpuset->count); - task_unlock(current); -} - -/** - * cpuset_exit - detach cpuset from exiting task - * @tsk: pointer to task_struct of exiting process - * - * Description: Detach cpuset from @tsk and release it. - * - * Note that cpusets marked notify_on_release force every task in - * them to take the global manage_mutex mutex when exiting. - * This could impact scaling on very large systems. Be reluctant to - * use notify_on_release cpusets where very high task exit scaling - * is required on large systems. - * - * Don't even think about derefencing 'cs' after the cpuset use count - * goes to zero, except inside a critical section guarded by manage_mutex - * or callback_mutex. Otherwise a zero cpuset use count is a license to - * any other task to nuke the cpuset immediately, via cpuset_rmdir(). - * - * This routine has to take manage_mutex, not callback_mutex, because - * it is holding that mutex while calling check_for_release(), - * which calls kmalloc(), so can't be called holding callback_mutex(). - * - * the_top_cpuset_hack: - * - * Set the exiting tasks cpuset to the root cpuset (top_cpuset). - * - * Don't leave a task unable to allocate memory, as that is an - * accident waiting to happen should someone add a callout in - * do_exit() after the cpuset_exit() call that might allocate. - * If a task tries to allocate memory with an invalid cpuset, - * it will oops in cpuset_update_task_memory_state(). - * - * We call cpuset_exit() while the task is still competent to - * handle notify_on_release(), then leave the task attached to - * the root cpuset (top_cpuset) for the remainder of its exit. 
- * - * To do this properly, we would increment the reference count on - * top_cpuset, and near the very end of the kernel/exit.c do_exit() - * code we would add a second cpuset function call, to drop that - * reference. This would just create an unnecessary hot spot on - * the top_cpuset reference count, to no avail. - * - * Normally, holding a reference to a cpuset without bumping its - * count is unsafe. The cpuset could go away, or someone could - * attach us to a different cpuset, decrementing the count on - * the first cpuset that we never incremented. But in this case, - * top_cpuset isn't going away, and either task has PF_EXITING set, - * which wards off any attach_task() attempts, or task is a failed - * fork, never visible to attach_task. - * - * Another way to do this would be to set the cpuset pointer - * to NULL here, and check in cpuset_update_task_memory_state() - * for a NULL pointer. This hack avoids that NULL check, for no - * cost (other than this way too long comment ;). - **/ - -void cpuset_exit(struct task_struct *tsk) -{ - struct cpuset *cs; - - task_lock(current); - cs = tsk->cpuset; - tsk->cpuset = &top_cpuset; /* the_top_cpuset_hack - see above */ - task_unlock(current); - - if (notify_on_release(cs)) { - char *pathbuf = NULL; - mutex_lock(&manage_mutex); - if (atomic_dec_and_test(&cs->count)) - check_for_release(cs, &pathbuf); - mutex_unlock(&manage_mutex); - cpuset_release_agent(pathbuf); - } else { - atomic_dec(&cs->count); - } -} - -/** * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. * @@ -2230,10 +1818,23 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) cpumask_t mask; mutex_lock(&callback_mutex); + mask = cpuset_cpus_allowed_locked(tsk); + mutex_unlock(&callback_mutex); + + return mask; +} + +/** + * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. + * Must be called with callback_mutex held. + **/ +cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk) +{ + cpumask_t mask; + task_lock(tsk); - guarantee_online_cpus(tsk->cpuset, &mask); + guarantee_online_cpus(task_cs(tsk), &mask); task_unlock(tsk); - mutex_unlock(&callback_mutex); return mask; } @@ -2259,7 +1860,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) mutex_lock(&callback_mutex); task_lock(tsk); - guarantee_online_mems(tsk->cpuset, &mask); + guarantee_online_mems(task_cs(tsk), &mask); task_unlock(tsk); mutex_unlock(&callback_mutex); @@ -2390,7 +1991,7 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) mutex_lock(&callback_mutex); task_lock(current); - cs = nearest_exclusive_ancestor(current->cpuset); + cs = nearest_exclusive_ancestor(task_cs(current)); task_unlock(current); allowed = node_isset(node, cs->mems_allowed); @@ -2431,12 +2032,12 @@ int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) node = zone_to_nid(z); if (node_isset(node, current->mems_allowed)) return 1; - /* - * Allow tasks that have access to memory reserves because they have - * been OOM killed to get memory anywhere. - */ - if (unlikely(test_thread_flag(TIF_MEMDIE))) - return 1; + /* + * Allow tasks that have access to memory reserves because they have + * been OOM killed to get memory anywhere. 
+ */ + if (unlikely(test_thread_flag(TIF_MEMDIE))) + return 1; return 0; } @@ -2550,14 +2151,12 @@ int cpuset_memory_pressure_enabled __read_mostly; void __cpuset_memory_pressure_bump(void) { - struct cpuset *cs; - task_lock(current); - cs = current->cpuset; - fmeter_markevent(&cs->fmeter); + fmeter_markevent(&task_cs(current)->fmeter); task_unlock(current); } +#ifdef CONFIG_PROC_PID_CPUSET /* * proc_cpuset_show() * - Print tasks cpuset path into seq_file. @@ -2569,11 +2168,12 @@ void __cpuset_memory_pressure_bump(void) * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks * cpuset to top_cpuset. */ -static int proc_cpuset_show(struct seq_file *m, void *v) +static int proc_cpuset_show(struct seq_file *m, void *unused_v) { struct pid *pid; struct task_struct *tsk; char *buf; + struct cgroup_subsys_state *css; int retval; retval = -ENOMEM; @@ -2588,15 +2188,15 @@ static int proc_cpuset_show(struct seq_file *m, void *v) goto out_free; retval = -EINVAL; - mutex_lock(&manage_mutex); - - retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE); + cgroup_lock(); + css = task_subsys_state(tsk, cpuset_subsys_id); + retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); if (retval < 0) goto out_unlock; seq_puts(m, buf); seq_putc(m, '\n'); out_unlock: - mutex_unlock(&manage_mutex); + cgroup_unlock(); put_task_struct(tsk); out_free: kfree(buf); @@ -2616,6 +2216,7 @@ const struct file_operations proc_cpuset_operations = { .llseek = seq_lseek, .release = single_release, }; +#endif /* CONFIG_PROC_PID_CPUSET */ /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ char *cpuset_task_status_allowed(struct task_struct *task, char *buffer) diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 09e9574eeb2..10e43fd8b72 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -115,6 +115,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) tmp += timespec_to_ns(&ts); d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; + tmp = (s64)d->cpu_scaled_run_real_total; + cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts); + tmp += timespec_to_ns(&ts); + d->cpu_scaled_run_real_total = + (tmp < (s64)d->cpu_scaled_run_real_total) ? 
0 : tmp; + /* * No locking available for sched_info (and too expensive to add one) * Mitigate by taking snapshot of values diff --git a/kernel/die_notifier.c b/kernel/die_notifier.c deleted file mode 100644 index 0d98827887a..00000000000 --- a/kernel/die_notifier.c +++ /dev/null @@ -1,38 +0,0 @@ - -#include <linux/module.h> -#include <linux/notifier.h> -#include <linux/vmalloc.h> -#include <linux/kdebug.h> - - -static ATOMIC_NOTIFIER_HEAD(die_chain); - -int notify_die(enum die_val val, const char *str, - struct pt_regs *regs, long err, int trap, int sig) -{ - struct die_args args = { - .regs = regs, - .str = str, - .err = err, - .trapnr = trap, - .signr = sig, - - }; - - return atomic_notifier_call_chain(&die_chain, val, &args); -} - -int register_die_notifier(struct notifier_block *nb) -{ - vmalloc_sync_all(); - return atomic_notifier_chain_register(&die_chain, nb); -} -EXPORT_SYMBOL_GPL(register_die_notifier); - -int unregister_die_notifier(struct notifier_block *nb) -{ - return atomic_notifier_chain_unregister(&die_chain, nb); -} -EXPORT_SYMBOL_GPL(unregister_die_notifier); - - diff --git a/kernel/dma.c b/kernel/dma.c index 937b13ca33b..6a82bb716da 100644 --- a/kernel/dma.c +++ b/kernel/dma.c @@ -20,7 +20,7 @@ #include <asm/dma.h> #include <asm/system.h> - + /* A note on resource allocation: * @@ -95,7 +95,7 @@ void free_dma(unsigned int dmanr) if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) { printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr); return; - } + } } /* free_dma */ @@ -121,8 +121,8 @@ static int proc_dma_show(struct seq_file *m, void *v) for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) { if (dma_chan_busy[i].lock) { - seq_printf(m, "%2d: %s\n", i, - dma_chan_busy[i].device_id); + seq_printf(m, "%2d: %s\n", i, + dma_chan_busy[i].device_id); } } return 0; diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c index 3c2eaea66b1..a9e6bad9f70 100644 --- a/kernel/exec_domain.c +++ b/kernel/exec_domain.c @@ -57,7 +57,7 @@ lookup_exec_domain(u_long personality) { struct exec_domain * ep; u_long pers = personality(personality); - + read_lock(&exec_domains_lock); for (ep = exec_domains; ep; ep = ep->next) { if (pers >= ep->pers_low && pers <= ep->pers_high) diff --git a/kernel/exit.c b/kernel/exit.c index 2c704c86edb..f1aec27f1df 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -31,7 +31,7 @@ #include <linux/taskstats_kern.h> #include <linux/delayacct.h> #include <linux/freezer.h> -#include <linux/cpuset.h> +#include <linux/cgroup.h> #include <linux/syscalls.h> #include <linux/signal.h> #include <linux/posix-timers.h> @@ -148,6 +148,7 @@ void release_task(struct task_struct * p) int zap_leader; repeat: atomic_dec(&p->user->processes); + proc_flush_task(p); write_lock_irq(&tasklist_lock); ptrace_unlink(p); BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); @@ -175,7 +176,6 @@ repeat: } write_unlock_irq(&tasklist_lock); - proc_flush_task(p); release_thread(p); call_rcu(&p->rcu, delayed_put_task_struct); @@ -221,7 +221,7 @@ static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignor do_each_pid_task(pgrp, PIDTYPE_PGID, p) { if (p == ignored_task || p->exit_state - || is_init(p->real_parent)) + || is_global_init(p->real_parent)) continue; if (task_pgrp(p->real_parent) != pgrp && task_session(p->real_parent) == task_session(p)) { @@ -299,14 +299,14 @@ void __set_special_pids(pid_t session, pid_t pgrp) { struct task_struct *curr = current->group_leader; - if (process_session(curr) != session) { + if (task_session_nr(curr) != session) { 
detach_pid(curr, PIDTYPE_SID); - set_signal_session(curr->signal, session); + set_task_session(curr, session); attach_pid(curr, PIDTYPE_SID, find_pid(session)); } - if (process_group(curr) != pgrp) { + if (task_pgrp_nr(curr) != pgrp) { detach_pid(curr, PIDTYPE_PGID); - curr->signal->pgrp = pgrp; + set_task_pgrp(curr, pgrp); attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp)); } } @@ -400,11 +400,12 @@ void daemonize(const char *name, ...) current->fs = fs; atomic_inc(&fs->count); - exit_task_namespaces(current); - current->nsproxy = init_task.nsproxy; - get_task_namespaces(current); + if (current->nsproxy != init_task.nsproxy) { + get_nsproxy(init_task.nsproxy); + switch_task_namespaces(current, init_task.nsproxy); + } - exit_files(current); + exit_files(current); current->files = init_task.files; atomic_inc(¤t->files->count); @@ -492,7 +493,7 @@ void reset_files_struct(struct task_struct *tsk, struct files_struct *files) } EXPORT_SYMBOL(reset_files_struct); -static inline void __exit_files(struct task_struct *tsk) +static void __exit_files(struct task_struct *tsk) { struct files_struct * files = tsk->files; @@ -509,7 +510,7 @@ void exit_files(struct task_struct *tsk) __exit_files(tsk); } -static inline void __put_fs_struct(struct fs_struct *fs) +static void __put_fs_struct(struct fs_struct *fs) { /* No need to hold fs->lock if we are killing it */ if (atomic_dec_and_test(&fs->count)) { @@ -530,7 +531,7 @@ void put_fs_struct(struct fs_struct *fs) __put_fs_struct(fs); } -static inline void __exit_fs(struct task_struct *tsk) +static void __exit_fs(struct task_struct *tsk) { struct fs_struct * fs = tsk->fs; @@ -665,19 +666,22 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced) * the child reaper process (ie "init") in our pid * space. */ -static void -forget_original_parent(struct task_struct *father, struct list_head *to_release) +static void forget_original_parent(struct task_struct *father) { - struct task_struct *p, *reaper = father; - struct list_head *_p, *_n; + struct task_struct *p, *n, *reaper = father; + struct list_head ptrace_dead; + + INIT_LIST_HEAD(&ptrace_dead); + + write_lock_irq(&tasklist_lock); do { reaper = next_thread(reaper); if (reaper == father) { - reaper = child_reaper(father); + reaper = task_child_reaper(father); break; } - } while (reaper->exit_state); + } while (reaper->flags & PF_EXITING); /* * There are only two places where our children can be: @@ -687,9 +691,8 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release) * * Search them and reparent children. */ - list_for_each_safe(_p, _n, &father->children) { + list_for_each_entry_safe(p, n, &father->children, sibling) { int ptrace; - p = list_entry(_p, struct task_struct, sibling); ptrace = p->ptrace; @@ -715,13 +718,23 @@ forget_original_parent(struct task_struct *father, struct list_head *to_release) * while it was being traced by us, to be able to see it in wait4. 
*/ if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1)) - list_add(&p->ptrace_list, to_release); + list_add(&p->ptrace_list, &ptrace_dead); } - list_for_each_safe(_p, _n, &father->ptrace_children) { - p = list_entry(_p, struct task_struct, ptrace_list); + + list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) { p->real_parent = reaper; reparent_thread(p, father, 1); } + + write_unlock_irq(&tasklist_lock); + BUG_ON(!list_empty(&father->children)); + BUG_ON(!list_empty(&father->ptrace_children)); + + list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) { + list_del_init(&p->ptrace_list); + release_task(p); + } + } /* @@ -732,7 +745,6 @@ static void exit_notify(struct task_struct *tsk) { int state; struct task_struct *t; - struct list_head ptrace_dead, *_p, *_n; struct pid *pgrp; if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT) @@ -753,8 +765,6 @@ static void exit_notify(struct task_struct *tsk) spin_unlock_irq(&tsk->sighand->siglock); } - write_lock_irq(&tasklist_lock); - /* * This does two things: * @@ -763,12 +773,10 @@ static void exit_notify(struct task_struct *tsk) * as a result of our exiting, and if they have any stopped * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2) */ + forget_original_parent(tsk); + exit_task_namespaces(tsk); - INIT_LIST_HEAD(&ptrace_dead); - forget_original_parent(tsk, &ptrace_dead); - BUG_ON(!list_empty(&tsk->children)); - BUG_ON(!list_empty(&tsk->ptrace_children)); - + write_lock_irq(&tasklist_lock); /* * Check to see if any process groups have become orphaned * as a result of our exiting, and if they have any stopped @@ -792,7 +800,7 @@ static void exit_notify(struct task_struct *tsk) /* Let father know we died * * Thread signals are configurable, but you aren't going to use - * that to send signals to arbitary processes. + * that to send signals to arbitary processes. * That stops right now. * * If the parent exec id doesn't match the exec id we saved @@ -833,12 +841,6 @@ static void exit_notify(struct task_struct *tsk) write_unlock_irq(&tasklist_lock); - list_for_each_safe(_p, _n, &ptrace_dead) { - list_del_init(_p); - t = list_entry(_p, struct task_struct, ptrace_list); - release_task(t); - } - /* If the process is dead, release it - nobody will wait for it */ if (state == EXIT_DEAD) release_task(tsk); @@ -874,10 +876,35 @@ static inline void check_stack_usage(void) {} static inline void exit_child_reaper(struct task_struct *tsk) { - if (likely(tsk->group_leader != child_reaper(tsk))) + if (likely(tsk->group_leader != task_child_reaper(tsk))) return; - panic("Attempted to kill init!"); + if (tsk->nsproxy->pid_ns == &init_pid_ns) + panic("Attempted to kill init!"); + + /* + * @tsk is the last thread in the 'cgroup-init' and is exiting. + * Terminate all remaining processes in the namespace and reap them + * before exiting @tsk. + * + * Note that @tsk (last thread of cgroup-init) may not necessarily + * be the child-reaper (i.e main thread of cgroup-init) of the + * namespace i.e the child_reaper may have already exited. + * + * Even after a child_reaper exits, we let it inherit orphaned children, + * because, pid_ns->child_reaper remains valid as long as there is + * at least one living sub-thread in the cgroup init. + + * This living sub-thread of the cgroup-init will be notified when + * a child inherited by the 'child-reaper' exits (do_notify_parent() + * uses __group_send_sig_info()). 
Further, when reaping child processes, + * do_wait() iterates over children of all living sub threads. + + * i.e even though 'child_reaper' thread is listed as the parent of the + * orphaned children, any living sub-thread in the cgroup-init can + * perform the role of the child_reaper. + */ + zap_pid_ns_processes(tsk->nsproxy->pid_ns); } fastcall NORET_TYPE void do_exit(long code) @@ -932,7 +959,7 @@ fastcall NORET_TYPE void do_exit(long code) if (unlikely(in_atomic())) printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", - current->comm, current->pid, + current->comm, task_pid_nr(current), preempt_count()); acct_update_integrals(tsk); @@ -972,7 +999,7 @@ fastcall NORET_TYPE void do_exit(long code) __exit_fs(tsk); check_stack_usage(); exit_thread(); - cpuset_exit(tsk); + cgroup_exit(tsk, 1); exit_keys(tsk); if (group_dead && tsk->signal->leader) @@ -983,7 +1010,6 @@ fastcall NORET_TYPE void do_exit(long code) module_put(tsk->binfmt->module); proc_exit_connector(tsk); - exit_task_namespaces(tsk); exit_notify(tsk); #ifdef CONFIG_NUMA mpol_free(tsk->mempolicy); @@ -1086,15 +1112,17 @@ asmlinkage void sys_exit_group(int error_code) static int eligible_child(pid_t pid, int options, struct task_struct *p) { int err; + struct pid_namespace *ns; + ns = current->nsproxy->pid_ns; if (pid > 0) { - if (p->pid != pid) + if (task_pid_nr_ns(p, ns) != pid) return 0; } else if (!pid) { - if (process_group(p) != process_group(current)) + if (task_pgrp_nr_ns(p, ns) != task_pgrp_vnr(current)) return 0; } else if (pid != -1) { - if (process_group(p) != -pid) + if (task_pgrp_nr_ns(p, ns) != -pid) return 0; } @@ -1164,9 +1192,12 @@ static int wait_task_zombie(struct task_struct *p, int noreap, { unsigned long state; int retval, status, traced; + struct pid_namespace *ns; + + ns = current->nsproxy->pid_ns; if (unlikely(noreap)) { - pid_t pid = p->pid; + pid_t pid = task_pid_nr_ns(p, ns); uid_t uid = p->uid; int exit_code = p->exit_code; int why, status; @@ -1285,11 +1316,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap, retval = put_user(status, &infop->si_status); } if (!retval && infop) - retval = put_user(p->pid, &infop->si_pid); + retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid); if (!retval && infop) retval = put_user(p->uid, &infop->si_uid); if (!retval) - retval = p->pid; + retval = task_pid_nr_ns(p, ns); if (traced) { write_lock_irq(&tasklist_lock); @@ -1326,6 +1357,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, int __user *stat_addr, struct rusage __user *ru) { int retval, exit_code; + struct pid_namespace *ns; if (!p->exit_code) return 0; @@ -1344,11 +1376,12 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, * keep holding onto the tasklist_lock while we call getrusage and * possibly take page faults for user memory. */ + ns = current->nsproxy->pid_ns; get_task_struct(p); read_unlock(&tasklist_lock); if (unlikely(noreap)) { - pid_t pid = p->pid; + pid_t pid = task_pid_nr_ns(p, ns); uid_t uid = p->uid; int why = (p->ptrace & PT_PTRACED) ? 
CLD_TRAPPED : CLD_STOPPED; @@ -1419,11 +1452,11 @@ bail_ref: if (!retval && infop) retval = put_user(exit_code, &infop->si_status); if (!retval && infop) - retval = put_user(p->pid, &infop->si_pid); + retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid); if (!retval && infop) retval = put_user(p->uid, &infop->si_uid); if (!retval) - retval = p->pid; + retval = task_pid_nr_ns(p, ns); put_task_struct(p); BUG_ON(!retval); @@ -1443,6 +1476,7 @@ static int wait_task_continued(struct task_struct *p, int noreap, int retval; pid_t pid; uid_t uid; + struct pid_namespace *ns; if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) return 0; @@ -1457,7 +1491,8 @@ static int wait_task_continued(struct task_struct *p, int noreap, p->signal->flags &= ~SIGNAL_STOP_CONTINUED; spin_unlock_irq(&p->sighand->siglock); - pid = p->pid; + ns = current->nsproxy->pid_ns; + pid = task_pid_nr_ns(p, ns); uid = p->uid; get_task_struct(p); read_unlock(&tasklist_lock); @@ -1468,7 +1503,7 @@ static int wait_task_continued(struct task_struct *p, int noreap, if (!retval && stat_addr) retval = put_user(0xffff, stat_addr); if (!retval) - retval = p->pid; + retval = task_pid_nr_ns(p, ns); } else { retval = wait_noreap_copyout(p, pid, uid, CLD_CONTINUED, SIGCONT, @@ -1517,12 +1552,9 @@ repeat: tsk = current; do { struct task_struct *p; - struct list_head *_p; int ret; - list_for_each(_p,&tsk->children) { - p = list_entry(_p, struct task_struct, sibling); - + list_for_each_entry(p, &tsk->children, sibling) { ret = eligible_child(pid, options, p); if (!ret) continue; @@ -1604,9 +1636,8 @@ check_continued: } } if (!flag) { - list_for_each(_p, &tsk->ptrace_children) { - p = list_entry(_p, struct task_struct, - ptrace_list); + list_for_each_entry(p, &tsk->ptrace_children, + ptrace_list) { if (!eligible_child(pid, options, p)) continue; flag = 1; diff --git a/kernel/fork.c b/kernel/fork.c index 490495a39c7..ddafdfac945 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -29,7 +29,7 @@ #include <linux/nsproxy.h> #include <linux/capability.h> #include <linux/cpu.h> -#include <linux/cpuset.h> +#include <linux/cgroup.h> #include <linux/security.h> #include <linux/swap.h> #include <linux/syscalls.h> @@ -50,6 +50,7 @@ #include <linux/taskstats_kern.h> #include <linux/random.h> #include <linux/tty.h> +#include <linux/proc_fs.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -116,7 +117,7 @@ EXPORT_SYMBOL(free_task); void __put_task_struct(struct task_struct *tsk) { - WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE))); + WARN_ON(!tsk->exit_state); WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); @@ -205,7 +206,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) } #ifdef CONFIG_MMU -static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) +static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) { struct vm_area_struct *mpnt, *tmp, **pprev; struct rb_node **rb_link, *rb_parent; @@ -268,7 +269,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) get_file(file); if (tmp->vm_flags & VM_DENYWRITE) atomic_dec(&inode->i_writecount); - + /* insert tmp into the share list, just after mpnt */ spin_lock(&file->f_mapping->i_mmap_lock); tmp->vm_truncate_count = mpnt->vm_truncate_count; @@ -331,7 +332,7 @@ static inline void mm_free_pgd(struct mm_struct * mm) #define mm_free_pgd(mm) #endif /* CONFIG_MMU */ - __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); +__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock); #define allocate_mm() 
(kmem_cache_alloc(mm_cachep, GFP_KERNEL)) #define free_mm(mm) (kmem_cache_free(mm_cachep, (mm))) @@ -583,7 +584,7 @@ fail_nomem: return retval; } -static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old) +static struct fs_struct *__copy_fs_struct(struct fs_struct *old) { struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); /* We don't need to lock fs - think why ;-) */ @@ -615,7 +616,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) EXPORT_SYMBOL_GPL(copy_fs_struct); -static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk) +static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) { if (clone_flags & CLONE_FS) { atomic_inc(¤t->fs->count); @@ -738,8 +739,8 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) /* compute the remainder to be cleared */ size = (new_fdt->max_fds - open_files) * sizeof(struct file *); - /* This is long word aligned thus could use a optimized version */ - memset(new_fds, 0, size); + /* This is long word aligned thus could use a optimized version */ + memset(new_fds, 0, size); if (new_fdt->max_fds > open_files) { int left = (new_fdt->max_fds-open_files)/8; @@ -818,7 +819,7 @@ int unshare_files(void) EXPORT_SYMBOL(unshare_files); -static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk) +static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) { struct sighand_struct *sig; @@ -841,7 +842,7 @@ void __cleanup_sighand(struct sighand_struct *sighand) kmem_cache_free(sighand_cachep, sighand); } -static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk) +static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) { struct signal_struct *sig; int ret; @@ -923,7 +924,7 @@ void __cleanup_signal(struct signal_struct *sig) kmem_cache_free(signal_cachep, sig); } -static inline void cleanup_signal(struct task_struct *tsk) +static void cleanup_signal(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; @@ -933,7 +934,7 @@ static inline void cleanup_signal(struct task_struct *tsk) __cleanup_signal(sig); } -static inline void copy_flags(unsigned long clone_flags, struct task_struct *p) +static void copy_flags(unsigned long clone_flags, struct task_struct *p) { unsigned long new_flags = p->flags; @@ -942,16 +943,17 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p) if (!(clone_flags & CLONE_PTRACE)) p->ptrace = 0; p->flags = new_flags; + clear_freeze_flag(p); } asmlinkage long sys_set_tid_address(int __user *tidptr) { current->clear_child_tid = tidptr; - return current->pid; + return task_pid_vnr(current); } -static inline void rt_mutex_init_task(struct task_struct *p) +static void rt_mutex_init_task(struct task_struct *p) { spin_lock_init(&p->pi_lock); #ifdef CONFIG_RT_MUTEXES @@ -972,12 +974,12 @@ static struct task_struct *copy_process(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, - int __user *parent_tidptr, int __user *child_tidptr, struct pid *pid) { int retval; - struct task_struct *p = NULL; + struct task_struct *p; + int cgroup_callbacks_done = 0; if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) return ERR_PTR(-EINVAL); @@ -1041,12 +1043,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->did_exec = 0; delayacct_tsk_init(p); /* Must remain after dup_task_struct() */ copy_flags(clone_flags, p); - p->pid = pid_nr(pid); - retval = -EFAULT; - if 
(clone_flags & CLONE_PARENT_SETTID) - if (put_user(p->pid, parent_tidptr)) - goto bad_fork_cleanup_delays_binfmt; - INIT_LIST_HEAD(&p->children); INIT_LIST_HEAD(&p->sibling); p->vfork_done = NULL; @@ -1058,6 +1054,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->utime = cputime_zero; p->stime = cputime_zero; p->gtime = cputime_zero; + p->utimescaled = cputime_zero; + p->stimescaled = cputime_zero; #ifdef CONFIG_TASK_XACCT p->rchar = 0; /* I/O counter: bytes read */ @@ -1068,12 +1066,12 @@ static struct task_struct *copy_process(unsigned long clone_flags, task_io_accounting_init(p); acct_clear_integrals(p); - p->it_virt_expires = cputime_zero; + p->it_virt_expires = cputime_zero; p->it_prof_expires = cputime_zero; - p->it_sched_expires = 0; - INIT_LIST_HEAD(&p->cpu_timers[0]); - INIT_LIST_HEAD(&p->cpu_timers[1]); - INIT_LIST_HEAD(&p->cpu_timers[2]); + p->it_sched_expires = 0; + INIT_LIST_HEAD(&p->cpu_timers[0]); + INIT_LIST_HEAD(&p->cpu_timers[1]); + INIT_LIST_HEAD(&p->cpu_timers[2]); p->lock_depth = -1; /* -1 = no lock */ do_posix_clock_monotonic_gettime(&p->start_time); @@ -1083,15 +1081,14 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->security = NULL; #endif p->io_context = NULL; - p->io_wait = NULL; p->audit_context = NULL; - cpuset_fork(p); + cgroup_fork(p); #ifdef CONFIG_NUMA p->mempolicy = mpol_copy(p->mempolicy); if (IS_ERR(p->mempolicy)) { retval = PTR_ERR(p->mempolicy); p->mempolicy = NULL; - goto bad_fork_cleanup_cpuset; + goto bad_fork_cleanup_cgroup; } mpol_fix_fork_child_flag(p); #endif @@ -1124,10 +1121,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, p->blocked_on = NULL; /* not blocked yet */ #endif - p->tgid = p->pid; - if (clone_flags & CLONE_THREAD) - p->tgid = current->tgid; - if ((retval = security_task_alloc(p))) goto bad_fork_cleanup_policy; if ((retval = audit_alloc(p))) @@ -1153,6 +1146,24 @@ static struct task_struct *copy_process(unsigned long clone_flags, if (retval) goto bad_fork_cleanup_namespaces; + if (pid != &init_struct_pid) { + retval = -ENOMEM; + pid = alloc_pid(task_active_pid_ns(p)); + if (!pid) + goto bad_fork_cleanup_namespaces; + + if (clone_flags & CLONE_NEWPID) { + retval = pid_ns_prepare_proc(task_active_pid_ns(p)); + if (retval < 0) + goto bad_fork_free_pid; + } + } + + p->pid = pid_nr(pid); + p->tgid = p->pid; + if (clone_flags & CLONE_THREAD) + p->tgid = current->tgid; + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; /* * Clear TID on mm_release()? @@ -1202,6 +1213,12 @@ static struct task_struct *copy_process(unsigned long clone_flags, /* Perform scheduler related setup. Assign this task to a CPU. */ sched_fork(p, clone_flags); + /* Now that the task is set up, run cgroup callbacks if + * necessary. We need to run them before the task is visible + * on the tasklist. */ + cgroup_fork_callbacks(p); + cgroup_callbacks_done = 1; + /* Need tasklist lock for parent etc handling! */ write_lock_irq(&tasklist_lock); @@ -1239,12 +1256,12 @@ static struct task_struct *copy_process(unsigned long clone_flags, * A fatal signal pending means that current will exit, so the new * thread can't slip out of an OOM kill (or normal SIGKILL). 
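Note on the copy_process() rework above: pids are now allocated after the namespaces have been copied, from the task's active namespace (alloc_pid(task_active_pid_ns(p))), and with CLONE_NEWPID the new namespace's proc mount is prepared and the child ends up as that namespace's child reaper. A small userspace sketch of the visible effect (illustration only, not part of the patch; needs privilege, and assumes your libc headers define CLONE_NEWPID):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static char child_stack[16 * 1024];

static int child_fn(void *arg)
{
	/* Prints 1: the child is the init/child reaper of its new namespace. */
	printf("child: getpid() = %d\n", (int)getpid());
	return 0;
}

int main(void)
{
	pid_t pid = clone(child_fn, child_stack + sizeof(child_stack),
			  CLONE_NEWPID | SIGCHLD, NULL);

	/* The parent sees the child's id in its own namespace, which is
	 * what the reworked do_fork() reports back for CLONE_NEWPID. */
	printf("parent: child is pid %d\n", (int)pid);
	waitpid(pid, NULL, 0);
	return 0;
}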
*/ - recalc_sigpending(); + recalc_sigpending(); if (signal_pending(current)) { spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); retval = -ERESTARTNOINTR; - goto bad_fork_cleanup_namespaces; + goto bad_fork_free_pid; } if (clone_flags & CLONE_THREAD) { @@ -1273,11 +1290,22 @@ static struct task_struct *copy_process(unsigned long clone_flags, __ptrace_link(p, current->parent); if (thread_group_leader(p)) { - p->signal->tty = current->signal->tty; - p->signal->pgrp = process_group(current); - set_signal_session(p->signal, process_session(current)); - attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); - attach_pid(p, PIDTYPE_SID, task_session(current)); + if (clone_flags & CLONE_NEWPID) { + p->nsproxy->pid_ns->child_reaper = p; + p->signal->tty = NULL; + set_task_pgrp(p, p->pid); + set_task_session(p, p->pid); + attach_pid(p, PIDTYPE_PGID, pid); + attach_pid(p, PIDTYPE_SID, pid); + } else { + p->signal->tty = current->signal->tty; + set_task_pgrp(p, task_pgrp_nr(current)); + set_task_session(p, task_session_nr(current)); + attach_pid(p, PIDTYPE_PGID, + task_pgrp(current)); + attach_pid(p, PIDTYPE_SID, + task_session(current)); + } list_add_tail_rcu(&p->tasks, &init_task.tasks); __get_cpu_var(process_counts)++; @@ -1290,8 +1318,12 @@ static struct task_struct *copy_process(unsigned long clone_flags, spin_unlock(¤t->sighand->siglock); write_unlock_irq(&tasklist_lock); proc_fork_connector(p); + cgroup_post_fork(p); return p; +bad_fork_free_pid: + if (pid != &init_struct_pid) + free_pid(pid); bad_fork_cleanup_namespaces: exit_task_namespaces(p); bad_fork_cleanup_keys: @@ -1316,10 +1348,9 @@ bad_fork_cleanup_security: bad_fork_cleanup_policy: #ifdef CONFIG_NUMA mpol_free(p->mempolicy); -bad_fork_cleanup_cpuset: +bad_fork_cleanup_cgroup: #endif - cpuset_exit(p); -bad_fork_cleanup_delays_binfmt: + cgroup_exit(p, cgroup_callbacks_done); delayacct_tsk_free(p); if (p->binfmt) module_put(p->binfmt->module); @@ -1346,7 +1377,7 @@ struct task_struct * __cpuinit fork_idle(int cpu) struct task_struct *task; struct pt_regs regs; - task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, NULL, + task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, &init_struct_pid); if (!IS_ERR(task)) init_idle(task, cpu); @@ -1354,7 +1385,7 @@ struct task_struct * __cpuinit fork_idle(int cpu) return task; } -static inline int fork_traceflag (unsigned clone_flags) +static int fork_traceflag(unsigned clone_flags) { if (clone_flags & CLONE_UNTRACED) return 0; @@ -1385,19 +1416,16 @@ long do_fork(unsigned long clone_flags, { struct task_struct *p; int trace = 0; - struct pid *pid = alloc_pid(); long nr; - if (!pid) - return -EAGAIN; - nr = pid->nr; if (unlikely(current->ptrace)) { trace = fork_traceflag (clone_flags); if (trace) clone_flags |= CLONE_PTRACE; } - p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, pid); + p = copy_process(clone_flags, stack_start, regs, stack_size, + child_tidptr, NULL); /* * Do this prior waking up the new thread - the thread pointer * might get invalid after that point, if the thread exits quickly. @@ -1405,6 +1433,17 @@ long do_fork(unsigned long clone_flags, if (!IS_ERR(p)) { struct completion vfork; + /* + * this is enough to call pid_nr_ns here, but this if + * improves optimisation of regular fork() + */ + nr = (clone_flags & CLONE_NEWPID) ? 
+ task_pid_nr_ns(p, current->nsproxy->pid_ns) : + task_pid_vnr(p); + + if (clone_flags & CLONE_PARENT_SETTID) + put_user(nr, parent_tidptr); + if (clone_flags & CLONE_VFORK) { p->vfork_done = &vfork; init_completion(&vfork); @@ -1438,7 +1477,6 @@ long do_fork(unsigned long clone_flags, } } } else { - free_pid(pid); nr = PTR_ERR(p); } return nr; @@ -1483,7 +1521,7 @@ void __init proc_caches_init(void) * Check constraints on flags passed to the unshare system call and * force unsharing of additional process context as appropriate. */ -static inline void check_unshare_flags(unsigned long *flags_ptr) +static void check_unshare_flags(unsigned long *flags_ptr) { /* * If unsharing a thread from a thread group, must also @@ -1615,7 +1653,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL; struct files_struct *fd, *new_fd = NULL; struct sem_undo_list *new_ulist = NULL; - struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL; + struct nsproxy *new_nsproxy = NULL; check_unshare_flags(&unshare_flags); @@ -1645,14 +1683,13 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) { - task_lock(current); - if (new_nsproxy) { - old_nsproxy = current->nsproxy; - current->nsproxy = new_nsproxy; - new_nsproxy = old_nsproxy; + switch_task_namespaces(current, new_nsproxy); + new_nsproxy = NULL; } + task_lock(current); + if (new_fs) { fs = current->fs; current->fs = new_fs; diff --git a/kernel/futex.c b/kernel/futex.c index d725676d84f..32710451dc2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -53,6 +53,9 @@ #include <linux/signal.h> #include <linux/module.h> #include <linux/magic.h> +#include <linux/pid.h> +#include <linux/nsproxy.h> + #include <asm/futex.h> #include "rtmutex_common.h" @@ -293,7 +296,7 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs); */ void drop_futex_key_refs(union futex_key *key) { - if (key->both.ptr == 0) + if (!key->both.ptr) return; switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { case FUT_OFF_INODE: @@ -443,8 +446,7 @@ static struct task_struct * futex_find_get_task(pid_t pid) struct task_struct *p; rcu_read_lock(); - p = find_task_by_pid(pid); - + p = find_task_by_vpid(pid); if (!p || ((current->euid != p->euid) && (current->euid != p->uid))) p = ERR_PTR(-ESRCH); else @@ -653,7 +655,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) if (!(uval & FUTEX_OWNER_DIED)) { int ret = 0; - newval = FUTEX_WAITERS | new_owner->pid; + newval = FUTEX_WAITERS | task_pid_vnr(new_owner); curval = cmpxchg_futex_value_locked(uaddr, uval, newval); @@ -1046,7 +1048,7 @@ static int unqueue_me(struct futex_q *q) retry: lock_ptr = q->lock_ptr; barrier(); - if (lock_ptr != 0) { + if (lock_ptr != NULL) { spin_lock(lock_ptr); /* * q->lock_ptr can change between reading it and @@ -1106,7 +1108,7 @@ static void unqueue_me_pi(struct futex_q *q) static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, struct task_struct *curr) { - u32 newtid = curr->pid | FUTEX_WAITERS; + u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; u32 uval, curval, newval; int ret; @@ -1368,7 +1370,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, * (by doing a 0 -> TID atomic cmpxchg), while holding all * the locks. It will most likely not succeed. 
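Both the do_fork() return value and the futex TID words are now expressed through the namespace-aware pid helpers instead of raw p->pid / current->pid. A rough sketch of what the different helpers yield, assuming a task and a pid namespace already in hand (illustration only, not part of the patch):

#include <linux/kernel.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

static void show_pid_views(struct task_struct *tsk, struct pid_namespace *ns)
{
	pid_t global = task_pid_nr(tsk);        /* id in the initial namespace */
	pid_t in_ns  = task_pid_nr_ns(tsk, ns); /* id as seen from ns */
	pid_t virt   = task_pid_vnr(tsk);       /* id as seen from current's namespace */

	printk(KERN_DEBUG "pid: global %d, in ns %d, virtual %d\n",
	       global, in_ns, virt);
}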
*/ - newval = current->pid; + newval = task_pid_vnr(current); curval = cmpxchg_futex_value_locked(uaddr, 0, newval); @@ -1379,7 +1381,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, * Detect deadlocks. In case of REQUEUE_PI this is a valid * situation and we return success to user space. */ - if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) { + if (unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(current))) { ret = -EDEADLK; goto out_unlock_release_sem; } @@ -1408,7 +1410,7 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, */ if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { /* Keep the OWNER_DIED bit */ - newval = (curval & ~FUTEX_TID_MASK) | current->pid; + newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(current); ownerdied = 0; lock_taken = 1; } @@ -1587,7 +1589,7 @@ retry: /* * We release only a lock we actually own: */ - if ((uval & FUTEX_TID_MASK) != current->pid) + if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) return -EPERM; /* * First take all the futex related locks: @@ -1608,7 +1610,7 @@ retry_unlocked: * anyone else up: */ if (!(uval & FUTEX_OWNER_DIED)) - uval = cmpxchg_futex_value_locked(uaddr, current->pid, 0); + uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); if (unlikely(uval == -EFAULT)) @@ -1617,7 +1619,7 @@ retry_unlocked: * Rare case: we managed to release the lock atomically, * no need to wake anyone else up: */ - if (unlikely(uval == current->pid)) + if (unlikely(uval == task_pid_vnr(current))) goto out_unlock; /* @@ -1854,7 +1856,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, ret = -ESRCH; rcu_read_lock(); - p = find_task_by_pid(pid); + p = find_task_by_vpid(pid); if (!p) goto err_unlock; ret = -EPERM; @@ -1887,7 +1889,7 @@ retry: if (get_user(uval, uaddr)) return -1; - if ((uval & FUTEX_TID_MASK) == curr->pid) { + if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { /* * Ok, this dying thread is truly holding a futex * of interest. 
Set the OWNER_DIED bit atomically diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 2c2e2954b71..00b572666cc 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c @@ -8,6 +8,7 @@ #include <linux/linkage.h> #include <linux/compat.h> +#include <linux/nsproxy.h> #include <linux/futex.h> #include <asm/uaccess.h> @@ -124,7 +125,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, ret = -ESRCH; read_lock(&tasklist_lock); - p = find_task_by_pid(pid); + p = find_task_by_vpid(pid); if (!p) goto err_unlock; ret = -EPERM; diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index dc8a4451d79..b2b2c2b0a49 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -1286,8 +1286,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod long __sched hrtimer_nanosleep_restart(struct restart_block *restart) { struct hrtimer_sleeper t; - struct timespec __user *rmtp; - struct timespec tu; + struct timespec *rmtp; ktime_t time; restart->fn = do_no_restart_syscall; @@ -1298,14 +1297,12 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) if (do_nanosleep(&t, HRTIMER_MODE_ABS)) return 0; - rmtp = (struct timespec __user *) restart->arg1; + rmtp = (struct timespec *)restart->arg1; if (rmtp) { time = ktime_sub(t.timer.expires, t.timer.base->get_time()); if (time.tv64 <= 0) return 0; - tu = ktime_to_timespec(time); - if (copy_to_user(rmtp, &tu, sizeof(tu))) - return -EFAULT; + *rmtp = ktime_to_timespec(time); } restart->fn = hrtimer_nanosleep_restart; @@ -1314,12 +1311,11 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) return -ERESTART_RESTARTBLOCK; } -long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec *rmtp, const enum hrtimer_mode mode, const clockid_t clockid) { struct restart_block *restart; struct hrtimer_sleeper t; - struct timespec tu; ktime_t rem; hrtimer_init(&t.timer, clockid, mode); @@ -1335,9 +1331,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, rem = ktime_sub(t.timer.expires, t.timer.base->get_time()); if (rem.tv64 <= 0) return 0; - tu = ktime_to_timespec(rem); - if (copy_to_user(rmtp, &tu, sizeof(tu))) - return -EFAULT; + *rmtp = ktime_to_timespec(rem); } restart = ¤t_thread_info()->restart_block; @@ -1353,7 +1347,8 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp) { - struct timespec tu; + struct timespec tu, rmt; + int ret; if (copy_from_user(&tu, rqtp, sizeof(tu))) return -EFAULT; @@ -1361,7 +1356,15 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp) if (!timespec_valid(&tu)) return -EINVAL; - return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); + ret = hrtimer_nanosleep(&tu, rmtp ? 
&rmt : NULL, HRTIMER_MODE_REL, + CLOCK_MONOTONIC); + + if (ret && rmtp) { + if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) + return -EFAULT; + } + + return ret; } /* diff --git a/kernel/itimer.c b/kernel/itimer.c index 3205e8e114f..2fab344dbf5 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c @@ -130,7 +130,7 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value) enum hrtimer_restart it_real_fn(struct hrtimer *timer) { struct signal_struct *sig = - container_of(timer, struct signal_struct, real_timer); + container_of(timer, struct signal_struct, real_timer); send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk); @@ -291,6 +291,6 @@ asmlinkage long sys_setitimer(int which, return error; if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer))) - return -EFAULT; + return -EFAULT; return 0; } diff --git a/kernel/kexec.c b/kernel/kexec.c index 7885269b0da..aa74a1ef2da 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -51,7 +51,7 @@ struct resource crashk_res = { int kexec_should_crash(struct task_struct *p) { - if (in_interrupt() || !p->pid || is_init(p) || panic_on_oops) + if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) return 1; return 0; } @@ -785,7 +785,7 @@ static int kimage_load_normal_segment(struct kimage *image, size_t uchunk, mchunk; page = kimage_alloc_page(image, GFP_HIGHUSER, maddr); - if (page == 0) { + if (!page) { result = -ENOMEM; goto out; } @@ -844,7 +844,7 @@ static int kimage_load_crash_segment(struct kimage *image, size_t uchunk, mchunk; page = pfn_to_page(maddr >> PAGE_SHIFT); - if (page == 0) { + if (!page) { result = -ENOMEM; goto out; } @@ -1146,6 +1146,172 @@ static int __init crash_notes_memory_init(void) } module_init(crash_notes_memory_init) + +/* + * parsing the "crashkernel" commandline + * + * this code is intended to be called from architecture specific code + */ + + +/* + * This function parses command lines in the format + * + * crashkernel=ramsize-range:size[,...][@offset] + * + * The function returns 0 on success and -EINVAL on failure. + */ +static int __init parse_crashkernel_mem(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + char *cur = cmdline, *tmp; + + /* for each entry of the comma-separated list */ + do { + unsigned long long start, end = ULLONG_MAX, size; + + /* get the start of the range */ + start = memparse(cur, &tmp); + if (cur == tmp) { + pr_warning("crashkernel: Memory value expected\n"); + return -EINVAL; + } + cur = tmp; + if (*cur != '-') { + pr_warning("crashkernel: '-' expected\n"); + return -EINVAL; + } + cur++; + + /* if no ':' is here, than we read the end */ + if (*cur != ':') { + end = memparse(cur, &tmp); + if (cur == tmp) { + pr_warning("crashkernel: Memory " + "value expected\n"); + return -EINVAL; + } + cur = tmp; + if (end <= start) { + pr_warning("crashkernel: end <= start\n"); + return -EINVAL; + } + } + + if (*cur != ':') { + pr_warning("crashkernel: ':' expected\n"); + return -EINVAL; + } + cur++; + + size = memparse(cur, &tmp); + if (cur == tmp) { + pr_warning("Memory value expected\n"); + return -EINVAL; + } + cur = tmp; + if (size >= system_ram) { + pr_warning("crashkernel: invalid size\n"); + return -EINVAL; + } + + /* match ? 
*/ + if (system_ram >= start && system_ram <= end) { + *crash_size = size; + break; + } + } while (*cur++ == ','); + + if (*crash_size > 0) { + while (*cur != ' ' && *cur != '@') + cur++; + if (*cur == '@') { + cur++; + *crash_base = memparse(cur, &tmp); + if (cur == tmp) { + pr_warning("Memory value expected " + "after '@'\n"); + return -EINVAL; + } + } + } + + return 0; +} + +/* + * That function parses "simple" (old) crashkernel command lines like + * + * crashkernel=size[@offset] + * + * It returns 0 on success and -EINVAL on failure. + */ +static int __init parse_crashkernel_simple(char *cmdline, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + char *cur = cmdline; + + *crash_size = memparse(cmdline, &cur); + if (cmdline == cur) { + pr_warning("crashkernel: memory value expected\n"); + return -EINVAL; + } + + if (*cur == '@') + *crash_base = memparse(cur+1, &cur); + + return 0; +} + +/* + * That function is the entry point for command line parsing and should be + * called from the arch-specific code. + */ +int __init parse_crashkernel(char *cmdline, + unsigned long long system_ram, + unsigned long long *crash_size, + unsigned long long *crash_base) +{ + char *p = cmdline, *ck_cmdline = NULL; + char *first_colon, *first_space; + + BUG_ON(!crash_size || !crash_base); + *crash_size = 0; + *crash_base = 0; + + /* find crashkernel and use the last one if there are more */ + p = strstr(p, "crashkernel="); + while (p) { + ck_cmdline = p; + p = strstr(p+1, "crashkernel="); + } + + if (!ck_cmdline) + return -EINVAL; + + ck_cmdline += 12; /* strlen("crashkernel=") */ + + /* + * if the commandline contains a ':', then that's the extended + * syntax -- if not, it must be the classic syntax + */ + first_colon = strchr(ck_cmdline, ':'); + first_space = strchr(ck_cmdline, ' '); + if (first_colon && (!first_space || first_colon < first_space)) + return parse_crashkernel_mem(ck_cmdline, system_ram, + crash_size, crash_base); + else + return parse_crashkernel_simple(ck_cmdline, crash_size, + crash_base); + + return 0; +} + + + void crash_save_vmcoreinfo(void) { u32 *buf; diff --git a/kernel/lockdep.c b/kernel/lockdep.c index a6f1ee9c92d..55fe0c7cd95 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -511,11 +511,11 @@ static void lockdep_print_held_locks(struct task_struct *curr) int i, depth = curr->lockdep_depth; if (!depth) { - printk("no locks held by %s/%d.\n", curr->comm, curr->pid); + printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); return; } printk("%d lock%s held by %s/%d:\n", - depth, depth > 1 ? "s" : "", curr->comm, curr->pid); + depth, depth > 1 ? 
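Stepping back to the parse_crashkernel() helpers added to kexec.c just above, a worked example may help: on a machine with 1G of RAM, a command line containing crashkernel=512M-2G:64M,2G-:128M@16M matches the 512M-2G range, so the helper reports a 64M reservation at offset 16M; the classic form crashkernel=64M@16M gives the same result through parse_crashkernel_simple(). A sketch of how architecture setup code might call it (illustration only; the function and variable names are placeholders, assuming the declaration is exported via linux/kexec.h):

#include <linux/init.h>
#include <linux/kexec.h>

static int __init my_reserve_crashkernel(char *cmdline,
					 unsigned long long total_ram)
{
	unsigned long long crash_size = 0, crash_base = 0;
	int ret;

	ret = parse_crashkernel(cmdline, total_ram, &crash_size, &crash_base);
	if (ret == 0 && crash_size) {
		/* reserve [crash_base, crash_base + crash_size) here,
		 * e.g. by filling in crashk_res and the bootmem allocator */
	}
	return ret;
}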
"s" : "", curr->comm, task_pid_nr(curr)); for (i = 0; i < depth; i++) { printk(" #%d: ", i); @@ -904,7 +904,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth) print_kernel_version(); printk( "-------------------------------------------------------\n"); printk("%s/%d is trying to acquire lock:\n", - curr->comm, curr->pid); + curr->comm, task_pid_nr(curr)); print_lock(check_source); printk("\nbut task is already holding lock:\n"); print_lock(check_target); @@ -1085,7 +1085,7 @@ print_bad_irq_dependency(struct task_struct *curr, print_kernel_version(); printk( "------------------------------------------------------\n"); printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n", - curr->comm, curr->pid, + curr->comm, task_pid_nr(curr), curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, curr->hardirqs_enabled, @@ -1237,7 +1237,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, print_kernel_version(); printk( "---------------------------------------------\n"); printk("%s/%d is trying to acquire lock:\n", - curr->comm, curr->pid); + curr->comm, task_pid_nr(curr)); print_lock(next); printk("\nbut task is already holding lock:\n"); print_lock(prev); @@ -1521,7 +1521,7 @@ cache_hit: } static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, - struct held_lock *hlock, int chain_head, u64 chain_key) + struct held_lock *hlock, int chain_head, u64 chain_key) { /* * Trylock needs to maintain the stack of held locks, but it @@ -1641,7 +1641,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this, usage_str[prev_bit], usage_str[new_bit]); printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n", - curr->comm, curr->pid, + curr->comm, task_pid_nr(curr), trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, trace_hardirqs_enabled(curr), @@ -1694,7 +1694,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, print_kernel_version(); printk( "---------------------------------------------------------\n"); printk("%s/%d just changed the state of lock:\n", - curr->comm, curr->pid); + curr->comm, task_pid_nr(curr)); print_lock(this); if (forwards) printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); @@ -2487,7 +2487,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, printk( "[ BUG: bad unlock balance detected! ]\n"); printk( "-------------------------------------\n"); printk("%s/%d is trying to release lock (", - curr->comm, curr->pid); + curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); printk(") at:\n"); print_ip_sym(ip); @@ -2737,7 +2737,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, printk( "[ BUG: bad contention detected! ]\n"); printk( "---------------------------------\n"); printk("%s/%d is trying to contend lock (", - curr->comm, curr->pid); + curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); printk(") at:\n"); print_ip_sym(ip); @@ -3072,7 +3072,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from, printk( "[ BUG: held lock freed! 
]\n"); printk( "-------------------------\n"); printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", - curr->comm, curr->pid, mem_from, mem_to-1); + curr->comm, task_pid_nr(curr), mem_from, mem_to-1); print_lock(hlock); lockdep_print_held_locks(curr); @@ -3125,7 +3125,7 @@ static void print_held_locks_bug(struct task_struct *curr) printk( "[ BUG: lock held at task exit time! ]\n"); printk( "-------------------------------------\n"); printk("%s/%d is exiting with locks still held!\n", - curr->comm, curr->pid); + curr->comm, task_pid_nr(curr)); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); diff --git a/kernel/marker.c b/kernel/marker.c new file mode 100644 index 00000000000..ccb48d9a365 --- /dev/null +++ b/kernel/marker.c @@ -0,0 +1,525 @@ +/* + * Copyright (C) 2007 Mathieu Desnoyers + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/jhash.h> +#include <linux/list.h> +#include <linux/rcupdate.h> +#include <linux/marker.h> +#include <linux/err.h> + +extern struct marker __start___markers[]; +extern struct marker __stop___markers[]; + +/* + * module_mutex nests inside markers_mutex. Markers mutex protects the builtin + * and module markers, the hash table and deferred_sync. + */ +static DEFINE_MUTEX(markers_mutex); + +/* + * Marker deferred synchronization. + * Upon marker probe_unregister, we delay call to synchronize_sched() to + * accelerate mass unregistration (only when there is no more reference to a + * given module do we call synchronize_sched()). However, we need to make sure + * every critical region has ended before we re-arm a marker that has been + * unregistered and then registered back with a different probe data. + */ +static int deferred_sync; + +/* + * Marker hash table, containing the active markers. + * Protected by module_mutex. + */ +#define MARKER_HASH_BITS 6 +#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS) + +struct marker_entry { + struct hlist_node hlist; + char *format; + marker_probe_func *probe; + void *private; + int refcount; /* Number of times armed. 0 if disarmed. */ + char name[0]; /* Contains name'\0'format'\0' */ +}; + +static struct hlist_head marker_table[MARKER_TABLE_SIZE]; + +/** + * __mark_empty_function - Empty probe callback + * @mdata: pointer of type const struct marker + * @fmt: format string + * @...: variable argument list + * + * Empty callback provided as a probe to the markers. By providing this to a + * disabled marker, we make sure the execution flow is always valid even + * though the function pointer change and the marker enabling are two distinct + * operations that modifies the execution flow of preemptible code. + */ +void __mark_empty_function(const struct marker *mdata, void *private, + const char *fmt, ...) 
+{ +} +EXPORT_SYMBOL_GPL(__mark_empty_function); + +/* + * Get marker if the marker is present in the marker hash table. + * Must be called with markers_mutex held. + * Returns NULL if not present. + */ +static struct marker_entry *get_marker(const char *name) +{ + struct hlist_head *head; + struct hlist_node *node; + struct marker_entry *e; + u32 hash = jhash(name, strlen(name), 0); + + head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (!strcmp(name, e->name)) + return e; + } + return NULL; +} + +/* + * Add the marker to the marker hash table. Must be called with markers_mutex + * held. + */ +static int add_marker(const char *name, const char *format, + marker_probe_func *probe, void *private) +{ + struct hlist_head *head; + struct hlist_node *node; + struct marker_entry *e; + size_t name_len = strlen(name) + 1; + size_t format_len = 0; + u32 hash = jhash(name, name_len-1, 0); + + if (format) + format_len = strlen(format) + 1; + head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (!strcmp(name, e->name)) { + printk(KERN_NOTICE + "Marker %s busy, probe %p already installed\n", + name, e->probe); + return -EBUSY; /* Already there */ + } + } + /* + * Using kmalloc here to allocate a variable length element. Could + * cause some memory fragmentation if overused. + */ + e = kmalloc(sizeof(struct marker_entry) + name_len + format_len, + GFP_KERNEL); + if (!e) + return -ENOMEM; + memcpy(&e->name[0], name, name_len); + if (format) { + e->format = &e->name[name_len]; + memcpy(e->format, format, format_len); + trace_mark(core_marker_format, "name %s format %s", + e->name, e->format); + } else + e->format = NULL; + e->probe = probe; + e->private = private; + e->refcount = 0; + hlist_add_head(&e->hlist, head); + return 0; +} + +/* + * Remove the marker from the marker hash table. Must be called with mutex_lock + * held. + */ +static void *remove_marker(const char *name) +{ + struct hlist_head *head; + struct hlist_node *node; + struct marker_entry *e; + int found = 0; + size_t len = strlen(name) + 1; + void *private = NULL; + u32 hash = jhash(name, len-1, 0); + + head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (!strcmp(name, e->name)) { + found = 1; + break; + } + } + if (found) { + private = e->private; + hlist_del(&e->hlist); + kfree(e); + } + return private; +} + +/* + * Set the mark_entry format to the format found in the element. + */ +static int marker_set_format(struct marker_entry **entry, const char *format) +{ + struct marker_entry *e; + size_t name_len = strlen((*entry)->name) + 1; + size_t format_len = strlen(format) + 1; + + e = kmalloc(sizeof(struct marker_entry) + name_len + format_len, + GFP_KERNEL); + if (!e) + return -ENOMEM; + memcpy(&e->name[0], (*entry)->name, name_len); + e->format = &e->name[name_len]; + memcpy(e->format, format, format_len); + e->probe = (*entry)->probe; + e->private = (*entry)->private; + e->refcount = (*entry)->refcount; + hlist_add_before(&e->hlist, &(*entry)->hlist); + hlist_del(&(*entry)->hlist); + kfree(*entry); + *entry = e; + trace_mark(core_marker_format, "name %s format %s", + e->name, e->format); + return 0; +} + +/* + * Sets the probe callback corresponding to one marker. 
+ */ +static int set_marker(struct marker_entry **entry, struct marker *elem) +{ + int ret; + WARN_ON(strcmp((*entry)->name, elem->name) != 0); + + if ((*entry)->format) { + if (strcmp((*entry)->format, elem->format) != 0) { + printk(KERN_NOTICE + "Format mismatch for probe %s " + "(%s), marker (%s)\n", + (*entry)->name, + (*entry)->format, + elem->format); + return -EPERM; + } + } else { + ret = marker_set_format(entry, elem->format); + if (ret) + return ret; + } + elem->call = (*entry)->probe; + elem->private = (*entry)->private; + elem->state = 1; + return 0; +} + +/* + * Disable a marker and its probe callback. + * Note: only after a synchronize_sched() issued after setting elem->call to the + * empty function insures that the original callback is not used anymore. This + * insured by preemption disabling around the call site. + */ +static void disable_marker(struct marker *elem) +{ + elem->state = 0; + elem->call = __mark_empty_function; + /* + * Leave the private data and id there, because removal is racy and + * should be done only after a synchronize_sched(). These are never used + * until the next initialization anyway. + */ +} + +/** + * marker_update_probe_range - Update a probe range + * @begin: beginning of the range + * @end: end of the range + * @probe_module: module address of the probe being updated + * @refcount: number of references left to the given probe_module (out) + * + * Updates the probe callback corresponding to a range of markers. + * Must be called with markers_mutex held. + */ +void marker_update_probe_range(struct marker *begin, + struct marker *end, struct module *probe_module, + int *refcount) +{ + struct marker *iter; + struct marker_entry *mark_entry; + + for (iter = begin; iter < end; iter++) { + mark_entry = get_marker(iter->name); + if (mark_entry && mark_entry->refcount) { + set_marker(&mark_entry, iter); + /* + * ignore error, continue + */ + if (probe_module) + if (probe_module == + __module_text_address((unsigned long)mark_entry->probe)) + (*refcount)++; + } else { + disable_marker(iter); + } + } +} + +/* + * Update probes, removing the faulty probes. + * Issues a synchronize_sched() when no reference to the module passed + * as parameter is found in the probes so the probe module can be + * safely unloaded from now on. + */ +static void marker_update_probes(struct module *probe_module) +{ + int refcount = 0; + + mutex_lock(&markers_mutex); + /* Core kernel markers */ + marker_update_probe_range(__start___markers, + __stop___markers, probe_module, &refcount); + /* Markers in modules. */ + module_update_markers(probe_module, &refcount); + if (probe_module && refcount == 0) { + synchronize_sched(); + deferred_sync = 0; + } + mutex_unlock(&markers_mutex); +} + +/** + * marker_probe_register - Connect a probe to a marker + * @name: marker name + * @format: format string + * @probe: probe handler + * @private: probe private data + * + * private data must be a valid allocated memory address, or NULL. + * Returns 0 if ok, error value on error. 
+ */ +int marker_probe_register(const char *name, const char *format, + marker_probe_func *probe, void *private) +{ + struct marker_entry *entry; + int ret = 0, need_update = 0; + + mutex_lock(&markers_mutex); + entry = get_marker(name); + if (entry && entry->refcount) { + ret = -EBUSY; + goto end; + } + if (deferred_sync) { + synchronize_sched(); + deferred_sync = 0; + } + ret = add_marker(name, format, probe, private); + if (ret) + goto end; + need_update = 1; +end: + mutex_unlock(&markers_mutex); + if (need_update) + marker_update_probes(NULL); + return ret; +} +EXPORT_SYMBOL_GPL(marker_probe_register); + +/** + * marker_probe_unregister - Disconnect a probe from a marker + * @name: marker name + * + * Returns the private data given to marker_probe_register, or an ERR_PTR(). + */ +void *marker_probe_unregister(const char *name) +{ + struct module *probe_module; + struct marker_entry *entry; + void *private; + int need_update = 0; + + mutex_lock(&markers_mutex); + entry = get_marker(name); + if (!entry) { + private = ERR_PTR(-ENOENT); + goto end; + } + entry->refcount = 0; + /* In what module is the probe handler ? */ + probe_module = __module_text_address((unsigned long)entry->probe); + private = remove_marker(name); + deferred_sync = 1; + need_update = 1; +end: + mutex_unlock(&markers_mutex); + if (need_update) + marker_update_probes(probe_module); + return private; +} +EXPORT_SYMBOL_GPL(marker_probe_unregister); + +/** + * marker_probe_unregister_private_data - Disconnect a probe from a marker + * @private: probe private data + * + * Unregister a marker by providing the registered private data. + * Returns the private data given to marker_probe_register, or an ERR_PTR(). + */ +void *marker_probe_unregister_private_data(void *private) +{ + struct module *probe_module; + struct hlist_head *head; + struct hlist_node *node; + struct marker_entry *entry; + int found = 0; + unsigned int i; + int need_update = 0; + + mutex_lock(&markers_mutex); + for (i = 0; i < MARKER_TABLE_SIZE; i++) { + head = &marker_table[i]; + hlist_for_each_entry(entry, node, head, hlist) { + if (entry->private == private) { + found = 1; + goto iter_end; + } + } + } +iter_end: + if (!found) { + private = ERR_PTR(-ENOENT); + goto end; + } + entry->refcount = 0; + /* In what module is the probe handler ? */ + probe_module = __module_text_address((unsigned long)entry->probe); + private = remove_marker(entry->name); + deferred_sync = 1; + need_update = 1; +end: + mutex_unlock(&markers_mutex); + if (need_update) + marker_update_probes(probe_module); + return private; +} +EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data); + +/** + * marker_arm - Arm a marker + * @name: marker name + * + * Activate a marker. It keeps a reference count of the number of + * arming/disarming done. + * Returns 0 if ok, error value on error. + */ +int marker_arm(const char *name) +{ + struct marker_entry *entry; + int ret = 0, need_update = 0; + + mutex_lock(&markers_mutex); + entry = get_marker(name); + if (!entry) { + ret = -ENOENT; + goto end; + } + /* + * Only need to update probes when refcount passes from 0 to 1. + */ + if (entry->refcount++) + goto end; + need_update = 1; +end: + mutex_unlock(&markers_mutex); + if (need_update) + marker_update_probes(NULL); + return ret; +} +EXPORT_SYMBOL_GPL(marker_arm); + +/** + * marker_disarm - Disarm a marker + * @name: marker name + * + * Disarm a marker. It keeps a reference count of the number of arming/disarming + * done. + * Returns 0 if ok, error value on error. 
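Taken together, the marker API added in this file is used in two halves: a trace_mark() statement at the instrumentation site, and a probe attached and enabled at runtime. A minimal sketch (illustration only; the marker name "subsys_event" and its format string are invented for the example):

#include <linux/init.h>
#include <linux/marker.h>
#include <linux/module.h>

/*
 * The instrumented code path would carry the marker itself, e.g.:
 *	trace_mark(subsys_event, "value %d", v);
 */

static void subsys_probe(const struct marker *mdata, void *private,
			 const char *fmt, ...)
{
	/* decode the arguments with va_start()/va_arg() according to fmt */
}

static int __init subsys_probe_init(void)
{
	int ret;

	ret = marker_probe_register("subsys_event", "value %d",
				    subsys_probe, NULL);
	if (ret)
		return ret;
	/* Probes only fire once the marker is armed. */
	return marker_arm("subsys_event");
}

static void __exit subsys_probe_exit(void)
{
	marker_disarm("subsys_event");
	marker_probe_unregister("subsys_event");
}

module_init(subsys_probe_init);
module_exit(subsys_probe_exit);
MODULE_LICENSE("GPL");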
+ */ +int marker_disarm(const char *name) +{ + struct marker_entry *entry; + int ret = 0, need_update = 0; + + mutex_lock(&markers_mutex); + entry = get_marker(name); + if (!entry) { + ret = -ENOENT; + goto end; + } + /* + * Only permit decrement refcount if higher than 0. + * Do probe update only on 1 -> 0 transition. + */ + if (entry->refcount) { + if (--entry->refcount) + goto end; + } else { + ret = -EPERM; + goto end; + } + need_update = 1; +end: + mutex_unlock(&markers_mutex); + if (need_update) + marker_update_probes(NULL); + return ret; +} +EXPORT_SYMBOL_GPL(marker_disarm); + +/** + * marker_get_private_data - Get a marker's probe private data + * @name: marker name + * + * Returns the private data pointer, or an ERR_PTR. + * The private data pointer should _only_ be dereferenced if the caller is the + * owner of the data, or its content could vanish. This is mostly used to + * confirm that a caller is the owner of a registered probe. + */ +void *marker_get_private_data(const char *name) +{ + struct hlist_head *head; + struct hlist_node *node; + struct marker_entry *e; + size_t name_len = strlen(name) + 1; + u32 hash = jhash(name, name_len-1, 0); + int found = 0; + + head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)]; + hlist_for_each_entry(e, node, head, hlist) { + if (!strcmp(name, e->name)) { + found = 1; + return e->private; + } + } + return ERR_PTR(-ENOENT); +} +EXPORT_SYMBOL_GPL(marker_get_private_data); diff --git a/kernel/module.c b/kernel/module.c index a389b423c27..3202c995007 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -105,7 +105,7 @@ void __module_put_and_exit(struct module *mod, long code) do_exit(code); } EXPORT_SYMBOL(__module_put_and_exit); - + /* Find a module section: 0 means not found. */ static unsigned int find_sec(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, @@ -179,7 +179,7 @@ static unsigned long __find_symbol(const char *name, struct module *mod; const struct kernel_symbol *ks; - /* Core kernel first. */ + /* Core kernel first. */ *owner = NULL; ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab); if (ks) { @@ -231,7 +231,7 @@ static unsigned long __find_symbol(const char *name, return ks->value; } - /* Now try modules. */ + /* Now try modules. */ list_for_each_entry(mod, &modules, list) { *owner = mod; ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms); @@ -285,7 +285,7 @@ static unsigned long __find_symbol(const char *name, } } DEBUGP("Failed to find symbol %s\n", name); - return 0; + return 0; } /* Search for module by name: must hold module_mutex. */ @@ -441,7 +441,7 @@ static int percpu_modinit(void) } return 0; -} +} __initcall(percpu_modinit); #else /* ... 
!CONFIG_SMP */ static inline void *percpu_modalloc(unsigned long size, unsigned long align, @@ -483,8 +483,8 @@ static int modinfo_##field##_exists(struct module *mod) \ } \ static void free_modinfo_##field(struct module *mod) \ { \ - kfree(mod->field); \ - mod->field = NULL; \ + kfree(mod->field); \ + mod->field = NULL; \ } \ static struct module_attribute modinfo_##field = { \ .attr = { .name = __stringify(field), .mode = 0444 }, \ @@ -990,7 +990,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect, struct module_sect_attrs *sect_attrs; struct module_sect_attr *sattr; struct attribute **gattr; - + /* Count loaded sections and allocate structures */ for (i = 0; i < nsect; i++) if (sechdrs[i].sh_flags & SHF_ALLOC) @@ -1348,14 +1348,14 @@ static int verify_export_symbols(struct module *mod) const unsigned long *crc; for (i = 0; i < mod->num_syms; i++) - if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) { + if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) { name = mod->syms[i].name; ret = -ENOEXEC; goto dup; } for (i = 0; i < mod->num_gpl_syms; i++) - if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) { + if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) { name = mod->gpl_syms[i].name; ret = -ENOEXEC; goto dup; @@ -1673,6 +1673,8 @@ static struct module *load_module(void __user *umod, unsigned int unusedcrcindex; unsigned int unusedgplindex; unsigned int unusedgplcrcindex; + unsigned int markersindex; + unsigned int markersstringsindex; struct module *mod; long err = 0; void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ @@ -1929,7 +1931,7 @@ static struct module *load_module(void __user *umod, mod->unused_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr; #ifdef CONFIG_MODVERSIONS - if ((mod->num_syms && !crcindex) || + if ((mod->num_syms && !crcindex) || (mod->num_gpl_syms && !gplcrcindex) || (mod->num_gpl_future_syms && !gplfuturecrcindex) || (mod->num_unused_syms && !unusedcrcindex) || @@ -1939,6 +1941,9 @@ static struct module *load_module(void __user *umod, add_taint_module(mod, TAINT_FORCED_MODULE); } #endif + markersindex = find_sec(hdr, sechdrs, secstrings, "__markers"); + markersstringsindex = find_sec(hdr, sechdrs, secstrings, + "__markers_strings"); /* Now do relocations. */ for (i = 1; i < hdr->e_shnum; i++) { @@ -1961,6 +1966,11 @@ static struct module *load_module(void __user *umod, if (err < 0) goto cleanup; } +#ifdef CONFIG_MARKERS + mod->markers = (void *)sechdrs[markersindex].sh_addr; + mod->num_markers = + sechdrs[markersindex].sh_size / sizeof(*mod->markers); +#endif /* Find duplicate symbols */ err = verify_export_symbols(mod); @@ -1979,6 +1989,11 @@ static struct module *load_module(void __user *umod, add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); +#ifdef CONFIG_MARKERS + if (!mod->taints) + marker_update_probe_range(mod->markers, + mod->markers + mod->num_markers, NULL, NULL); +#endif err = module_finalize(hdr, sechdrs, mod); if (err < 0) goto cleanup; @@ -2016,7 +2031,7 @@ static struct module *load_module(void __user *umod, if (err < 0) goto arch_cleanup; - err = mod_sysfs_setup(mod, + err = mod_sysfs_setup(mod, (struct kernel_param *) sechdrs[setupindex].sh_addr, sechdrs[setupindex].sh_size @@ -2028,8 +2043,8 @@ static struct module *load_module(void __user *umod, /* Size of section 0 is 0, so this works well if no unwind info. 
*/ mod->unwind_info = unwind_add_table(mod, - (void *)sechdrs[unwindex].sh_addr, - sechdrs[unwindex].sh_size); + (void *)sechdrs[unwindex].sh_addr, + sechdrs[unwindex].sh_size); /* Get rid of temporary copy */ vfree(hdr); @@ -2146,7 +2161,7 @@ static inline int within(unsigned long addr, void *start, unsigned long size) */ static inline int is_arm_mapping_symbol(const char *str) { - return str[0] == '$' && strchr("atd", str[1]) + return str[0] == '$' && strchr("atd", str[1]) && (str[2] == '\0' || str[2] == '.'); } @@ -2161,11 +2176,11 @@ static const char *get_ksymbol(struct module *mod, /* At worse, next value is at end of module */ if (within(addr, mod->module_init, mod->init_size)) nextval = (unsigned long)mod->module_init+mod->init_text_size; - else + else nextval = (unsigned long)mod->module_core+mod->core_text_size; /* Scan for closest preceeding symbol, and next symbol. (ELF - starts real symbols at 1). */ + starts real symbols at 1). */ for (i = 1; i < mod->num_symtab; i++) { if (mod->symtab[i].st_shndx == SHN_UNDEF) continue; @@ -2407,7 +2422,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) list_for_each_entry(mod, &modules, list) { if (mod->num_exentries == 0) continue; - + e = search_extable(mod->extable, mod->extable + mod->num_exentries - 1, addr); @@ -2417,7 +2432,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) preempt_enable(); /* Now, if we found one, we are running inside it now, hence - we cannot unload the module, hence no refcnt needed. */ + we cannot unload the module, hence no refcnt needed. */ return e; } @@ -2570,3 +2585,18 @@ EXPORT_SYMBOL(module_remove_driver); void struct_module(struct module *mod) { return; } EXPORT_SYMBOL(struct_module); #endif + +#ifdef CONFIG_MARKERS +void module_update_markers(struct module *probe_module, int *refcount) +{ + struct module *mod; + + mutex_lock(&module_mutex); + list_for_each_entry(mod, &modules, list) + if (!mod->taints) + marker_update_probe_range(mod->markers, + mod->markers + mod->num_markers, + probe_module, refcount); + mutex_unlock(&module_mutex); +} +#endif diff --git a/kernel/notifier.c b/kernel/notifier.c new file mode 100644 index 00000000000..4253f472f06 --- /dev/null +++ b/kernel/notifier.c @@ -0,0 +1,539 @@ +#include <linux/kdebug.h> +#include <linux/kprobes.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/rcupdate.h> +#include <linux/vmalloc.h> + +/* + * Notifier list for kernel code which wants to be called + * at shutdown. This is used to stop any idling DMA operations + * and the like. + */ +BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); + +/* + * Notifier chain core routines. The exported routines below + * are layered on top of these, with appropriate locking added. + */ + +static int notifier_chain_register(struct notifier_block **nl, + struct notifier_block *n) +{ + while ((*nl) != NULL) { + if (n->priority > (*nl)->priority) + break; + nl = &((*nl)->next); + } + n->next = *nl; + rcu_assign_pointer(*nl, n); + return 0; +} + +static int notifier_chain_unregister(struct notifier_block **nl, + struct notifier_block *n) +{ + while ((*nl) != NULL) { + if ((*nl) == n) { + rcu_assign_pointer(*nl, n->next); + return 0; + } + nl = &((*nl)->next); + } + return -ENOENT; +} + +/** + * notifier_call_chain - Informs the registered notifiers about an event. 
+ * @nl: Pointer to head of the blocking notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * @nr_to_call: Number of notifier functions to be called. Don't care + * value of this parameter is -1. + * @nr_calls: Records the number of notifications sent. Don't care + * value of this field is NULL. + * @returns: notifier_call_chain returns the value returned by the + * last notifier function called. + */ +static int __kprobes notifier_call_chain(struct notifier_block **nl, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) +{ + int ret = NOTIFY_DONE; + struct notifier_block *nb, *next_nb; + + nb = rcu_dereference(*nl); + + while (nb && nr_to_call) { + next_nb = rcu_dereference(nb->next); + ret = nb->notifier_call(nb, val, v); + + if (nr_calls) + (*nr_calls)++; + + if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) + break; + nb = next_nb; + nr_to_call--; + } + return ret; +} + +/* + * Atomic notifier chain routines. Registration and unregistration + * use a spinlock, and call_chain is synchronized by RCU (no locks). + */ + +/** + * atomic_notifier_chain_register - Add notifier to an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain + * @n: New entry in notifier chain + * + * Adds a notifier to an atomic notifier chain. + * + * Currently always returns zero. + */ +int atomic_notifier_chain_register(struct atomic_notifier_head *nh, + struct notifier_block *n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&nh->lock, flags); + ret = notifier_chain_register(&nh->head, n); + spin_unlock_irqrestore(&nh->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); + +/** + * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from an atomic notifier chain. + * + * Returns zero on success or %-ENOENT on failure. + */ +int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, + struct notifier_block *n) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&nh->lock, flags); + ret = notifier_chain_unregister(&nh->head, n); + spin_unlock_irqrestore(&nh->lock, flags); + synchronize_rcu(); + return ret; +} +EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); + +/** + * __atomic_notifier_call_chain - Call functions in an atomic notifier chain + * @nh: Pointer to head of the atomic notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See the comment for notifier_call_chain. + * @nr_calls: See the comment for notifier_call_chain. + * + * Calls each function in a notifier chain in turn. The functions + * run in an atomic context, so they must not block. + * This routine uses RCU to synchronize with changes to the chain. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain() + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. 
+ */ +int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) +{ + int ret; + + rcu_read_lock(); + ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); + +int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, + unsigned long val, void *v) +{ + return __atomic_notifier_call_chain(nh, val, v, -1, NULL); +} +EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); + +/* + * Blocking notifier chain routines. All access to the chain is + * synchronized by an rwsem. + */ + +/** + * blocking_notifier_chain_register - Add notifier to a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain + * @n: New entry in notifier chain + * + * Adds a notifier to a blocking notifier chain. + * Must be called in process context. + * + * Currently always returns zero. + */ +int blocking_notifier_chain_register(struct blocking_notifier_head *nh, + struct notifier_block *n) +{ + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call down_write(). + */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_register(&nh->head, n); + + down_write(&nh->rwsem); + ret = notifier_chain_register(&nh->head, n); + up_write(&nh->rwsem); + return ret; +} +EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); + +/** + * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from a blocking notifier chain. + * Must be called from process context. + * + * Returns zero on success or %-ENOENT on failure. + */ +int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, + struct notifier_block *n) +{ + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call down_write(). + */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_unregister(&nh->head, n); + + down_write(&nh->rwsem); + ret = notifier_chain_unregister(&nh->head, n); + up_write(&nh->rwsem); + return ret; +} +EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); + +/** + * __blocking_notifier_call_chain - Call functions in a blocking notifier chain + * @nh: Pointer to head of the blocking notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See comment for notifier_call_chain. + * @nr_calls: See comment for notifier_call_chain. + * + * Calls each function in a notifier chain in turn. The functions + * run in a process context, so they are allowed to block. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain() + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. 
+ */ +int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) +{ + int ret = NOTIFY_DONE; + + /* + * We check the head outside the lock, but if this access is + * racy then it does not matter what the result of the test + * is, we re-check the list after having taken the lock anyway: + */ + if (rcu_dereference(nh->head)) { + down_read(&nh->rwsem); + ret = notifier_call_chain(&nh->head, val, v, nr_to_call, + nr_calls); + up_read(&nh->rwsem); + } + return ret; +} +EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); + +int blocking_notifier_call_chain(struct blocking_notifier_head *nh, + unsigned long val, void *v) +{ + return __blocking_notifier_call_chain(nh, val, v, -1, NULL); +} +EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); + +/* + * Raw notifier chain routines. There is no protection; + * the caller must provide it. Use at your own risk! + */ + +/** + * raw_notifier_chain_register - Add notifier to a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @n: New entry in notifier chain + * + * Adds a notifier to a raw notifier chain. + * All locking must be provided by the caller. + * + * Currently always returns zero. + */ +int raw_notifier_chain_register(struct raw_notifier_head *nh, + struct notifier_block *n) +{ + return notifier_chain_register(&nh->head, n); +} +EXPORT_SYMBOL_GPL(raw_notifier_chain_register); + +/** + * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from a raw notifier chain. + * All locking must be provided by the caller. + * + * Returns zero on success or %-ENOENT on failure. + */ +int raw_notifier_chain_unregister(struct raw_notifier_head *nh, + struct notifier_block *n) +{ + return notifier_chain_unregister(&nh->head, n); +} +EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); + +/** + * __raw_notifier_call_chain - Call functions in a raw notifier chain + * @nh: Pointer to head of the raw notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See comment for notifier_call_chain. + * @nr_calls: See comment for notifier_call_chain + * + * Calls each function in a notifier chain in turn. The functions + * run in an undefined context. + * All locking must be provided by the caller. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then raw_notifier_call_chain() + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. + */ +int __raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) +{ + return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); +} +EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); + +int raw_notifier_call_chain(struct raw_notifier_head *nh, + unsigned long val, void *v) +{ + return __raw_notifier_call_chain(nh, val, v, -1, NULL); +} +EXPORT_SYMBOL_GPL(raw_notifier_call_chain); + +/* + * SRCU notifier chain routines. Registration and unregistration + * use a mutex, and call_chain is synchronized by SRCU (no locks). 
+ */ + +/** + * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain + * @nh: Pointer to head of the SRCU notifier chain + * @n: New entry in notifier chain + * + * Adds a notifier to an SRCU notifier chain. + * Must be called in process context. + * + * Currently always returns zero. + */ +int srcu_notifier_chain_register(struct srcu_notifier_head *nh, + struct notifier_block *n) +{ + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call mutex_lock(). + */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_register(&nh->head, n); + + mutex_lock(&nh->mutex); + ret = notifier_chain_register(&nh->head, n); + mutex_unlock(&nh->mutex); + return ret; +} +EXPORT_SYMBOL_GPL(srcu_notifier_chain_register); + +/** + * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain + * @nh: Pointer to head of the SRCU notifier chain + * @n: Entry to remove from notifier chain + * + * Removes a notifier from an SRCU notifier chain. + * Must be called from process context. + * + * Returns zero on success or %-ENOENT on failure. + */ +int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, + struct notifier_block *n) +{ + int ret; + + /* + * This code gets used during boot-up, when task switching is + * not yet working and interrupts must remain disabled. At + * such times we must not call mutex_lock(). + */ + if (unlikely(system_state == SYSTEM_BOOTING)) + return notifier_chain_unregister(&nh->head, n); + + mutex_lock(&nh->mutex); + ret = notifier_chain_unregister(&nh->head, n); + mutex_unlock(&nh->mutex); + synchronize_srcu(&nh->srcu); + return ret; +} +EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); + +/** + * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain + * @nh: Pointer to head of the SRCU notifier chain + * @val: Value passed unmodified to notifier function + * @v: Pointer passed unmodified to notifier function + * @nr_to_call: See comment for notifier_call_chain. + * @nr_calls: See comment for notifier_call_chain + * + * Calls each function in a notifier chain in turn. The functions + * run in a process context, so they are allowed to block. + * + * If the return value of the notifier can be and'ed + * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain() + * will return immediately, with the return value of + * the notifier function which halted execution. + * Otherwise the return value is the return value + * of the last notifier function called. + */ +int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v, + int nr_to_call, int *nr_calls) +{ + int ret; + int idx; + + idx = srcu_read_lock(&nh->srcu); + ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); + srcu_read_unlock(&nh->srcu, idx); + return ret; +} +EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); + +int srcu_notifier_call_chain(struct srcu_notifier_head *nh, + unsigned long val, void *v) +{ + return __srcu_notifier_call_chain(nh, val, v, -1, NULL); +} +EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); + +/** + * srcu_init_notifier_head - Initialize an SRCU notifier head + * @nh: Pointer to head of the srcu notifier chain + * + * Unlike other sorts of notifier heads, SRCU notifier heads require + * dynamic initialization. Be sure to call this routine before + * calling any of the other SRCU notifier routines for this head. 
+ * + * If an SRCU notifier head is deallocated, it must first be cleaned + * up by calling srcu_cleanup_notifier_head(). Otherwise the head's + * per-cpu data (used by the SRCU mechanism) will leak. + */ +void srcu_init_notifier_head(struct srcu_notifier_head *nh) +{ + mutex_init(&nh->mutex); + if (init_srcu_struct(&nh->srcu) < 0) + BUG(); + nh->head = NULL; +} +EXPORT_SYMBOL_GPL(srcu_init_notifier_head); + +/** + * register_reboot_notifier - Register function to be called at reboot time + * @nb: Info about notifier function to be called + * + * Registers a function with the list of functions + * to be called at reboot time. + * + * Currently always returns zero, as blocking_notifier_chain_register() + * always returns zero. + */ +int register_reboot_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&reboot_notifier_list, nb); +} +EXPORT_SYMBOL(register_reboot_notifier); + +/** + * unregister_reboot_notifier - Unregister previously registered reboot notifier + * @nb: Hook to be unregistered + * + * Unregisters a previously registered reboot + * notifier function. + * + * Returns zero on success, or %-ENOENT on failure. + */ +int unregister_reboot_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); +} +EXPORT_SYMBOL(unregister_reboot_notifier); + +static ATOMIC_NOTIFIER_HEAD(die_chain); + +int notify_die(enum die_val val, const char *str, + struct pt_regs *regs, long err, int trap, int sig) +{ + struct die_args args = { + .regs = regs, + .str = str, + .err = err, + .trapnr = trap, + .signr = sig, + + }; + return atomic_notifier_call_chain(&die_chain, val, &args); +} + +int register_die_notifier(struct notifier_block *nb) +{ + vmalloc_sync_all(); + return atomic_notifier_chain_register(&die_chain, nb); +} +EXPORT_SYMBOL_GPL(register_die_notifier); + +int unregister_die_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&die_chain, nb); +} +EXPORT_SYMBOL_GPL(unregister_die_notifier); diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c new file mode 100644 index 00000000000..aead4d69f62 --- /dev/null +++ b/kernel/ns_cgroup.c @@ -0,0 +1,100 @@ +/* + * ns_cgroup.c - namespace cgroup subsystem + * + * Copyright 2006, 2007 IBM Corp + */ + +#include <linux/module.h> +#include <linux/cgroup.h> +#include <linux/fs.h> + +struct ns_cgroup { + struct cgroup_subsys_state css; + spinlock_t lock; +}; + +struct cgroup_subsys ns_subsys; + +static inline struct ns_cgroup *cgroup_to_ns( + struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, ns_subsys_id), + struct ns_cgroup, css); +} + +int ns_cgroup_clone(struct task_struct *task) +{ + return cgroup_clone(task, &ns_subsys); +} + +/* + * Rules: + * 1. you can only enter a cgroup which is a child of your current + * cgroup + * 2. you can only place another process into a cgroup if + * a. you have CAP_SYS_ADMIN + * b. 
your cgroup is an ancestor of task's destination cgroup + * (hence either you are in the same cgroup as task, or in an + * ancestor cgroup thereof) + */ +static int ns_can_attach(struct cgroup_subsys *ss, + struct cgroup *new_cgroup, struct task_struct *task) +{ + struct cgroup *orig; + + if (current != task) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (!cgroup_is_descendant(new_cgroup)) + return -EPERM; + } + + if (atomic_read(&new_cgroup->count) != 0) + return -EPERM; + + orig = task_cgroup(task, ns_subsys_id); + if (orig && orig != new_cgroup->parent) + return -EPERM; + + return 0; +} + +/* + * Rules: you can only create a cgroup if + * 1. you are capable(CAP_SYS_ADMIN) + * 2. the target cgroup is a descendant of your own cgroup + */ +static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss, + struct cgroup *cgroup) +{ + struct ns_cgroup *ns_cgroup; + + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + if (!cgroup_is_descendant(cgroup)) + return ERR_PTR(-EPERM); + + ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL); + if (!ns_cgroup) + return ERR_PTR(-ENOMEM); + spin_lock_init(&ns_cgroup->lock); + return &ns_cgroup->css; +} + +static void ns_destroy(struct cgroup_subsys *ss, + struct cgroup *cgroup) +{ + struct ns_cgroup *ns_cgroup; + + ns_cgroup = cgroup_to_ns(cgroup); + kfree(ns_cgroup); +} + +struct cgroup_subsys ns_subsys = { + .name = "ns", + .can_attach = ns_can_attach, + .create = ns_create, + .destroy = ns_destroy, + .subsys_id = ns_subsys_id, +}; diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 049e7c0ac56..79f871bc0ef 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c @@ -26,19 +26,6 @@ static struct kmem_cache *nsproxy_cachep; struct nsproxy init_nsproxy = INIT_NSPROXY(init_nsproxy); -static inline void get_nsproxy(struct nsproxy *ns) -{ - atomic_inc(&ns->count); -} - -void get_task_namespaces(struct task_struct *tsk) -{ - struct nsproxy *ns = tsk->nsproxy; - if (ns) { - get_nsproxy(ns); - } -} - /* * creates a copy of "orig" with refcount 1. */ @@ -87,7 +74,7 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, goto out_ipc; } - new_nsp->pid_ns = copy_pid_ns(flags, tsk->nsproxy->pid_ns); + new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk)); if (IS_ERR(new_nsp->pid_ns)) { err = PTR_ERR(new_nsp->pid_ns); goto out_pid; @@ -142,7 +129,8 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) get_nsproxy(old_ns); - if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER | CLONE_NEWNET))) + if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | + CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET))) return 0; if (!capable(CAP_SYS_ADMIN)) { @@ -156,7 +144,14 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) goto out; } + err = ns_cgroup_clone(tsk); + if (err) { + put_nsproxy(new_ns); + goto out; + } + tsk->nsproxy = new_ns; + out: put_nsproxy(old_ns); return err; @@ -196,11 +191,46 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags, *new_nsp = create_new_namespaces(unshare_flags, current, new_fs ? 
new_fs : current->fs); - if (IS_ERR(*new_nsp)) + if (IS_ERR(*new_nsp)) { err = PTR_ERR(*new_nsp); + goto out; + } + + err = ns_cgroup_clone(current); + if (err) + put_nsproxy(*new_nsp); + +out: return err; } +void switch_task_namespaces(struct task_struct *p, struct nsproxy *new) +{ + struct nsproxy *ns; + + might_sleep(); + + ns = p->nsproxy; + + rcu_assign_pointer(p->nsproxy, new); + + if (ns && atomic_dec_and_test(&ns->count)) { + /* + * wait for others to get what they want from this nsproxy. + * + * cannot release this nsproxy via the call_rcu() since + * put_mnt_ns() will want to sleep + */ + synchronize_rcu(); + free_nsproxy(ns); + } +} + +void exit_task_namespaces(struct task_struct *p) +{ + switch_task_namespaces(p, NULL); +} + static int __init nsproxy_cache_init(void) { nsproxy_cachep = KMEM_CACHE(nsproxy, SLAB_PANIC); diff --git a/kernel/panic.c b/kernel/panic.c index f64f4c1ac11..3886bd8230f 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -56,14 +56,14 @@ EXPORT_SYMBOL(panic_blink); * * This function never returns. */ - + NORET_TYPE void panic(const char * fmt, ...) { long i; static char buf[1024]; va_list args; #if defined(CONFIG_S390) - unsigned long caller = (unsigned long) __builtin_return_address(0); + unsigned long caller = (unsigned long) __builtin_return_address(0); #endif /* @@ -128,7 +128,7 @@ NORET_TYPE void panic(const char * fmt, ...) } #endif #if defined(CONFIG_S390) - disabled_wait(caller); + disabled_wait(caller); #endif local_irq_enable(); for (i = 0;;) { @@ -154,7 +154,7 @@ EXPORT_SYMBOL(panic); * * The string is overwritten by the next call to print_taint(). */ - + const char *print_tainted(void) { static char buf[20]; @@ -164,7 +164,7 @@ const char *print_tainted(void) tainted & TAINT_FORCED_MODULE ? 'F' : ' ', tainted & TAINT_UNSAFE_SMP ? 'S' : ' ', tainted & TAINT_FORCED_RMMOD ? 'R' : ' ', - tainted & TAINT_MACHINE_CHECK ? 'M' : ' ', + tainted & TAINT_MACHINE_CHECK ? 'M' : ' ', tainted & TAINT_BAD_PAGE ? 'B' : ' ', tainted & TAINT_USER ? 'U' : ' ', tainted & TAINT_DIE ? 'D' : ' '); diff --git a/kernel/params.c b/kernel/params.c index 1d6aca288cd..16f269e9ddc 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -592,11 +592,17 @@ static void __init param_sysfs_builtin(void) for (i=0; i < __stop___param - __start___param; i++) { char *dot; + size_t kplen; kp = &__start___param[i]; + kplen = strlen(kp->name); /* We do not handle args without periods. */ - dot = memchr(kp->name, '.', MAX_KBUILD_MODNAME); + if (kplen > MAX_KBUILD_MODNAME) { + DEBUGP("kernel parameter name is too long: %s\n", kp->name); + continue; + } + dot = memchr(kp->name, '.', kplen); if (!dot) { DEBUGP("couldn't find period in %s\n", kp->name); continue; diff --git a/kernel/pid.c b/kernel/pid.c index c6e3f9ffff8..d1db36b9467 100644 --- a/kernel/pid.c +++ b/kernel/pid.c @@ -18,6 +18,12 @@ * allocation scenario when all but one out of 1 million PIDs possible are * allocated already: the scanning of 32 list entries and at most PAGE_SIZE * bytes. The typical fastpath is a single successful setbit. Freeing is O(1). + * + * Pid namespaces: + * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc. 
+ * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM + * Many thanks to Oleg Nesterov for comments and help + * */ #include <linux/mm.h> @@ -28,12 +34,14 @@ #include <linux/hash.h> #include <linux/pid_namespace.h> #include <linux/init_task.h> +#include <linux/syscalls.h> -#define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift) +#define pid_hashfn(nr, ns) \ + hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift) static struct hlist_head *pid_hash; static int pidhash_shift; -static struct kmem_cache *pid_cachep; struct pid init_struct_pid = INIT_STRUCT_PID; +static struct kmem_cache *pid_ns_cachep; int pid_max = PID_MAX_DEFAULT; @@ -68,8 +76,25 @@ struct pid_namespace init_pid_ns = { [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL } }, .last_pid = 0, - .child_reaper = &init_task + .level = 0, + .child_reaper = &init_task, }; +EXPORT_SYMBOL_GPL(init_pid_ns); + +int is_container_init(struct task_struct *tsk) +{ + int ret = 0; + struct pid *pid; + + rcu_read_lock(); + pid = task_pid(tsk); + if (pid != NULL && pid->numbers[pid->level].nr == 1) + ret = 1; + rcu_read_unlock(); + + return ret; +} +EXPORT_SYMBOL(is_container_init); /* * Note: disable interrupts while the pidmap_lock is held as an @@ -176,11 +201,17 @@ static int next_pidmap(struct pid_namespace *pid_ns, int last) fastcall void put_pid(struct pid *pid) { + struct pid_namespace *ns; + if (!pid) return; + + ns = pid->numbers[pid->level].ns; if ((atomic_read(&pid->count) == 1) || - atomic_dec_and_test(&pid->count)) - kmem_cache_free(pid_cachep, pid); + atomic_dec_and_test(&pid->count)) { + kmem_cache_free(ns->pid_cachep, pid); + put_pid_ns(ns); + } } EXPORT_SYMBOL_GPL(put_pid); @@ -193,60 +224,94 @@ static void delayed_put_pid(struct rcu_head *rhp) fastcall void free_pid(struct pid *pid) { /* We can be called with write_lock_irq(&tasklist_lock) held */ + int i; unsigned long flags; spin_lock_irqsave(&pidmap_lock, flags); - hlist_del_rcu(&pid->pid_chain); + for (i = 0; i <= pid->level; i++) + hlist_del_rcu(&pid->numbers[i].pid_chain); spin_unlock_irqrestore(&pidmap_lock, flags); - free_pidmap(&init_pid_ns, pid->nr); + for (i = 0; i <= pid->level; i++) + free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr); + call_rcu(&pid->rcu, delayed_put_pid); } -struct pid *alloc_pid(void) +struct pid *alloc_pid(struct pid_namespace *ns) { struct pid *pid; enum pid_type type; - int nr = -1; + int i, nr; + struct pid_namespace *tmp; + struct upid *upid; - pid = kmem_cache_alloc(pid_cachep, GFP_KERNEL); + pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL); if (!pid) goto out; - nr = alloc_pidmap(current->nsproxy->pid_ns); - if (nr < 0) - goto out_free; + tmp = ns; + for (i = ns->level; i >= 0; i--) { + nr = alloc_pidmap(tmp); + if (nr < 0) + goto out_free; + + pid->numbers[i].nr = nr; + pid->numbers[i].ns = tmp; + tmp = tmp->parent; + } + get_pid_ns(ns); + pid->level = ns->level; atomic_set(&pid->count, 1); - pid->nr = nr; for (type = 0; type < PIDTYPE_MAX; ++type) INIT_HLIST_HEAD(&pid->tasks[type]); spin_lock_irq(&pidmap_lock); - hlist_add_head_rcu(&pid->pid_chain, &pid_hash[pid_hashfn(pid->nr)]); + for (i = ns->level; i >= 0; i--) { + upid = &pid->numbers[i]; + hlist_add_head_rcu(&upid->pid_chain, + &pid_hash[pid_hashfn(upid->nr, upid->ns)]); + } spin_unlock_irq(&pidmap_lock); out: return pid; out_free: - kmem_cache_free(pid_cachep, pid); + for (i++; i <= ns->level; i++) + free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr); + + kmem_cache_free(ns->pid_cachep, pid); pid = NULL; goto out; } -struct pid * 
fastcall find_pid(int nr) +struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns) { struct hlist_node *elem; - struct pid *pid; + struct upid *pnr; + + hlist_for_each_entry_rcu(pnr, elem, + &pid_hash[pid_hashfn(nr, ns)], pid_chain) + if (pnr->nr == nr && pnr->ns == ns) + return container_of(pnr, struct pid, + numbers[ns->level]); - hlist_for_each_entry_rcu(pid, elem, - &pid_hash[pid_hashfn(nr)], pid_chain) { - if (pid->nr == nr) - return pid; - } return NULL; } +EXPORT_SYMBOL_GPL(find_pid_ns); + +struct pid *find_vpid(int nr) +{ + return find_pid_ns(nr, current->nsproxy->pid_ns); +} +EXPORT_SYMBOL_GPL(find_vpid); + +struct pid *find_pid(int nr) +{ + return find_pid_ns(nr, &init_pid_ns); +} EXPORT_SYMBOL_GPL(find_pid); /* @@ -307,12 +372,32 @@ struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type) /* * Must be called under rcu_read_lock() or with tasklist_lock read-held. */ -struct task_struct *find_task_by_pid_type(int type, int nr) +struct task_struct *find_task_by_pid_type_ns(int type, int nr, + struct pid_namespace *ns) { - return pid_task(find_pid(nr), type); + return pid_task(find_pid_ns(nr, ns), type); } -EXPORT_SYMBOL(find_task_by_pid_type); +EXPORT_SYMBOL(find_task_by_pid_type_ns); + +struct task_struct *find_task_by_pid(pid_t nr) +{ + return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns); +} +EXPORT_SYMBOL(find_task_by_pid); + +struct task_struct *find_task_by_vpid(pid_t vnr) +{ + return find_task_by_pid_type_ns(PIDTYPE_PID, vnr, + current->nsproxy->pid_ns); +} +EXPORT_SYMBOL(find_task_by_vpid); + +struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) +{ + return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns); +} +EXPORT_SYMBOL(find_task_by_pid_ns); struct pid *get_task_pid(struct task_struct *task, enum pid_type type) { @@ -339,45 +424,239 @@ struct pid *find_get_pid(pid_t nr) struct pid *pid; rcu_read_lock(); - pid = get_pid(find_pid(nr)); + pid = get_pid(find_vpid(nr)); rcu_read_unlock(); return pid; } +pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns) +{ + struct upid *upid; + pid_t nr = 0; + + if (pid && ns->level <= pid->level) { + upid = &pid->numbers[ns->level]; + if (upid->ns == ns) + nr = upid->nr; + } + return nr; +} + +pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return pid_nr_ns(task_pid(tsk), ns); +} +EXPORT_SYMBOL(task_pid_nr_ns); + +pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return pid_nr_ns(task_tgid(tsk), ns); +} +EXPORT_SYMBOL(task_tgid_nr_ns); + +pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return pid_nr_ns(task_pgrp(tsk), ns); +} +EXPORT_SYMBOL(task_pgrp_nr_ns); + +pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) +{ + return pid_nr_ns(task_session(tsk), ns); +} +EXPORT_SYMBOL(task_session_nr_ns); + /* * Used by proc to find the first pid that is greater then or equal to nr. * * If there is a pid at nr this function is exactly the same as find_pid. 
*/ -struct pid *find_ge_pid(int nr) +struct pid *find_ge_pid(int nr, struct pid_namespace *ns) { struct pid *pid; do { - pid = find_pid(nr); + pid = find_pid_ns(nr, ns); if (pid) break; - nr = next_pidmap(current->nsproxy->pid_ns, nr); + nr = next_pidmap(ns, nr); } while (nr > 0); return pid; } EXPORT_SYMBOL_GPL(find_get_pid); +struct pid_cache { + int nr_ids; + char name[16]; + struct kmem_cache *cachep; + struct list_head list; +}; + +static LIST_HEAD(pid_caches_lh); +static DEFINE_MUTEX(pid_caches_mutex); + +/* + * creates the kmem cache to allocate pids from. + * @nr_ids: the number of numerical ids this pid will have to carry + */ + +static struct kmem_cache *create_pid_cachep(int nr_ids) +{ + struct pid_cache *pcache; + struct kmem_cache *cachep; + + mutex_lock(&pid_caches_mutex); + list_for_each_entry (pcache, &pid_caches_lh, list) + if (pcache->nr_ids == nr_ids) + goto out; + + pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL); + if (pcache == NULL) + goto err_alloc; + + snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids); + cachep = kmem_cache_create(pcache->name, + sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (cachep == NULL) + goto err_cachep; + + pcache->nr_ids = nr_ids; + pcache->cachep = cachep; + list_add(&pcache->list, &pid_caches_lh); +out: + mutex_unlock(&pid_caches_mutex); + return pcache->cachep; + +err_cachep: + kfree(pcache); +err_alloc: + mutex_unlock(&pid_caches_mutex); + return NULL; +} + +static struct pid_namespace *create_pid_namespace(int level) +{ + struct pid_namespace *ns; + int i; + + ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL); + if (ns == NULL) + goto out; + + ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!ns->pidmap[0].page) + goto out_free; + + ns->pid_cachep = create_pid_cachep(level + 1); + if (ns->pid_cachep == NULL) + goto out_free_map; + + kref_init(&ns->kref); + ns->last_pid = 0; + ns->child_reaper = NULL; + ns->level = level; + + set_bit(0, ns->pidmap[0].page); + atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1); + + for (i = 1; i < PIDMAP_ENTRIES; i++) { + ns->pidmap[i].page = 0; + atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE); + } + + return ns; + +out_free_map: + kfree(ns->pidmap[0].page); +out_free: + kmem_cache_free(pid_ns_cachep, ns); +out: + return ERR_PTR(-ENOMEM); +} + +static void destroy_pid_namespace(struct pid_namespace *ns) +{ + int i; + + for (i = 0; i < PIDMAP_ENTRIES; i++) + kfree(ns->pidmap[i].page); + kmem_cache_free(pid_ns_cachep, ns); +} + struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns) { + struct pid_namespace *new_ns; + BUG_ON(!old_ns); - get_pid_ns(old_ns); - return old_ns; + new_ns = get_pid_ns(old_ns); + if (!(flags & CLONE_NEWPID)) + goto out; + + new_ns = ERR_PTR(-EINVAL); + if (flags & CLONE_THREAD) + goto out_put; + + new_ns = create_pid_namespace(old_ns->level + 1); + if (!IS_ERR(new_ns)) + new_ns->parent = get_pid_ns(old_ns); + +out_put: + put_pid_ns(old_ns); +out: + return new_ns; } void free_pid_ns(struct kref *kref) { - struct pid_namespace *ns; + struct pid_namespace *ns, *parent; ns = container_of(kref, struct pid_namespace, kref); - kfree(ns); + + parent = ns->parent; + destroy_pid_namespace(ns); + + if (parent != NULL) + put_pid_ns(parent); +} + +void zap_pid_ns_processes(struct pid_namespace *pid_ns) +{ + int nr; + int rc; + + /* + * The last thread in the cgroup-init thread group is terminating. + * Find remaining pid_ts in the namespace, signal and wait for them + * to exit. 
+ * + * Note: This signals each threads in the namespace - even those that + * belong to the same thread group, To avoid this, we would have + * to walk the entire tasklist looking a processes in this + * namespace, but that could be unnecessarily expensive if the + * pid namespace has just a few processes. Or we need to + * maintain a tasklist for each pid namespace. + * + */ + read_lock(&tasklist_lock); + nr = next_pidmap(pid_ns, 1); + while (nr > 0) { + kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr); + nr = next_pidmap(pid_ns, nr); + } + read_unlock(&tasklist_lock); + + do { + clear_thread_flag(TIF_SIGPENDING); + rc = sys_wait4(-1, NULL, __WALL, NULL); + } while (rc != -ECHILD); + + + /* Child reaper for the pid namespace is going away */ + pid_ns->child_reaper = NULL; + return; } /* @@ -412,5 +691,9 @@ void __init pidmap_init(void) set_bit(0, init_pid_ns.pidmap[0].page); atomic_dec(&init_pid_ns.pidmap[0].nr_free); - pid_cachep = KMEM_CACHE(pid, SLAB_PANIC); + init_pid_ns.pid_cachep = create_pid_cachep(1); + if (init_pid_ns.pid_cachep == NULL) + panic("Can't create pid_1 cachep\n"); + + pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC); } diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index b53c8fcd9d8..68c96376e84 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -21,8 +21,8 @@ static int check_clock(const clockid_t which_clock) read_lock(&tasklist_lock); p = find_task_by_pid(pid); - if (!p || (CPUCLOCK_PERTHREAD(which_clock) ? - p->tgid != current->tgid : p->tgid != pid)) { + if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ? + same_thread_group(p, current) : thread_group_leader(p))) { error = -EINVAL; } read_unlock(&tasklist_lock); @@ -308,13 +308,13 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp) p = find_task_by_pid(pid); if (p) { if (CPUCLOCK_PERTHREAD(which_clock)) { - if (p->tgid == current->tgid) { + if (same_thread_group(p, current)) { error = cpu_clock_sample(which_clock, p, &rtn); } } else { read_lock(&tasklist_lock); - if (p->tgid == pid && p->signal) { + if (thread_group_leader(p) && p->signal) { error = cpu_clock_sample_group(which_clock, p, &rtn); @@ -355,7 +355,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) p = current; } else { p = find_task_by_pid(pid); - if (p && p->tgid != current->tgid) + if (p && !same_thread_group(p, current)) p = NULL; } } else { @@ -363,7 +363,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer) p = current->group_leader; } else { p = find_task_by_pid(pid); - if (p && p->tgid != pid) + if (p && !thread_group_leader(p)) p = NULL; } } diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index d71ed09fe1d..35b4bbfc78f 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -404,7 +404,7 @@ static struct task_struct * good_sigevent(sigevent_t * event) if ((event->sigev_notify & SIGEV_THREAD_ID ) && (!(rtn = find_task_by_pid(event->sigev_notify_thread_id)) || - rtn->tgid != current->tgid || + !same_thread_group(rtn, current) || (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) return NULL; @@ -608,7 +608,7 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) spin_lock(&timr->it_lock); if ((timr->it_id != timer_id) || !(timr->it_process) || - timr->it_process->tgid != current->tgid) { + !same_thread_group(timr->it_process, current)) { spin_unlock(&timr->it_lock); spin_unlock_irqrestore(&idr_lock, *flags); timr = NULL; @@ -981,9 +981,20 @@ sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp) 
static int common_nsleep(const clockid_t which_clock, int flags, struct timespec *tsave, struct timespec __user *rmtp) { - return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ? - HRTIMER_MODE_ABS : HRTIMER_MODE_REL, - which_clock); + struct timespec rmt; + int ret; + + ret = hrtimer_nanosleep(tsave, rmtp ? &rmt : NULL, + flags & TIMER_ABSTIME ? + HRTIMER_MODE_ABS : HRTIMER_MODE_REL, + which_clock); + + if (ret && rmtp) { + if (copy_to_user(rmtp, &rmt, sizeof(*rmtp))) + return -EFAULT; + } + + return ret; } asmlinkage long diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 14b0e10dc95..8e186c67814 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -44,17 +44,6 @@ config PM_VERBOSE ---help--- This option enables verbose messages from the Power Management code. -config DISABLE_CONSOLE_SUSPEND - bool "Keep console(s) enabled during suspend/resume (DANGEROUS)" - depends on PM_DEBUG && PM_SLEEP - default n - ---help--- - This option turns off the console suspend mechanism that prevents - debug messages from reaching the console during the suspend/resume - operations. This may be helpful when debugging device drivers' - suspend/resume routines, but may itself lead to problems, for example - if netconsole is used. - config PM_TRACE bool "Suspend/resume event tracing" depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL diff --git a/kernel/power/disk.c b/kernel/power/disk.c index eb72255b5c8..8b15f777010 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c @@ -45,17 +45,18 @@ enum { static int hibernation_mode = HIBERNATION_SHUTDOWN; -static struct hibernation_ops *hibernation_ops; +static struct platform_hibernation_ops *hibernation_ops; /** * hibernation_set_ops - set the global hibernate operations * @ops: the hibernation operations to use in subsequent hibernation transitions */ -void hibernation_set_ops(struct hibernation_ops *ops) +void hibernation_set_ops(struct platform_hibernation_ops *ops) { - if (ops && !(ops->prepare && ops->enter && ops->finish - && ops->pre_restore && ops->restore_cleanup)) { + if (ops && !(ops->start && ops->pre_snapshot && ops->finish + && ops->prepare && ops->enter && ops->pre_restore + && ops->restore_cleanup)) { WARN_ON(1); return; } @@ -69,16 +70,37 @@ void hibernation_set_ops(struct hibernation_ops *ops) mutex_unlock(&pm_mutex); } +/** + * platform_start - tell the platform driver that we're starting + * hibernation + */ + +static int platform_start(int platform_mode) +{ + return (platform_mode && hibernation_ops) ? + hibernation_ops->start() : 0; +} /** - * platform_prepare - prepare the machine for hibernation using the + * platform_pre_snapshot - prepare the machine for hibernation using the * platform driver if so configured and return an error code if it fails */ -static int platform_prepare(int platform_mode) +static int platform_pre_snapshot(int platform_mode) { return (platform_mode && hibernation_ops) ? - hibernation_ops->prepare() : 0; + hibernation_ops->pre_snapshot() : 0; +} + +/** + * platform_leave - prepare the machine for switching to the normal mode + * of operation using the platform driver (called with interrupts disabled) + */ + +static void platform_leave(int platform_mode) +{ + if (platform_mode && hibernation_ops) + hibernation_ops->leave(); } /** @@ -118,6 +140,51 @@ static void platform_restore_cleanup(int platform_mode) } /** + * create_image - freeze devices that need to be frozen with interrupts + * off, create the hibernation image and thaw those devices. 
Control + * reappears in this routine after a restore. + */ + +int create_image(int platform_mode) +{ + int error; + + error = arch_prepare_suspend(); + if (error) + return error; + + local_irq_disable(); + /* At this point, device_suspend() has been called, but *not* + * device_power_down(). We *must* call device_power_down() now. + * Otherwise, drivers for some devices (e.g. interrupt controllers) + * become desynchronized with the actual state of the hardware + * at resume time, and evil weirdness ensues. + */ + error = device_power_down(PMSG_FREEZE); + if (error) { + printk(KERN_ERR "Some devices failed to power down, " + KERN_ERR "aborting suspend\n"); + goto Enable_irqs; + } + + save_processor_state(); + error = swsusp_arch_suspend(); + if (error) + printk(KERN_ERR "Error %d while creating the image\n", error); + /* Restore control flow magically appears here */ + restore_processor_state(); + if (!in_suspend) + platform_leave(platform_mode); + /* NOTE: device_power_up() is just a resume() for devices + * that suspended with irqs off ... no overall powerup. + */ + device_power_up(); + Enable_irqs: + local_irq_enable(); + return error; +} + +/** * hibernation_snapshot - quiesce devices and create the hibernation * snapshot image. * @platform_mode - if set, use the platform driver, if available, to @@ -135,12 +202,16 @@ int hibernation_snapshot(int platform_mode) if (error) return error; + error = platform_start(platform_mode); + if (error) + return error; + suspend_console(); error = device_suspend(PMSG_FREEZE); if (error) goto Resume_console; - error = platform_prepare(platform_mode); + error = platform_pre_snapshot(platform_mode); if (error) goto Resume_devices; @@ -148,7 +219,7 @@ int hibernation_snapshot(int platform_mode) if (!error) { if (hibernation_mode != HIBERNATION_TEST) { in_suspend = 1; - error = swsusp_suspend(); + error = create_image(platform_mode); /* Control returns here after successful restore */ } else { printk("swsusp debug: Waiting for 5 seconds.\n"); @@ -207,21 +278,50 @@ int hibernation_platform_enter(void) { int error; - if (hibernation_ops) { - kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); - /* - * We have cancelled the power transition by running - * hibernation_ops->finish() before saving the image, so we - * should let the firmware know that we're going to enter the - * sleep state after all - */ - error = hibernation_ops->prepare(); - sysdev_shutdown(); - if (!error) - error = hibernation_ops->enter(); - } else { - error = -ENOSYS; + if (!hibernation_ops) + return -ENOSYS; + + /* + * We have cancelled the power transition by running + * hibernation_ops->finish() before saving the image, so we should let + * the firmware know that we're going to enter the sleep state after all + */ + error = hibernation_ops->start(); + if (error) + return error; + + suspend_console(); + error = device_suspend(PMSG_SUSPEND); + if (error) + goto Resume_console; + + error = hibernation_ops->prepare(); + if (error) + goto Resume_devices; + + error = disable_nonboot_cpus(); + if (error) + goto Finish; + + local_irq_disable(); + error = device_power_down(PMSG_SUSPEND); + if (!error) { + hibernation_ops->enter(); + /* We should never get here */ + while (1); } + local_irq_enable(); + + /* + * We don't need to reenable the nonboot CPUs or resume consoles, since + * the system is going to be halted anyway. 
+ */ + Finish: + hibernation_ops->finish(); + Resume_devices: + device_resume(); + Resume_console: + resume_console(); return error; } @@ -238,14 +338,14 @@ static void power_down(void) case HIBERNATION_TEST: case HIBERNATION_TESTPROC: break; - case HIBERNATION_SHUTDOWN: - kernel_power_off(); - break; case HIBERNATION_REBOOT: kernel_restart(NULL); break; case HIBERNATION_PLATFORM: hibernation_platform_enter(); + case HIBERNATION_SHUTDOWN: + kernel_power_off(); + break; } kernel_halt(); /* @@ -298,6 +398,10 @@ int hibernate(void) if (error) goto Exit; + printk("Syncing filesystems ... "); + sys_sync(); + printk("done.\n"); + error = prepare_processes(); if (error) goto Finish; diff --git a/kernel/power/main.c b/kernel/power/main.c index 350b485b3b6..3cdf95b1dc9 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -20,6 +20,7 @@ #include <linux/resume-trace.h> #include <linux/freezer.h> #include <linux/vmstat.h> +#include <linux/syscalls.h> #include "power.h" @@ -32,39 +33,32 @@ DEFINE_MUTEX(pm_mutex); /* This is just an arbitrary number */ #define FREE_PAGE_NUMBER (100) -struct pm_ops *pm_ops; +static struct platform_suspend_ops *suspend_ops; /** - * pm_set_ops - Set the global power method table. + * suspend_set_ops - Set the global suspend method table. * @ops: Pointer to ops structure. */ -void pm_set_ops(struct pm_ops * ops) +void suspend_set_ops(struct platform_suspend_ops *ops) { mutex_lock(&pm_mutex); - pm_ops = ops; + suspend_ops = ops; mutex_unlock(&pm_mutex); } /** - * pm_valid_only_mem - generic memory-only valid callback + * suspend_valid_only_mem - generic memory-only valid callback * - * pm_ops drivers that implement mem suspend only and only need + * Platform drivers that implement mem suspend only and only need * to check for that in their .valid callback can use this instead * of rolling their own .valid callback. */ -int pm_valid_only_mem(suspend_state_t state) +int suspend_valid_only_mem(suspend_state_t state) { return state == PM_SUSPEND_MEM; } - -static inline void pm_finish(suspend_state_t state) -{ - if (pm_ops->finish) - pm_ops->finish(state); -} - /** * suspend_prepare - Do prep work before entering low-power state. * @@ -76,7 +70,7 @@ static int suspend_prepare(void) int error; unsigned int free_pages; - if (!pm_ops || !pm_ops->enter) + if (!suspend_ops || !suspend_ops->enter) return -EPERM; error = pm_notifier_call_chain(PM_SUSPEND_PREPARE); @@ -128,7 +122,7 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void) * * This function should be called after devices have been suspended. 
*/ -int suspend_enter(suspend_state_t state) +static int suspend_enter(suspend_state_t state) { int error = 0; @@ -139,7 +133,7 @@ int suspend_enter(suspend_state_t state) printk(KERN_ERR "Some devices failed to power down\n"); goto Done; } - error = pm_ops->enter(state); + error = suspend_ops->enter(state); device_power_up(); Done: arch_suspend_enable_irqs(); @@ -156,11 +150,11 @@ int suspend_devices_and_enter(suspend_state_t state) { int error; - if (!pm_ops) + if (!suspend_ops) return -ENOSYS; - if (pm_ops->set_target) { - error = pm_ops->set_target(state); + if (suspend_ops->set_target) { + error = suspend_ops->set_target(state); if (error) return error; } @@ -170,8 +164,8 @@ int suspend_devices_and_enter(suspend_state_t state) printk(KERN_ERR "Some devices failed to suspend\n"); goto Resume_console; } - if (pm_ops->prepare) { - error = pm_ops->prepare(state); + if (suspend_ops->prepare) { + error = suspend_ops->prepare(); if (error) goto Resume_devices; } @@ -180,7 +174,8 @@ int suspend_devices_and_enter(suspend_state_t state) suspend_enter(state); enable_nonboot_cpus(); - pm_finish(state); + if (suspend_ops->finish) + suspend_ops->finish(); Resume_devices: device_resume(); Resume_console: @@ -214,7 +209,7 @@ static inline int valid_state(suspend_state_t state) /* All states need lowlevel support and need to be valid * to the lowlevel implementation, no valid callback * implies that none are valid. */ - if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state)) + if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state)) return 0; return 1; } @@ -236,9 +231,14 @@ static int enter_state(suspend_state_t state) if (!valid_state(state)) return -ENODEV; + if (!mutex_trylock(&pm_mutex)) return -EBUSY; + printk("Syncing filesystems ... "); + sys_sync(); + printk("done.\n"); + pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); if ((error = suspend_prepare())) goto Unlock; diff --git a/kernel/power/power.h b/kernel/power/power.h index 95fbf2dd3fe..195dc461176 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -11,14 +11,32 @@ struct swsusp_info { unsigned long size; } __attribute__((aligned(PAGE_SIZE))); +#ifdef CONFIG_HIBERNATION +#ifdef CONFIG_ARCH_HIBERNATION_HEADER +/* Maximum size of architecture specific data in a hibernation header */ +#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4) +extern int arch_hibernation_header_save(void *addr, unsigned int max_size); +extern int arch_hibernation_header_restore(void *addr); + +static inline int init_header_complete(struct swsusp_info *info) +{ + return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE); +} + +static inline char *check_image_kernel(struct swsusp_info *info) +{ + return arch_hibernation_header_restore(info) ? + "architecture specific data" : NULL; +} +#endif /* CONFIG_ARCH_HIBERNATION_HEADER */ -#ifdef CONFIG_HIBERNATION /* * Keep some memory free so that I/O operations can succeed without paging * [Might this be more than 4 MB?] */ #define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT) + /* * Keep 1 MB of memory free so that device drivers can allocate some pages in * their .suspend() routines without breaking the suspend to disk. 
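For reference, the reworked suspend core above is driven entirely through the renamed struct platform_suspend_ops: a platform driver supplies the callbacks that suspend_devices_and_enter() invokes (.valid, optionally .set_target/.prepare/.finish, and .enter) and hands them to suspend_set_ops(). The snippet below is only an illustrative sketch, not part of this patch; the acme_* names are hypothetical and the declarations are assumed to live in <linux/suspend.h>.

#include <linux/init.h>
#include <linux/suspend.h>	/* assumed home of platform_suspend_ops/suspend_set_ops */

/* Enter the platform's memory-sleep state; called from suspend_enter(). */
static int acme_suspend_enter(suspend_state_t state)
{
	/* program platform-specific sleep registers here */
	return 0;
}

static struct platform_suspend_ops acme_suspend_ops = {
	/* only PM_SUSPEND_MEM is supported, so reuse the generic helper */
	.valid	= suspend_valid_only_mem,
	.enter	= acme_suspend_enter,
	/* .set_target, .prepare and .finish are optional in the code above */
};

static int __init acme_pm_init(void)
{
	suspend_set_ops(&acme_suspend_ops);
	return 0;
}
late_initcall(acme_pm_init);
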
@@ -165,7 +183,6 @@ extern int swsusp_swap_in_use(void); extern int swsusp_check(void); extern int swsusp_shrink_memory(void); extern void swsusp_free(void); -extern int swsusp_suspend(void); extern int swsusp_resume(void); extern int swsusp_read(unsigned int *flags_p); extern int swsusp_write(unsigned int flags); diff --git a/kernel/power/process.c b/kernel/power/process.c index 3434940a3df..6533923e711 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -75,21 +75,79 @@ void refrigerator(void) __set_current_state(save); } -static void freeze_task(struct task_struct *p) +static void fake_signal_wake_up(struct task_struct *p, int resume) { unsigned long flags; - if (!freezing(p)) { + spin_lock_irqsave(&p->sighand->siglock, flags); + signal_wake_up(p, resume); + spin_unlock_irqrestore(&p->sighand->siglock, flags); +} + +static void send_fake_signal(struct task_struct *p) +{ + if (p->state == TASK_STOPPED) + force_sig_specific(SIGSTOP, p); + fake_signal_wake_up(p, p->state == TASK_STOPPED); +} + +static int has_mm(struct task_struct *p) +{ + return (p->mm && !(p->flags & PF_BORROWED_MM)); +} + +/** + * freeze_task - send a freeze request to given task + * @p: task to send the request to + * @with_mm_only: if set, the request will only be sent if the task has its + * own mm + * Return value: 0, if @with_mm_only is set and the task has no mm of its + * own or the task is frozen, 1, otherwise + * + * The freeze request is sent by seting the tasks's TIF_FREEZE flag and + * either sending a fake signal to it or waking it up, depending on whether + * or not it has its own mm (ie. it is a user land task). If @with_mm_only + * is set and the task has no mm of its own (ie. it is a kernel thread), + * its TIF_FREEZE flag should not be set. + * + * The task_lock() is necessary to prevent races with exit_mm() or + * use_mm()/unuse_mm() from occuring. + */ +static int freeze_task(struct task_struct *p, int with_mm_only) +{ + int ret = 1; + + task_lock(p); + if (freezing(p)) { + if (has_mm(p)) { + if (!signal_pending(p)) + fake_signal_wake_up(p, 0); + } else { + if (with_mm_only) + ret = 0; + else + wake_up_state(p, TASK_INTERRUPTIBLE); + } + } else { rmb(); - if (!frozen(p)) { - set_freeze_flag(p); - if (p->state == TASK_STOPPED) - force_sig_specific(SIGSTOP, p); - spin_lock_irqsave(&p->sighand->siglock, flags); - signal_wake_up(p, p->state == TASK_STOPPED); - spin_unlock_irqrestore(&p->sighand->siglock, flags); + if (frozen(p)) { + ret = 0; + } else { + if (has_mm(p)) { + set_freeze_flag(p); + send_fake_signal(p); + } else { + if (with_mm_only) { + ret = 0; + } else { + set_freeze_flag(p); + wake_up_state(p, TASK_INTERRUPTIBLE); + } + } } } + task_unlock(p); + return ret; } static void cancel_freezing(struct task_struct *p) @@ -110,6 +168,11 @@ static int try_to_freeze_tasks(int freeze_user_space) struct task_struct *g, *p; unsigned long end_time; unsigned int todo; + struct timeval start, end; + s64 elapsed_csecs64; + unsigned int elapsed_csecs; + + do_gettimeofday(&start); end_time = jiffies + TIMEOUT; do { @@ -119,31 +182,14 @@ static int try_to_freeze_tasks(int freeze_user_space) if (frozen(p) || !freezeable(p)) continue; - if (freeze_user_space) { - if (p->state == TASK_TRACED && - frozen(p->parent)) { - cancel_freezing(p); - continue; - } - /* - * Kernel threads should not have TIF_FREEZE set - * at this point, so we must ensure that either - * p->mm is not NULL *and* PF_BORROWED_MM is - * unset, or TIF_FRREZE is left unset. 
- * The task_lock() is necessary to prevent races - * with exit_mm() or use_mm()/unuse_mm() from - * occuring. - */ - task_lock(p); - if (!p->mm || (p->flags & PF_BORROWED_MM)) { - task_unlock(p); - continue; - } - freeze_task(p); - task_unlock(p); - } else { - freeze_task(p); + if (p->state == TASK_TRACED && frozen(p->parent)) { + cancel_freezing(p); + continue; } + + if (!freeze_task(p, freeze_user_space)) + continue; + if (!freezer_should_skip(p)) todo++; } while_each_thread(g, p); @@ -153,6 +199,11 @@ static int try_to_freeze_tasks(int freeze_user_space) break; } while (todo); + do_gettimeofday(&end); + elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start); + do_div(elapsed_csecs64, NSEC_PER_SEC / 100); + elapsed_csecs = elapsed_csecs64; + if (todo) { /* This does not unfreeze processes that are already frozen * (we have slightly ugly calling convention in that respect, @@ -160,10 +211,9 @@ static int try_to_freeze_tasks(int freeze_user_space) * but it cleans up leftover PF_FREEZE requests. */ printk("\n"); - printk(KERN_ERR "Freezing of %s timed out after %d seconds " + printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " "(%d tasks refusing to freeze):\n", - freeze_user_space ? "user space " : "tasks ", - TIMEOUT / HZ, todo); + elapsed_csecs / 100, elapsed_csecs % 100, todo); show_state(); read_lock(&tasklist_lock); do_each_thread(g, p) { @@ -174,6 +224,9 @@ static int try_to_freeze_tasks(int freeze_user_space) task_unlock(p); } while_each_thread(g, p); read_unlock(&tasklist_lock); + } else { + printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100, + elapsed_csecs % 100); } return todo ? -EBUSY : 0; @@ -186,19 +239,21 @@ int freeze_processes(void) { int error; - printk("Stopping tasks ... "); + printk("Freezing user space processes ... "); error = try_to_freeze_tasks(FREEZER_USER_SPACE); if (error) - return error; + goto Exit; + printk("done.\n"); - sys_sync(); + printk("Freezing remaining freezable tasks ... 
"); error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS); if (error) - return error; - - printk("done.\n"); + goto Exit; + printk("done."); + Exit: BUG_ON(in_atomic()); - return 0; + printk("\n"); + return error; } static void thaw_tasks(int thaw_user_space) diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index a686590d88c..ccc95ac07be 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1239,17 +1239,39 @@ asmlinkage int swsusp_save(void) return 0; } -static void init_header(struct swsusp_info *info) +#ifndef CONFIG_ARCH_HIBERNATION_HEADER +static int init_header_complete(struct swsusp_info *info) { - memset(info, 0, sizeof(struct swsusp_info)); + memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname)); info->version_code = LINUX_VERSION_CODE; + return 0; +} + +static char *check_image_kernel(struct swsusp_info *info) +{ + if (info->version_code != LINUX_VERSION_CODE) + return "kernel version"; + if (strcmp(info->uts.sysname,init_utsname()->sysname)) + return "system type"; + if (strcmp(info->uts.release,init_utsname()->release)) + return "kernel release"; + if (strcmp(info->uts.version,init_utsname()->version)) + return "version"; + if (strcmp(info->uts.machine,init_utsname()->machine)) + return "machine"; + return NULL; +} +#endif /* CONFIG_ARCH_HIBERNATION_HEADER */ + +static int init_header(struct swsusp_info *info) +{ + memset(info, 0, sizeof(struct swsusp_info)); info->num_physpages = num_physpages; - memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname)); - info->cpus = num_online_cpus(); info->image_pages = nr_copy_pages; info->pages = nr_copy_pages + nr_meta_pages + 1; info->size = info->pages; info->size <<= PAGE_SHIFT; + return init_header_complete(info); } /** @@ -1303,7 +1325,11 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count) return -ENOMEM; } if (!handle->offset) { - init_header((struct swsusp_info *)buffer); + int error; + + error = init_header((struct swsusp_info *)buffer); + if (error) + return error; handle->buffer = buffer; memory_bm_position_reset(&orig_bm); memory_bm_position_reset(©_bm); @@ -1394,22 +1420,13 @@ duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src) } } -static inline int check_header(struct swsusp_info *info) +static int check_header(struct swsusp_info *info) { - char *reason = NULL; + char *reason; - if (info->version_code != LINUX_VERSION_CODE) - reason = "kernel version"; - if (info->num_physpages != num_physpages) + reason = check_image_kernel(info); + if (!reason && info->num_physpages != num_physpages) reason = "memory size"; - if (strcmp(info->uts.sysname,init_utsname()->sysname)) - reason = "system type"; - if (strcmp(info->uts.release,init_utsname()->release)) - reason = "kernel release"; - if (strcmp(info->uts.version,init_utsname()->version)) - reason = "version"; - if (strcmp(info->uts.machine,init_utsname()->machine)) - reason = "machine"; if (reason) { printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); return -EPERM; diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index 5da304c8f1f..e1722d3155f 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c @@ -270,39 +270,6 @@ int swsusp_shrink_memory(void) return 0; } -int swsusp_suspend(void) -{ - int error; - - if ((error = arch_prepare_suspend())) - return error; - - local_irq_disable(); - /* At this point, device_suspend() has been called, but *not* - * device_power_down(). We *must* device_power_down() now. - * Otherwise, drivers for some devices (e.g. 
interrupt controllers) - * become desynchronized with the actual state of the hardware - * at resume time, and evil weirdness ensues. - */ - if ((error = device_power_down(PMSG_FREEZE))) { - printk(KERN_ERR "Some devices failed to power down, aborting suspend\n"); - goto Enable_irqs; - } - - save_processor_state(); - if ((error = swsusp_arch_suspend())) - printk(KERN_ERR "Error %d suspending\n", error); - /* Restore control flow magically appears here */ - restore_processor_state(); - /* NOTE: device_power_up() is just a resume() for devices - * that suspended with irqs off ... no overall powerup. - */ - device_power_up(); - Enable_irqs: - local_irq_enable(); - return error; -} - int swsusp_resume(void) { int error; diff --git a/kernel/power/user.c b/kernel/power/user.c index bd0723a7df3..5bd321bcbb7 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c @@ -153,6 +153,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, mutex_lock(&pm_mutex); error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); if (!error) { + printk("Syncing filesystems ... "); + sys_sync(); + printk("done.\n"); + error = freeze_processes(); if (error) thaw_processes(); diff --git a/kernel/printk.c b/kernel/printk.c index 52493474f0a..a30fe33de39 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -862,7 +862,16 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha return -1; } -#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND +int console_suspend_enabled = 1; +EXPORT_SYMBOL(console_suspend_enabled); + +static int __init console_suspend_disable(char *str) +{ + console_suspend_enabled = 0; + return 1; +} +__setup("no_console_suspend", console_suspend_disable); + /** * suspend_console - suspend the console subsystem * @@ -870,6 +879,8 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha */ void suspend_console(void) { + if (!console_suspend_enabled) + return; printk("Suspending console(s)\n"); acquire_console_sem(); console_suspended = 1; @@ -877,10 +888,11 @@ void suspend_console(void) void resume_console(void) { + if (!console_suspend_enabled) + return; console_suspended = 0; release_console_sem(); } -#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */ /** * acquire_console_sem - lock the console system for exclusive use. 
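
[Editor's note] The try_to_freeze_tasks() rework in kernel/power/process.c above times the freezing pass and reports it in hundredths of a second, printed as "%d.%02d seconds". A minimal userspace sketch of the same arithmetic, using gettimeofday() and plain division in place of the kernel's do_gettimeofday()/timeval_to_ns()/do_div() (the usleep() stand-in for the freezing loop is an assumption of this illustration only):

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

int main(void)
{
	struct timeval start, end;
	long long elapsed_ns;
	unsigned int elapsed_csecs;

	gettimeofday(&start, NULL);
	usleep(1234567);		/* stand-in for the freezing loop */
	gettimeofday(&end, NULL);

	/* same idea as timeval_to_ns(&end) - timeval_to_ns(&start) */
	elapsed_ns = (end.tv_sec - start.tv_sec) * 1000000000LL +
		     (end.tv_usec - start.tv_usec) * 1000LL;

	/* kernel code does do_div(elapsed_csecs64, NSEC_PER_SEC / 100) */
	elapsed_csecs = elapsed_ns / (1000000000 / 100);

	printf("(elapsed %u.%02u seconds)\n",
	       elapsed_csecs / 100, elapsed_csecs % 100);
	return 0;
}

Keeping the value in centiseconds and splitting it with /100 and %100 at print time avoids any floating point, which is why the kernel message above uses the same two-field format instead of printing a float.
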
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index a73ebd3b9d4..7c76f2ffaea 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -19,6 +19,7 @@ #include <linux/security.h> #include <linux/signal.h> #include <linux/audit.h> +#include <linux/pid_namespace.h> #include <asm/pgtable.h> #include <asm/uaccess.h> @@ -168,7 +169,7 @@ int ptrace_attach(struct task_struct *task) retval = -EPERM; if (task->pid <= 1) goto out; - if (task->tgid == current->tgid) + if (same_thread_group(task, current)) goto out; repeat: @@ -443,7 +444,7 @@ struct task_struct *ptrace_get_task_struct(pid_t pid) return ERR_PTR(-EPERM); read_lock(&tasklist_lock); - child = find_task_by_pid(pid); + child = find_task_by_vpid(pid); if (child) get_task_struct(child); diff --git a/kernel/relay.c b/kernel/relay.c index ad855017bc5..61134eb7a0c 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -370,7 +370,7 @@ void relay_reset(struct rchan *chan) if (!chan) return; - if (chan->is_global && chan->buf[0]) { + if (chan->is_global && chan->buf[0]) { __relay_reset(chan->buf[0], 0); return; } @@ -850,13 +850,13 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos) buf->subbufs_consumed = consumed; buf->bytes_consumed = 0; } - + produced = (produced % n_subbufs) * subbuf_size + buf->offset; consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed; if (consumed > produced) produced += n_subbufs * subbuf_size; - + if (consumed == produced) return 0; diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c index 6b0703db152..56d73cb8826 100644 --- a/kernel/rtmutex-debug.c +++ b/kernel/rtmutex-debug.c @@ -87,7 +87,7 @@ static int rt_trace_on = 1; static void printk_task(struct task_struct *p) { if (p) - printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio); + printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio); else printk("<none>"); } @@ -152,22 +152,25 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) printk( "[ BUG: circular locking deadlock detected! 
]\n"); printk( "--------------------------------------------\n"); printk("%s/%d is deadlocking current task %s/%d\n\n", - task->comm, task->pid, current->comm, current->pid); + task->comm, task_pid_nr(task), + current->comm, task_pid_nr(current)); printk("\n1) %s/%d is trying to acquire this lock:\n", - current->comm, current->pid); + current->comm, task_pid_nr(current)); printk_lock(waiter->lock, 1); - printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid); + printk("\n2) %s/%d is blocked on this lock:\n", + task->comm, task_pid_nr(task)); printk_lock(waiter->deadlock_lock, 1); debug_show_held_locks(current); debug_show_held_locks(task); - printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid); + printk("\n%s/%d's [blocked] stackdump:\n\n", + task->comm, task_pid_nr(task)); show_stack(task, NULL); printk("\n%s/%d's [current] stackdump:\n\n", - current->comm, current->pid); + current->comm, task_pid_nr(current)); dump_stack(); debug_show_all_locks(); diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c index 8cd9bd2cdb3..0deef71ff8d 100644 --- a/kernel/rtmutex.c +++ b/kernel/rtmutex.c @@ -185,7 +185,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, prev_max = max_lock_depth; printk(KERN_WARNING "Maximum lock depth %d reached " "task: %s (%d)\n", max_lock_depth, - top_task->comm, top_task->pid); + top_task->comm, task_pid_nr(top_task)); } put_task_struct(task); diff --git a/kernel/sched.c b/kernel/sched.c index 92721d1534b..afe76ec2e7f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -44,6 +44,7 @@ #include <linux/vmalloc.h> #include <linux/blkdev.h> #include <linux/delay.h> +#include <linux/pid_namespace.h> #include <linux/smp.h> #include <linux/threads.h> #include <linux/timer.h> @@ -51,6 +52,7 @@ #include <linux/cpu.h> #include <linux/cpuset.h> #include <linux/percpu.h> +#include <linux/cpu_acct.h> #include <linux/kthread.h> #include <linux/seq_file.h> #include <linux/sysctl.h> @@ -153,10 +155,15 @@ struct rt_prio_array { #ifdef CONFIG_FAIR_GROUP_SCHED +#include <linux/cgroup.h> + struct cfs_rq; /* task group related information */ struct task_group { +#ifdef CONFIG_FAIR_CGROUP_SCHED + struct cgroup_subsys_state css; +#endif /* schedulable entities of this group on each cpu */ struct sched_entity **se; /* runqueue "owned" by this group on each cpu */ @@ -197,6 +204,9 @@ static inline struct task_group *task_group(struct task_struct *p) #ifdef CONFIG_FAIR_USER_SCHED tg = p->user->tg; +#elif defined(CONFIG_FAIR_CGROUP_SCHED) + tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), + struct task_group, css); #else tg = &init_task_group; #endif @@ -266,7 +276,8 @@ struct rt_rq { * acquire operations must be ordered by ascending &runqueue. 
*/ struct rq { - spinlock_t lock; /* runqueue lock */ + /* runqueue lock: */ + spinlock_t lock; /* * nr_running and cpu_load should be in the same cacheline because @@ -279,13 +290,15 @@ struct rq { #ifdef CONFIG_NO_HZ unsigned char in_nohz_recently; #endif - struct load_weight load; /* capture load from *all* tasks on this cpu */ + /* capture load from *all* tasks on this cpu: */ + struct load_weight load; unsigned long nr_load_updates; u64 nr_switches; struct cfs_rq cfs; #ifdef CONFIG_FAIR_GROUP_SCHED - struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */ + /* list of leaf cfs_rq on this cpu: */ + struct list_head leaf_cfs_rq_list; #endif struct rt_rq rt; @@ -317,7 +330,8 @@ struct rq { /* For active balancing */ int active_balance; int push_cpu; - int cpu; /* cpu of this runqueue */ + /* cpu of this runqueue: */ + int cpu; struct task_struct *migration_thread; struct list_head migration_queue; @@ -328,22 +342,22 @@ struct rq { struct sched_info rq_sched_info; /* sys_sched_yield() stats */ - unsigned long yld_exp_empty; - unsigned long yld_act_empty; - unsigned long yld_both_empty; - unsigned long yld_count; + unsigned int yld_exp_empty; + unsigned int yld_act_empty; + unsigned int yld_both_empty; + unsigned int yld_count; /* schedule() stats */ - unsigned long sched_switch; - unsigned long sched_count; - unsigned long sched_goidle; + unsigned int sched_switch; + unsigned int sched_count; + unsigned int sched_goidle; /* try_to_wake_up() stats */ - unsigned long ttwu_count; - unsigned long ttwu_local; + unsigned int ttwu_count; + unsigned int ttwu_local; /* BKL stats */ - unsigned long bkl_count; + unsigned int bkl_count; #endif struct lock_class_key rq_lock_key; }; @@ -449,12 +463,12 @@ enum { }; const_debug unsigned int sysctl_sched_features = - SCHED_FEAT_NEW_FAIR_SLEEPERS *1 | - SCHED_FEAT_START_DEBIT *1 | - SCHED_FEAT_TREE_AVG *0 | - SCHED_FEAT_APPROX_AVG *0 | - SCHED_FEAT_WAKEUP_PREEMPT *1 | - SCHED_FEAT_PREEMPT_RESTRICT *1; + SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 | + SCHED_FEAT_START_DEBIT * 1 | + SCHED_FEAT_TREE_AVG * 0 | + SCHED_FEAT_APPROX_AVG * 0 | + SCHED_FEAT_WAKEUP_PREEMPT * 1 | + SCHED_FEAT_PREEMPT_RESTRICT * 1; #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x) @@ -1871,7 +1885,7 @@ asmlinkage void schedule_tail(struct task_struct *prev) preempt_enable(); #endif if (current->set_child_tid) - put_user(current->pid, current->set_child_tid); + put_user(task_pid_vnr(current), current->set_child_tid); } /* @@ -3303,9 +3317,13 @@ void account_user_time(struct task_struct *p, cputime_t cputime) { struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; cputime64_t tmp; + struct rq *rq = this_rq(); p->utime = cputime_add(p->utime, cputime); + if (p != rq->idle) + cpuacct_charge(p, cputime); + /* Add user time to cpustat. */ tmp = cputime_to_cputime64(cputime); if (TASK_NICE(p) > 0) @@ -3334,6 +3352,16 @@ void account_guest_time(struct task_struct *p, cputime_t cputime) } /* + * Account scaled user cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in user space since the last update + */ +void account_user_time_scaled(struct task_struct *p, cputime_t cputime) +{ + p->utimescaled = cputime_add(p->utimescaled, cputime); +} + +/* * Account system cpu time to a process. 
* @p: the process that the cpu time gets accounted to * @hardirq_offset: the offset to subtract from hardirq_count() @@ -3360,9 +3388,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset, cpustat->irq = cputime64_add(cpustat->irq, tmp); else if (softirq_count()) cpustat->softirq = cputime64_add(cpustat->softirq, tmp); - else if (p != rq->idle) + else if (p != rq->idle) { cpustat->system = cputime64_add(cpustat->system, tmp); - else if (atomic_read(&rq->nr_iowait) > 0) + cpuacct_charge(p, cputime); + } else if (atomic_read(&rq->nr_iowait) > 0) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); @@ -3371,6 +3400,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset, } /* + * Account scaled system cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @hardirq_offset: the offset to subtract from hardirq_count() + * @cputime: the cpu time spent in kernel space since the last update + */ +void account_system_time_scaled(struct task_struct *p, cputime_t cputime) +{ + p->stimescaled = cputime_add(p->stimescaled, cputime); +} + +/* * Account for involuntary wait time. * @p: the process from which the cpu time has been stolen * @steal: the cpu time spent in involuntary wait @@ -3387,8 +3427,10 @@ void account_steal_time(struct task_struct *p, cputime_t steal) cpustat->iowait = cputime64_add(cpustat->iowait, tmp); else cpustat->idle = cputime64_add(cpustat->idle, tmp); - } else + } else { cpustat->steal = cputime64_add(cpustat->steal, tmp); + cpuacct_charge(p, -tmp); + } } /* @@ -3468,7 +3510,7 @@ EXPORT_SYMBOL(sub_preempt_count); static noinline void __schedule_bug(struct task_struct *prev) { printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n", - prev->comm, preempt_count(), prev->pid); + prev->comm, preempt_count(), task_pid_nr(prev)); debug_show_held_locks(prev); if (irqs_disabled()) print_irqtrace_events(prev); @@ -3859,7 +3901,10 @@ EXPORT_SYMBOL(wait_for_completion_timeout); int __sched wait_for_completion_interruptible(struct completion *x) { - return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); + if (t == -ERESTARTSYS) + return t; + return 0; } EXPORT_SYMBOL(wait_for_completion_interruptible); @@ -4131,7 +4176,7 @@ struct task_struct *idle_task(int cpu) */ static struct task_struct *find_process_by_pid(pid_t pid) { - return pid ? find_task_by_pid(pid) : current; + return pid ? find_task_by_vpid(pid) : current; } /* Actually do priority change: must hold rq lock. */ @@ -4434,8 +4479,21 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask) cpus_allowed = cpuset_cpus_allowed(p); cpus_and(new_mask, new_mask, cpus_allowed); + again: retval = set_cpus_allowed(p, new_mask); + if (!retval) { + cpus_allowed = cpuset_cpus_allowed(p); + if (!cpus_subset(new_mask, cpus_allowed)) { + /* + * We must have raced with a concurrent cpuset + * update. Just reset the cpus_allowed to the + * cpuset's cpus_allowed + */ + new_mask = cpus_allowed; + goto again; + } + } out_unlock: put_task_struct(p); mutex_unlock(&sched_hotcpu_mutex); @@ -4794,18 +4852,18 @@ static void show_task(struct task_struct *p) unsigned state; state = p->state ? __ffs(p->state) + 1 : 0; - printk("%-13.13s %c", p->comm, + printk(KERN_INFO "%-13.13s %c", p->comm, state < sizeof(stat_nam) - 1 ? 
stat_nam[state] : '?'); #if BITS_PER_LONG == 32 if (state == TASK_RUNNING) - printk(" running "); + printk(KERN_CONT " running "); else - printk(" %08lx ", thread_saved_pc(p)); + printk(KERN_CONT " %08lx ", thread_saved_pc(p)); #else if (state == TASK_RUNNING) - printk(" running task "); + printk(KERN_CONT " running task "); else - printk(" %016lx ", thread_saved_pc(p)); + printk(KERN_CONT " %016lx ", thread_saved_pc(p)); #endif #ifdef CONFIG_DEBUG_STACK_USAGE { @@ -4815,7 +4873,8 @@ static void show_task(struct task_struct *p) free = (unsigned long)n - (unsigned long)end_of_stack(p); } #endif - printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid); + printk(KERN_CONT "%5lu %5d %6d\n", free, + task_pid_nr(p), task_pid_nr(p->parent)); if (state != TASK_RUNNING) show_stack(p, NULL); @@ -5109,8 +5168,16 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) /* No more Mr. Nice Guy. */ if (dest_cpu == NR_CPUS) { + cpumask_t cpus_allowed = cpuset_cpus_allowed_locked(p); + /* + * Try to stay on the same cpuset, where the + * current cpuset may be a subset of all cpus. + * The cpuset_cpus_allowed_locked() variant of + * cpuset_cpus_allowed() will not block. It must be + * called within calls to cpuset_lock/cpuset_unlock. + */ rq = task_rq_lock(p, &flags); - cpus_setall(p->cpus_allowed); + p->cpus_allowed = cpus_allowed; dest_cpu = any_online_cpu(p->cpus_allowed); task_rq_unlock(rq, &flags); @@ -5122,7 +5189,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) if (p->mm && printk_ratelimit()) printk(KERN_INFO "process %d (%s) no " "longer affine to cpu%d\n", - p->pid, p->comm, dead_cpu); + task_pid_nr(p), p->comm, dead_cpu); } } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); } @@ -5229,7 +5296,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p) struct rq *rq = cpu_rq(dead_cpu); /* Must be exiting, otherwise would be on tasklist. */ - BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD); + BUG_ON(!p->exit_state); /* Cannot have done final schedule yet: would have vanished. 
*/ BUG_ON(p->state == TASK_DEAD); @@ -5364,7 +5431,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) return table; } -static ctl_table *sd_alloc_ctl_cpu_table(int cpu) +static ctl_table * sd_alloc_ctl_cpu_table(int cpu) { struct ctl_table *entry, *table; struct sched_domain *sd; @@ -5476,6 +5543,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) case CPU_DEAD: case CPU_DEAD_FROZEN: + cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */ migrate_live_tasks(cpu); rq = cpu_rq(cpu); kthread_stop(rq->migration_thread); @@ -5489,6 +5557,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) rq->idle->sched_class = &idle_sched_class; migrate_dead_tasks(cpu); spin_unlock_irq(&rq->lock); + cpuset_unlock(); migrate_nr_uninterruptible(rq); BUG_ON(rq->nr_running != 0); @@ -5598,20 +5667,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) } if (!group->__cpu_power) { - printk("\n"); + printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: domain->cpu_power not " "set\n"); break; } if (!cpus_weight(group->cpumask)) { - printk("\n"); + printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: empty group\n"); break; } if (cpus_intersects(groupmask, group->cpumask)) { - printk("\n"); + printk(KERN_CONT "\n"); printk(KERN_ERR "ERROR: repeated CPUs\n"); break; } @@ -5619,11 +5688,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) cpus_or(groupmask, groupmask, group->cpumask); cpumask_scnprintf(str, NR_CPUS, group->cpumask); - printk(" %s", str); + printk(KERN_CONT " %s", str); group = group->next; } while (group != sd->groups); - printk("\n"); + printk(KERN_CONT "\n"); if (!cpus_equal(sd->span, groupmask)) printk(KERN_ERR "ERROR: groups don't span " @@ -6339,26 +6408,31 @@ error: return -ENOMEM; #endif } + +static cpumask_t *doms_cur; /* current sched domains */ +static int ndoms_cur; /* number of sched domains in 'doms_cur' */ + +/* + * Special case: If a kmalloc of a doms_cur partition (array of + * cpumask_t) fails, then fallback to a single sched domain, + * as determined by the single cpumask_t fallback_doms. + */ +static cpumask_t fallback_doms; + /* * Set up scheduler domains and groups. Callers must hold the hotplug lock. + * For now this just excludes isolated cpus, but could be used to + * exclude other special cases in the future. */ static int arch_init_sched_domains(const cpumask_t *cpu_map) { - cpumask_t cpu_default_map; - int err; - - /* - * Setup mask for cpus without special case scheduling requirements. - * For now this just excludes isolated cpus, but could be used to - * exclude other special cases in the future. - */ - cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map); - - err = build_sched_domains(&cpu_default_map); - + ndoms_cur = 1; + doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); + if (!doms_cur) + doms_cur = &fallback_doms; + cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); register_sched_domain_sysctl(); - - return err; + return build_sched_domains(doms_cur); } static void arch_destroy_sched_domains(const cpumask_t *cpu_map) @@ -6382,6 +6456,68 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) arch_destroy_sched_domains(cpu_map); } +/* + * Partition sched domains as specified by the 'ndoms_new' + * cpumasks in the array doms_new[] of cpumasks. This compares + * doms_new[] to the current sched domain partitioning, doms_cur[]. + * It destroys each deleted domain and builds each new domain. + * + * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. 
+ * The masks don't intersect (don't overlap.) We should setup one + * sched domain for each mask. CPUs not in any of the cpumasks will + * not be load balanced. If the same cpumask appears both in the + * current 'doms_cur' domains and in the new 'doms_new', we can leave + * it as it is. + * + * The passed in 'doms_new' should be kmalloc'd. This routine takes + * ownership of it and will kfree it when done with it. If the caller + * failed the kmalloc call, then it can pass in doms_new == NULL, + * and partition_sched_domains() will fallback to the single partition + * 'fallback_doms'. + * + * Call with hotplug lock held + */ +void partition_sched_domains(int ndoms_new, cpumask_t *doms_new) +{ + int i, j; + + if (doms_new == NULL) { + ndoms_new = 1; + doms_new = &fallback_doms; + cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); + } + + /* Destroy deleted domains */ + for (i = 0; i < ndoms_cur; i++) { + for (j = 0; j < ndoms_new; j++) { + if (cpus_equal(doms_cur[i], doms_new[j])) + goto match1; + } + /* no match - a current sched domain not in new doms_new[] */ + detach_destroy_domains(doms_cur + i); +match1: + ; + } + + /* Build new domains */ + for (i = 0; i < ndoms_new; i++) { + for (j = 0; j < ndoms_cur; j++) { + if (cpus_equal(doms_new[i], doms_cur[j])) + goto match2; + } + /* no match - add a new doms_new */ + build_sched_domains(doms_new + i); +match2: + ; + } + + /* Remember the new sched domains */ + if (doms_cur != &fallback_doms) + kfree(doms_cur); + doms_cur = doms_new; + ndoms_cur = ndoms_new; +} + #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) static int arch_reinit_sched_domains(void) { @@ -6963,3 +7099,116 @@ unsigned long sched_group_shares(struct task_group *tg) } #endif /* CONFIG_FAIR_GROUP_SCHED */ + +#ifdef CONFIG_FAIR_CGROUP_SCHED + +/* return corresponding task_group object of a cgroup */ +static inline struct task_group *cgroup_tg(struct cgroup *cont) +{ + return container_of(cgroup_subsys_state(cont, cpu_cgroup_subsys_id), + struct task_group, css); +} + +static struct cgroup_subsys_state * +cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) +{ + struct task_group *tg; + + if (!cont->parent) { + /* This is early initialization for the top cgroup */ + init_task_group.css.cgroup = cont; + return &init_task_group.css; + } + + /* we support only 1-level deep hierarchical scheduler atm */ + if (cont->parent->parent) + return ERR_PTR(-EINVAL); + + tg = sched_create_group(); + if (IS_ERR(tg)) + return ERR_PTR(-ENOMEM); + + /* Bind the cgroup to task_group object we just created */ + tg->css.cgroup = cont; + + return &tg->css; +} + +static void cpu_cgroup_destroy(struct cgroup_subsys *ss, + struct cgroup *cont) +{ + struct task_group *tg = cgroup_tg(cont); + + sched_destroy_group(tg); +} + +static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, + struct cgroup *cont, struct task_struct *tsk) +{ + /* We don't support RT-tasks being in separate groups */ + if (tsk->sched_class != &fair_sched_class) + return -EINVAL; + + return 0; +} + +static void +cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cont, + struct cgroup *old_cont, struct task_struct *tsk) +{ + sched_move_task(tsk); +} + +static ssize_t cpu_shares_write(struct cgroup *cont, struct cftype *cftype, + struct file *file, const char __user *userbuf, + size_t nbytes, loff_t *ppos) +{ + unsigned long shareval; + struct task_group *tg = cgroup_tg(cont); + char buffer[2*sizeof(unsigned long) + 1]; + int rc; + + if (nbytes > 2*sizeof(unsigned long)) /* safety check */ + 
return -E2BIG; + + if (copy_from_user(buffer, userbuf, nbytes)) + return -EFAULT; + + buffer[nbytes] = 0; /* nul-terminate */ + shareval = simple_strtoul(buffer, NULL, 10); + + rc = sched_group_set_shares(tg, shareval); + + return (rc < 0 ? rc : nbytes); +} + +static u64 cpu_shares_read_uint(struct cgroup *cont, struct cftype *cft) +{ + struct task_group *tg = cgroup_tg(cont); + + return (u64) tg->shares; +} + +static struct cftype cpu_shares = { + .name = "shares", + .read_uint = cpu_shares_read_uint, + .write = cpu_shares_write, +}; + +static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont) +{ + return cgroup_add_file(cont, ss, &cpu_shares); +} + +struct cgroup_subsys cpu_cgroup_subsys = { + .name = "cpu", + .create = cpu_cgroup_create, + .destroy = cpu_cgroup_destroy, + .can_attach = cpu_cgroup_can_attach, + .attach = cpu_cgroup_attach, + .populate = cpu_cgroup_populate, + .subsys_id = cpu_cgroup_subsys_id, + .early_init = 1, +}; + +#endif /* CONFIG_FAIR_CGROUP_SCHED */ diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index a5e517ec07c..e6fb392e516 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c @@ -137,7 +137,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running); SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); #ifdef CONFIG_SCHEDSTATS - SEQ_printf(m, " .%-30s: %ld\n", "bkl_count", + SEQ_printf(m, " .%-30s: %d\n", "bkl_count", rq->bkl_count); #endif SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over", diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 1c084842c3e..ef1a7df80ea 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h @@ -21,7 +21,7 @@ static int show_schedstat(struct seq_file *seq, void *v) /* runqueue-specific stats */ seq_printf(seq, - "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu", + "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu", cpu, rq->yld_both_empty, rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, rq->sched_switch, rq->sched_count, rq->sched_goidle, @@ -42,8 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v) seq_printf(seq, "domain%d %s", dcount++, mask_str); for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; itype++) { - seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu " - "%lu", + seq_printf(seq, " %u %u %u %u %u %u %u %u", sd->lb_count[itype], sd->lb_balanced[itype], sd->lb_failed[itype], @@ -53,8 +52,7 @@ static int show_schedstat(struct seq_file *seq, void *v) sd->lb_nobusyq[itype], sd->lb_nobusyg[itype]); } - seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu" - " %lu %lu %lu\n", + seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n", sd->alb_count, sd->alb_failed, sd->alb_pushed, sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed, sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, diff --git a/kernel/signal.c b/kernel/signal.c index 2124ffadcfd..12006308c7e 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -99,7 +99,6 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) static int recalc_sigpending_tsk(struct task_struct *t) { if (t->signal->group_stop_count > 0 || - (freezing(t)) || PENDING(&t->pending, &t->blocked) || PENDING(&t->signal->shared_pending, &t->blocked)) { set_tsk_thread_flag(t, TIF_SIGPENDING); @@ -257,7 +256,7 @@ flush_signal_handlers(struct task_struct *t, int force_default) int unhandled_signal(struct task_struct *tsk, int sig) { - if (is_init(tsk)) + if (is_global_init(tsk)) return 1; if (tsk->ptrace & PT_PTRACED) return 0; 
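
[Editor's note] Many hunks in kernel/sched.c and kernel/signal.c replace current->pid / find_task_by_pid() with task_pid_vnr() / find_task_by_vpid(), so that a task's pid is always reported and looked up in the namespace of the observer. A userspace sketch of why that translation matters, using clone(2) with CLONE_NEWPID (requires CAP_SYS_ADMIN; the function name and stack size are illustrative assumptions):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef CLONE_NEWPID
#define CLONE_NEWPID 0x20000000	/* from <linux/sched.h> */
#endif

static char child_stack[64 * 1024];

static int in_new_pidns(void *arg)
{
	/* inside the new namespace this prints 1, the "virtual" pid */
	printf("child sees itself as pid %d\n", (int)getpid());
	return 0;
}

int main(void)
{
	pid_t global;

	global = clone(in_new_pidns, child_stack + sizeof(child_stack),
		       CLONE_NEWPID | SIGCHLD, NULL);
	if (global < 0) {
		perror("clone");	/* typically needs root */
		return 1;
	}
	/* the parent sees the same task under its global pid */
	printf("parent sees the same task as pid %d\n", (int)global);
	waitpid(global, NULL, 0);
	return 0;
}

The same distinction is what task_pid_nr() (global) versus task_pid_vnr() (namespace-local) express in the kernel, and it is why do_notify_parent() above now fills si_pid via task_pid_nr_ns() against the parent's pid namespace rather than copying tsk->pid.
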
@@ -537,7 +536,7 @@ static int check_kill_permission(int sig, struct siginfo *info, return error; error = -EPERM; if (((sig != SIGCONT) || - (process_session(current) != process_session(t))) + (task_session_nr(current) != task_session_nr(t))) && (current->euid ^ t->suid) && (current->euid ^ t->uid) && (current->uid ^ t->suid) && (current->uid ^ t->uid) && !capable(CAP_KILL)) @@ -695,7 +694,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, q->info.si_signo = sig; q->info.si_errno = 0; q->info.si_code = SI_USER; - q->info.si_pid = current->pid; + q->info.si_pid = task_pid_vnr(current); q->info.si_uid = current->uid; break; case (unsigned long) SEND_SIG_PRIV: @@ -731,7 +730,7 @@ int print_fatal_signals; static void print_fatal_signal(struct pt_regs *regs, int signr) { printk("%s/%d: potentially unexpected fatal signal %d.\n", - current->comm, current->pid, signr); + current->comm, task_pid_nr(current), signr); #ifdef __i386__ printk("code at %08lx: ", regs->eip); @@ -1090,7 +1089,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid) { int error; rcu_read_lock(); - error = kill_pid_info(sig, info, find_pid(pid)); + error = kill_pid_info(sig, info, find_vpid(pid)); rcu_read_unlock(); return error; } @@ -1151,7 +1150,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) read_lock(&tasklist_lock); for_each_process(p) { - if (p->pid > 1 && p->tgid != current->tgid) { + if (p->pid > 1 && !same_thread_group(p, current)) { int err = group_send_sig_info(sig, info, p); ++count; if (err != -EPERM) @@ -1161,9 +1160,9 @@ static int kill_something_info(int sig, struct siginfo *info, int pid) read_unlock(&tasklist_lock); ret = count ? retval : -ESRCH; } else if (pid < 0) { - ret = kill_pgrp_info(sig, info, find_pid(-pid)); + ret = kill_pgrp_info(sig, info, find_vpid(-pid)); } else { - ret = kill_pid_info(sig, info, find_pid(pid)); + ret = kill_pid_info(sig, info, find_vpid(pid)); } rcu_read_unlock(); return ret; @@ -1267,7 +1266,12 @@ EXPORT_SYMBOL(kill_pid); int kill_proc(pid_t pid, int sig, int priv) { - return kill_proc_info(sig, __si_special(priv), pid); + int ret; + + rcu_read_lock(); + ret = kill_pid_info(sig, __si_special(priv), find_pid(pid)); + rcu_read_unlock(); + return ret; } /* @@ -1444,7 +1448,22 @@ void do_notify_parent(struct task_struct *tsk, int sig) info.si_signo = sig; info.si_errno = 0; - info.si_pid = tsk->pid; + /* + * we are under tasklist_lock here so our parent is tied to + * us and cannot exit and release its namespace. + * + * the only it can is to switch its nsproxy with sys_unshare, + * bu uncharing pid namespaces is not allowed, so we'll always + * see relevant namespace + * + * write_lock() currently calls preempt_disable() which is the + * same as rcu_read_lock(), but according to Oleg, this is not + * correct to rely on this + */ + rcu_read_lock(); + info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + rcu_read_unlock(); + info.si_uid = tsk->uid; /* FIXME: find out whether or not this is supposed to be c*time. */ @@ -1509,7 +1528,13 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) info.si_signo = SIGCHLD; info.si_errno = 0; - info.si_pid = tsk->pid; + /* + * see comment in do_notify_parent() abot the following 3 lines + */ + rcu_read_lock(); + info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); + rcu_read_unlock(); + info.si_uid = tsk->uid; /* FIXME: find out whether or not this is supposed to be c*time. 
*/ @@ -1635,7 +1660,7 @@ void ptrace_notify(int exit_code) memset(&info, 0, sizeof info); info.si_signo = SIGTRAP; info.si_code = exit_code; - info.si_pid = current->pid; + info.si_pid = task_pid_vnr(current); info.si_uid = current->uid; /* Let the debugger run. */ @@ -1805,7 +1830,7 @@ relock: info->si_signo = signr; info->si_errno = 0; info->si_code = SI_USER; - info->si_pid = current->parent->pid; + info->si_pid = task_pid_vnr(current->parent); info->si_uid = current->parent->uid; } @@ -1836,11 +1861,9 @@ relock: continue; /* - * Init of a pid space gets no signals it doesn't want from - * within that pid space. It can of course get signals from - * its parent pid space. + * Global init gets no signals it doesn't want. */ - if (current == child_reaper(current)) + if (is_global_init(current)) continue; if (sig_kernel_stop(signr)) { @@ -2194,7 +2217,7 @@ sys_kill(int pid, int sig) info.si_signo = sig; info.si_errno = 0; info.si_code = SI_USER; - info.si_pid = current->tgid; + info.si_pid = task_tgid_vnr(current); info.si_uid = current->uid; return kill_something_info(sig, &info, pid); @@ -2210,12 +2233,12 @@ static int do_tkill(int tgid, int pid, int sig) info.si_signo = sig; info.si_errno = 0; info.si_code = SI_TKILL; - info.si_pid = current->tgid; + info.si_pid = task_tgid_vnr(current); info.si_uid = current->uid; read_lock(&tasklist_lock); - p = find_task_by_pid(pid); - if (p && (tgid <= 0 || p->tgid == tgid)) { + p = find_task_by_vpid(pid); + if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { error = check_kill_permission(sig, &info, p); /* * The null signal is a permissions and process existence diff --git a/kernel/softlockup.c b/kernel/softlockup.c index edeeef3a6a3..11df812263c 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c @@ -113,7 +113,7 @@ void softlockup_tick(void) spin_lock(&print_lock); printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", this_cpu, now - touch_timestamp, - current->comm, current->pid); + current->comm, task_pid_nr(current)); if (regs) show_regs(regs); else diff --git a/kernel/sys.c b/kernel/sys.c index 8ae2e636eb1..304b5410d74 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -105,538 +105,6 @@ EXPORT_SYMBOL(cad_pid); */ void (*pm_power_off_prepare)(void); -EXPORT_SYMBOL(pm_power_off_prepare); - -/* - * Notifier list for kernel code which wants to be called - * at shutdown. This is used to stop any idling DMA operations - * and the like. - */ - -static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); - -/* - * Notifier chain core routines. The exported routines below - * are layered on top of these, with appropriate locking added. - */ - -static int notifier_chain_register(struct notifier_block **nl, - struct notifier_block *n) -{ - while ((*nl) != NULL) { - if (n->priority > (*nl)->priority) - break; - nl = &((*nl)->next); - } - n->next = *nl; - rcu_assign_pointer(*nl, n); - return 0; -} - -static int notifier_chain_unregister(struct notifier_block **nl, - struct notifier_block *n) -{ - while ((*nl) != NULL) { - if ((*nl) == n) { - rcu_assign_pointer(*nl, n->next); - return 0; - } - nl = &((*nl)->next); - } - return -ENOENT; -} - -/** - * notifier_call_chain - Informs the registered notifiers about an event. - * @nl: Pointer to head of the blocking notifier chain - * @val: Value passed unmodified to notifier function - * @v: Pointer passed unmodified to notifier function - * @nr_to_call: Number of notifier functions to be called. Don't care - * value of this parameter is -1. - * @nr_calls: Records the number of notifications sent. 
Don't care - * value of this field is NULL. - * @returns: notifier_call_chain returns the value returned by the - * last notifier function called. - */ - -static int __kprobes notifier_call_chain(struct notifier_block **nl, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) -{ - int ret = NOTIFY_DONE; - struct notifier_block *nb, *next_nb; - - nb = rcu_dereference(*nl); - - while (nb && nr_to_call) { - next_nb = rcu_dereference(nb->next); - ret = nb->notifier_call(nb, val, v); - - if (nr_calls) - (*nr_calls)++; - - if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) - break; - nb = next_nb; - nr_to_call--; - } - return ret; -} - -/* - * Atomic notifier chain routines. Registration and unregistration - * use a spinlock, and call_chain is synchronized by RCU (no locks). - */ - -/** - * atomic_notifier_chain_register - Add notifier to an atomic notifier chain - * @nh: Pointer to head of the atomic notifier chain - * @n: New entry in notifier chain - * - * Adds a notifier to an atomic notifier chain. - * - * Currently always returns zero. - */ - -int atomic_notifier_chain_register(struct atomic_notifier_head *nh, - struct notifier_block *n) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&nh->lock, flags); - ret = notifier_chain_register(&nh->head, n); - spin_unlock_irqrestore(&nh->lock, flags); - return ret; -} - -EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); - -/** - * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain - * @nh: Pointer to head of the atomic notifier chain - * @n: Entry to remove from notifier chain - * - * Removes a notifier from an atomic notifier chain. - * - * Returns zero on success or %-ENOENT on failure. - */ -int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, - struct notifier_block *n) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&nh->lock, flags); - ret = notifier_chain_unregister(&nh->head, n); - spin_unlock_irqrestore(&nh->lock, flags); - synchronize_rcu(); - return ret; -} - -EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); - -/** - * __atomic_notifier_call_chain - Call functions in an atomic notifier chain - * @nh: Pointer to head of the atomic notifier chain - * @val: Value passed unmodified to notifier function - * @v: Pointer passed unmodified to notifier function - * @nr_to_call: See the comment for notifier_call_chain. - * @nr_calls: See the comment for notifier_call_chain. - * - * Calls each function in a notifier chain in turn. The functions - * run in an atomic context, so they must not block. - * This routine uses RCU to synchronize with changes to the chain. - * - * If the return value of the notifier can be and'ed - * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain() - * will return immediately, with the return value of - * the notifier function which halted execution. - * Otherwise the return value is the return value - * of the last notifier function called. 
- */ - -int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) -{ - int ret; - - rcu_read_lock(); - ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); - rcu_read_unlock(); - return ret; -} - -EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); - -int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, - unsigned long val, void *v) -{ - return __atomic_notifier_call_chain(nh, val, v, -1, NULL); -} - -EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); -/* - * Blocking notifier chain routines. All access to the chain is - * synchronized by an rwsem. - */ - -/** - * blocking_notifier_chain_register - Add notifier to a blocking notifier chain - * @nh: Pointer to head of the blocking notifier chain - * @n: New entry in notifier chain - * - * Adds a notifier to a blocking notifier chain. - * Must be called in process context. - * - * Currently always returns zero. - */ - -int blocking_notifier_chain_register(struct blocking_notifier_head *nh, - struct notifier_block *n) -{ - int ret; - - /* - * This code gets used during boot-up, when task switching is - * not yet working and interrupts must remain disabled. At - * such times we must not call down_write(). - */ - if (unlikely(system_state == SYSTEM_BOOTING)) - return notifier_chain_register(&nh->head, n); - - down_write(&nh->rwsem); - ret = notifier_chain_register(&nh->head, n); - up_write(&nh->rwsem); - return ret; -} - -EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); - -/** - * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain - * @nh: Pointer to head of the blocking notifier chain - * @n: Entry to remove from notifier chain - * - * Removes a notifier from a blocking notifier chain. - * Must be called from process context. - * - * Returns zero on success or %-ENOENT on failure. - */ -int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, - struct notifier_block *n) -{ - int ret; - - /* - * This code gets used during boot-up, when task switching is - * not yet working and interrupts must remain disabled. At - * such times we must not call down_write(). - */ - if (unlikely(system_state == SYSTEM_BOOTING)) - return notifier_chain_unregister(&nh->head, n); - - down_write(&nh->rwsem); - ret = notifier_chain_unregister(&nh->head, n); - up_write(&nh->rwsem); - return ret; -} - -EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); - -/** - * __blocking_notifier_call_chain - Call functions in a blocking notifier chain - * @nh: Pointer to head of the blocking notifier chain - * @val: Value passed unmodified to notifier function - * @v: Pointer passed unmodified to notifier function - * @nr_to_call: See comment for notifier_call_chain. - * @nr_calls: See comment for notifier_call_chain. - * - * Calls each function in a notifier chain in turn. The functions - * run in a process context, so they are allowed to block. - * - * If the return value of the notifier can be and'ed - * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain() - * will return immediately, with the return value of - * the notifier function which halted execution. - * Otherwise the return value is the return value - * of the last notifier function called. 
- */ - -int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) -{ - int ret = NOTIFY_DONE; - - /* - * We check the head outside the lock, but if this access is - * racy then it does not matter what the result of the test - * is, we re-check the list after having taken the lock anyway: - */ - if (rcu_dereference(nh->head)) { - down_read(&nh->rwsem); - ret = notifier_call_chain(&nh->head, val, v, nr_to_call, - nr_calls); - up_read(&nh->rwsem); - } - return ret; -} -EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); - -int blocking_notifier_call_chain(struct blocking_notifier_head *nh, - unsigned long val, void *v) -{ - return __blocking_notifier_call_chain(nh, val, v, -1, NULL); -} -EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); - -/* - * Raw notifier chain routines. There is no protection; - * the caller must provide it. Use at your own risk! - */ - -/** - * raw_notifier_chain_register - Add notifier to a raw notifier chain - * @nh: Pointer to head of the raw notifier chain - * @n: New entry in notifier chain - * - * Adds a notifier to a raw notifier chain. - * All locking must be provided by the caller. - * - * Currently always returns zero. - */ - -int raw_notifier_chain_register(struct raw_notifier_head *nh, - struct notifier_block *n) -{ - return notifier_chain_register(&nh->head, n); -} - -EXPORT_SYMBOL_GPL(raw_notifier_chain_register); - -/** - * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain - * @nh: Pointer to head of the raw notifier chain - * @n: Entry to remove from notifier chain - * - * Removes a notifier from a raw notifier chain. - * All locking must be provided by the caller. - * - * Returns zero on success or %-ENOENT on failure. - */ -int raw_notifier_chain_unregister(struct raw_notifier_head *nh, - struct notifier_block *n) -{ - return notifier_chain_unregister(&nh->head, n); -} - -EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); - -/** - * __raw_notifier_call_chain - Call functions in a raw notifier chain - * @nh: Pointer to head of the raw notifier chain - * @val: Value passed unmodified to notifier function - * @v: Pointer passed unmodified to notifier function - * @nr_to_call: See comment for notifier_call_chain. - * @nr_calls: See comment for notifier_call_chain - * - * Calls each function in a notifier chain in turn. The functions - * run in an undefined context. - * All locking must be provided by the caller. - * - * If the return value of the notifier can be and'ed - * with %NOTIFY_STOP_MASK then raw_notifier_call_chain() - * will return immediately, with the return value of - * the notifier function which halted execution. - * Otherwise the return value is the return value - * of the last notifier function called. - */ - -int __raw_notifier_call_chain(struct raw_notifier_head *nh, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) -{ - return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); -} - -EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); - -int raw_notifier_call_chain(struct raw_notifier_head *nh, - unsigned long val, void *v) -{ - return __raw_notifier_call_chain(nh, val, v, -1, NULL); -} - -EXPORT_SYMBOL_GPL(raw_notifier_call_chain); - -/* - * SRCU notifier chain routines. Registration and unregistration - * use a mutex, and call_chain is synchronized by SRCU (no locks). 
- */ - -/** - * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain - * @nh: Pointer to head of the SRCU notifier chain - * @n: New entry in notifier chain - * - * Adds a notifier to an SRCU notifier chain. - * Must be called in process context. - * - * Currently always returns zero. - */ - -int srcu_notifier_chain_register(struct srcu_notifier_head *nh, - struct notifier_block *n) -{ - int ret; - - /* - * This code gets used during boot-up, when task switching is - * not yet working and interrupts must remain disabled. At - * such times we must not call mutex_lock(). - */ - if (unlikely(system_state == SYSTEM_BOOTING)) - return notifier_chain_register(&nh->head, n); - - mutex_lock(&nh->mutex); - ret = notifier_chain_register(&nh->head, n); - mutex_unlock(&nh->mutex); - return ret; -} - -EXPORT_SYMBOL_GPL(srcu_notifier_chain_register); - -/** - * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain - * @nh: Pointer to head of the SRCU notifier chain - * @n: Entry to remove from notifier chain - * - * Removes a notifier from an SRCU notifier chain. - * Must be called from process context. - * - * Returns zero on success or %-ENOENT on failure. - */ -int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, - struct notifier_block *n) -{ - int ret; - - /* - * This code gets used during boot-up, when task switching is - * not yet working and interrupts must remain disabled. At - * such times we must not call mutex_lock(). - */ - if (unlikely(system_state == SYSTEM_BOOTING)) - return notifier_chain_unregister(&nh->head, n); - - mutex_lock(&nh->mutex); - ret = notifier_chain_unregister(&nh->head, n); - mutex_unlock(&nh->mutex); - synchronize_srcu(&nh->srcu); - return ret; -} - -EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); - -/** - * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain - * @nh: Pointer to head of the SRCU notifier chain - * @val: Value passed unmodified to notifier function - * @v: Pointer passed unmodified to notifier function - * @nr_to_call: See comment for notifier_call_chain. - * @nr_calls: See comment for notifier_call_chain - * - * Calls each function in a notifier chain in turn. The functions - * run in a process context, so they are allowed to block. - * - * If the return value of the notifier can be and'ed - * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain() - * will return immediately, with the return value of - * the notifier function which halted execution. - * Otherwise the return value is the return value - * of the last notifier function called. - */ - -int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, - unsigned long val, void *v, - int nr_to_call, int *nr_calls) -{ - int ret; - int idx; - - idx = srcu_read_lock(&nh->srcu); - ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); - srcu_read_unlock(&nh->srcu, idx); - return ret; -} -EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); - -int srcu_notifier_call_chain(struct srcu_notifier_head *nh, - unsigned long val, void *v) -{ - return __srcu_notifier_call_chain(nh, val, v, -1, NULL); -} -EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); - -/** - * srcu_init_notifier_head - Initialize an SRCU notifier head - * @nh: Pointer to head of the srcu notifier chain - * - * Unlike other sorts of notifier heads, SRCU notifier heads require - * dynamic initialization. Be sure to call this routine before - * calling any of the other SRCU notifier routines for this head. 
- * - * If an SRCU notifier head is deallocated, it must first be cleaned - * up by calling srcu_cleanup_notifier_head(). Otherwise the head's - * per-cpu data (used by the SRCU mechanism) will leak. - */ - -void srcu_init_notifier_head(struct srcu_notifier_head *nh) -{ - mutex_init(&nh->mutex); - if (init_srcu_struct(&nh->srcu) < 0) - BUG(); - nh->head = NULL; -} - -EXPORT_SYMBOL_GPL(srcu_init_notifier_head); - -/** - * register_reboot_notifier - Register function to be called at reboot time - * @nb: Info about notifier function to be called - * - * Registers a function with the list of functions - * to be called at reboot time. - * - * Currently always returns zero, as blocking_notifier_chain_register() - * always returns zero. - */ - -int register_reboot_notifier(struct notifier_block * nb) -{ - return blocking_notifier_chain_register(&reboot_notifier_list, nb); -} - -EXPORT_SYMBOL(register_reboot_notifier); - -/** - * unregister_reboot_notifier - Unregister previously registered reboot notifier - * @nb: Hook to be unregistered - * - * Unregisters a previously registered reboot - * notifier function. - * - * Returns zero on success, or %-ENOENT on failure. - */ - -int unregister_reboot_notifier(struct notifier_block * nb) -{ - return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); -} - -EXPORT_SYMBOL(unregister_reboot_notifier); static int set_one_prio(struct task_struct *p, int niceval, int error) { @@ -684,7 +152,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) switch (which) { case PRIO_PROCESS: if (who) - p = find_task_by_pid(who); + p = find_task_by_vpid(who); else p = current; if (p) @@ -692,7 +160,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) break; case PRIO_PGRP: if (who) - pgrp = find_pid(who); + pgrp = find_vpid(who); else pgrp = task_pgrp(current); do_each_pid_task(pgrp, PIDTYPE_PGID, p) { @@ -741,7 +209,7 @@ asmlinkage long sys_getpriority(int which, int who) switch (which) { case PRIO_PROCESS: if (who) - p = find_task_by_pid(who); + p = find_task_by_vpid(who); else p = current; if (p) { @@ -752,7 +220,7 @@ asmlinkage long sys_getpriority(int which, int who) break; case PRIO_PGRP: if (who) - pgrp = find_pid(who); + pgrp = find_vpid(who); else pgrp = task_pgrp(current); do_each_pid_task(pgrp, PIDTYPE_PGID, p) { @@ -1449,9 +917,10 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) struct task_struct *p; struct task_struct *group_leader = current->group_leader; int err = -EINVAL; + struct pid_namespace *ns; if (!pid) - pid = group_leader->pid; + pid = task_pid_vnr(group_leader); if (!pgid) pgid = pid; if (pgid < 0) @@ -1460,10 +929,12 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) /* From this point forward we keep holding onto the tasklist lock * so that our parent does not change from under us. 
-DaveM */ + ns = current->nsproxy->pid_ns; + write_lock_irq(&tasklist_lock); err = -ESRCH; - p = find_task_by_pid(pid); + p = find_task_by_pid_ns(pid, ns); if (!p) goto out; @@ -1489,9 +960,9 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) goto out; if (pgid != pid) { - struct task_struct *g = - find_task_by_pid_type(PIDTYPE_PGID, pgid); + struct task_struct *g; + g = find_task_by_pid_type_ns(PIDTYPE_PGID, pgid, ns); if (!g || task_session(g) != task_session(group_leader)) goto out; } @@ -1500,10 +971,13 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) if (err) goto out; - if (process_group(p) != pgid) { + if (task_pgrp_nr_ns(p, ns) != pgid) { + struct pid *pid; + detach_pid(p, PIDTYPE_PGID); - p->signal->pgrp = pgid; - attach_pid(p, PIDTYPE_PGID, find_pid(pgid)); + pid = find_vpid(pgid); + attach_pid(p, PIDTYPE_PGID, pid); + set_task_pgrp(p, pid_nr(pid)); } err = 0; @@ -1516,19 +990,21 @@ out: asmlinkage long sys_getpgid(pid_t pid) { if (!pid) - return process_group(current); + return task_pgrp_vnr(current); else { int retval; struct task_struct *p; + struct pid_namespace *ns; - read_lock(&tasklist_lock); - p = find_task_by_pid(pid); + ns = current->nsproxy->pid_ns; + read_lock(&tasklist_lock); + p = find_task_by_pid_ns(pid, ns); retval = -ESRCH; if (p) { retval = security_task_getpgid(p); if (!retval) - retval = process_group(p); + retval = task_pgrp_nr_ns(p, ns); } read_unlock(&tasklist_lock); return retval; @@ -1540,7 +1016,7 @@ asmlinkage long sys_getpgid(pid_t pid) asmlinkage long sys_getpgrp(void) { /* SMP - assuming writes are word atomic this is fine */ - return process_group(current); + return task_pgrp_vnr(current); } #endif @@ -1548,19 +1024,21 @@ asmlinkage long sys_getpgrp(void) asmlinkage long sys_getsid(pid_t pid) { if (!pid) - return process_session(current); + return task_session_vnr(current); else { int retval; struct task_struct *p; + struct pid_namespace *ns; - read_lock(&tasklist_lock); - p = find_task_by_pid(pid); + ns = current->nsproxy->pid_ns; + read_lock(&tasklist_lock); + p = find_task_by_pid_ns(pid, ns); retval = -ESRCH; if (p) { retval = security_task_getsid(p); if (!retval) - retval = process_session(p); + retval = task_session_nr_ns(p, ns); } read_unlock(&tasklist_lock); return retval; @@ -1587,7 +1065,8 @@ asmlinkage long sys_setsid(void) * session id and so the check will always fail and make it so * init cannot successfully call setsid. */ - if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session)) + if (session > 1 && find_task_by_pid_type_ns(PIDTYPE_PGID, + session, &init_pid_ns)) goto out; group_leader->signal->leader = 1; @@ -1597,7 +1076,7 @@ asmlinkage long sys_setsid(void) group_leader->signal->tty = NULL; spin_unlock(&group_leader->sighand->siglock); - err = process_group(group_leader); + err = task_pgrp_vnr(group_leader); out: write_unlock_irq(&tasklist_lock); return err; diff --git a/kernel/sysctl.c b/kernel/sysctl.c index dde3d53e8ad..3b4efbe2644 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -24,7 +24,7 @@ #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/proc_fs.h> -#include <linux/capability.h> +#include <linux/security.h> #include <linux/ctype.h> #include <linux/utsname.h> #include <linux/smp_lock.h> @@ -55,6 +55,8 @@ #include <asm/stacktrace.h> #endif +static int deprecated_sysctl_warning(struct __sysctl_args *args); + #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ @@ -142,32 +144,29 @@ extern int max_lock_depth; #ifdef CONFIG_SYSCTL_SYSCALL static int parse_table(int __user *, int, void __user *, size_t __user *, - void __user *, size_t, ctl_table *); + void __user *, size_t, struct ctl_table *); #endif #ifdef CONFIG_PROC_SYSCTL -static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, +static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); -static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp, +static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos); #endif -static ctl_table root_table[]; +static struct ctl_table root_table[]; static struct ctl_table_header root_table_header = { root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) }; -static ctl_table kern_table[]; -static ctl_table vm_table[]; -static ctl_table fs_table[]; -static ctl_table debug_table[]; -static ctl_table dev_table[]; -extern ctl_table random_table[]; -#ifdef CONFIG_UNIX98_PTYS -extern ctl_table pty_table[]; -#endif +static struct ctl_table kern_table[]; +static struct ctl_table vm_table[]; +static struct ctl_table fs_table[]; +static struct ctl_table debug_table[]; +static struct ctl_table dev_table[]; +extern struct ctl_table random_table[]; #ifdef CONFIG_INOTIFY_USER -extern ctl_table inotify_table[]; +extern struct ctl_table inotify_table[]; #endif #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT @@ -179,7 +178,7 @@ extern int lock_stat; /* The default sysctl tables: */ -static ctl_table root_table[] = { +static struct ctl_table root_table[] = { { .ctl_name = CTL_KERN, .procname = "kernel", @@ -232,7 +231,7 @@ static unsigned long min_wakeup_granularity_ns; /* 0 usecs */ static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */ #endif -static ctl_table kern_table[] = { +static struct ctl_table kern_table[] = { #ifdef CONFIG_SCHED_DEBUG { .ctl_name = CTL_UNNUMBERED, @@ -365,7 +364,6 @@ static ctl_table kern_table[] = { }, #ifdef CONFIG_PROC_SYSCTL { - .ctl_name = KERN_TAINTED, .procname = "tainted", .data = &tainted, .maxlen = sizeof(int), @@ -373,14 +371,15 @@ static ctl_table kern_table[] = { .proc_handler = &proc_dointvec_taint, }, #endif +#ifdef CONFIG_SECURITY_CAPABILITIES { - .ctl_name = KERN_CAP_BSET, .procname = "cap-bound", .data = &cap_bset, .maxlen = sizeof(kernel_cap_t), .mode = 0600, .proc_handler = &proc_dointvec_bset, }, +#endif /* def CONFIG_SECURITY_CAPABILITIES */ #ifdef CONFIG_BLK_DEV_INITRD { .ctl_name = KERN_REALROOTDEV, @@ -514,7 +513,6 @@ static ctl_table kern_table[] = { #endif #ifdef CONFIG_PROC_SYSCTL { - .ctl_name = KERN_CADPID, .procname = "cad_pid", .data = NULL, .maxlen = sizeof (int), @@ -536,14 +534,6 @@ static ctl_table kern_table[] = { .mode = 0555, .child = random_table, }, -#ifdef CONFIG_UNIX98_PTYS - { - .ctl_name = KERN_PTY, - .procname = "pty", - .mode = 0555, - .child = pty_table, - }, -#endif { .ctl_name = KERN_OVERFLOWUID, .procname = "overflowuid", @@ -650,7 +640,6 @@ static ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = KERN_NMI_WATCHDOG, .procname = "nmi_watchdog", .data = &nmi_watchdog_enabled, .maxlen = sizeof (int), @@ -706,7 +695,6 @@ static ctl_table kern_table[] = { #endif #if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) { - .ctl_name = KERN_ACPI_VIDEO_FLAGS, .procname = "acpi_video_flags", .data = &acpi_realmode_flags, .maxlen = sizeof (unsigned long), @@ -783,7 +771,7 @@ static 
ctl_table kern_table[] = { { .ctl_name = 0 } }; -static ctl_table vm_table[] = { +static struct ctl_table vm_table[] = { { .ctl_name = VM_OVERCOMMIT_MEMORY, .procname = "overcommit_memory", @@ -847,7 +835,6 @@ static ctl_table vm_table[] = { .extra2 = &one_hundred, }, { - .ctl_name = VM_DIRTY_WB_CS, .procname = "dirty_writeback_centisecs", .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), @@ -855,7 +842,6 @@ static ctl_table vm_table[] = { .proc_handler = &dirty_writeback_centisecs_handler, }, { - .ctl_name = VM_DIRTY_EXPIRE_CS, .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), @@ -883,7 +869,6 @@ static ctl_table vm_table[] = { }, #ifdef CONFIG_HUGETLB_PAGE { - .ctl_name = VM_HUGETLB_PAGES, .procname = "nr_hugepages", .data = &max_huge_pages, .maxlen = sizeof(unsigned long), @@ -1093,12 +1078,12 @@ static ctl_table vm_table[] = { }; #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) -static ctl_table binfmt_misc_table[] = { +static struct ctl_table binfmt_misc_table[] = { { .ctl_name = 0 } }; #endif -static ctl_table fs_table[] = { +static struct ctl_table fs_table[] = { { .ctl_name = FS_NRINODE, .procname = "inode-nr", @@ -1116,7 +1101,6 @@ static ctl_table fs_table[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = FS_NRFILE, .procname = "file-nr", .data = &files_stat, .maxlen = 3*sizeof(int), @@ -1192,7 +1176,6 @@ static ctl_table fs_table[] = { .extra2 = &two, }, { - .ctl_name = FS_AIO_NR, .procname = "aio-nr", .data = &aio_nr, .maxlen = sizeof(aio_nr), @@ -1200,7 +1183,6 @@ static ctl_table fs_table[] = { .proc_handler = &proc_doulongvec_minmax, }, { - .ctl_name = FS_AIO_MAX_NR, .procname = "aio-max-nr", .data = &aio_max_nr, .maxlen = sizeof(aio_max_nr), @@ -1239,7 +1221,7 @@ static ctl_table fs_table[] = { { .ctl_name = 0 } }; -static ctl_table debug_table[] = { +static struct ctl_table debug_table[] = { #if defined(CONFIG_X86) || defined(CONFIG_PPC) { .ctl_name = CTL_UNNUMBERED, @@ -1253,7 +1235,7 @@ static ctl_table debug_table[] = { { .ctl_name = 0 } }; -static ctl_table dev_table[] = { +static struct ctl_table dev_table[] = { { .ctl_name = 0 } }; @@ -1369,10 +1351,15 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args) if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; + error = deprecated_sysctl_warning(&tmp); + if (error) + goto out; + lock_kernel(); error = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, tmp.oldlenp, tmp.newval, tmp.newlen); unlock_kernel(); +out: return error; } #endif /* CONFIG_SYSCTL_SYSCALL */ @@ -1393,7 +1380,7 @@ static int test_perm(int mode, int op) return -EACCES; } -int sysctl_perm(ctl_table *table, int op) +int sysctl_perm(struct ctl_table *table, int op) { int error; error = security_sysctl(table, op); @@ -1406,7 +1393,7 @@ int sysctl_perm(ctl_table *table, int op) static int parse_table(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen, - ctl_table *table) + struct ctl_table *table) { int n; repeat: @@ -1437,13 +1424,12 @@ repeat: } /* Perform the actual read/write of a sysctl table entry. 
*/ -int do_sysctl_strategy (ctl_table *table, +int do_sysctl_strategy (struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { int op = 0, rc; - size_t len; if (oldval) op |= 004; @@ -1464,25 +1450,10 @@ int do_sysctl_strategy (ctl_table *table, /* If there is no strategy routine, or if the strategy returns * zero, proceed with automatic r/w */ if (table->data && table->maxlen) { - if (oldval && oldlenp) { - if (get_user(len, oldlenp)) - return -EFAULT; - if (len) { - if (len > table->maxlen) - len = table->maxlen; - if(copy_to_user(oldval, table->data, len)) - return -EFAULT; - if(put_user(len, oldlenp)) - return -EFAULT; - } - } - if (newval && newlen) { - len = newlen; - if (len > table->maxlen) - len = table->maxlen; - if(copy_from_user(table->data, newval, len)) - return -EFAULT; - } + rc = sysctl_data(table, name, nlen, oldval, oldlenp, + newval, newlen); + if (rc < 0) + return rc; } return 0; } @@ -1499,7 +1470,9 @@ static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table) static __init int sysctl_init(void) { + int err; sysctl_set_parent(NULL, root_table); + err = sysctl_check_table(root_table); return 0; } @@ -1512,7 +1485,7 @@ core_initcall(sysctl_init); * Register a sysctl table hierarchy. @table should be a filled in ctl_table * array. An entry with a ctl_name of 0 terminates the table. * - * The members of the &ctl_table structure are used as follows: + * The members of the &struct ctl_table structure are used as follows: * * ctl_name - This is the numeric sysctl value used by sysctl(2). The number * must be unique within that level of sysctl @@ -1573,7 +1546,7 @@ core_initcall(sysctl_init); * This routine returns %NULL on a failure to register, and a pointer * to the table header on success. */ -struct ctl_table_header *register_sysctl_table(ctl_table * table) +struct ctl_table_header *register_sysctl_table(struct ctl_table * table) { struct ctl_table_header *tmp; tmp = kmalloc(sizeof(struct ctl_table_header), GFP_KERNEL); @@ -1584,6 +1557,10 @@ struct ctl_table_header *register_sysctl_table(ctl_table * table) tmp->used = 0; tmp->unregistering = NULL; sysctl_set_parent(NULL, table); + if (sysctl_check_table(tmp->ctl_table)) { + kfree(tmp); + return NULL; + } spin_lock(&sysctl_lock); list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry); spin_unlock(&sysctl_lock); @@ -1607,7 +1584,7 @@ void unregister_sysctl_table(struct ctl_table_header * header) } #else /* !CONFIG_SYSCTL */ -struct ctl_table_header *register_sysctl_table(ctl_table * table) +struct ctl_table_header *register_sysctl_table(struct ctl_table * table) { return NULL; } @@ -1700,7 +1677,7 @@ static int _proc_do_string(void* data, int maxlen, int write, * * Returns 0 on success. 
*/ -int proc_dostring(ctl_table *table, int write, struct file *filp, +int proc_dostring(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return _proc_do_string(table->data, table->maxlen, write, filp, @@ -1727,7 +1704,7 @@ static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp, return 0; } -static int __do_proc_dointvec(void *tbl_data, ctl_table *table, +static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(int *negp, unsigned long *lvalp, int *valp, @@ -1837,7 +1814,7 @@ static int __do_proc_dointvec(void *tbl_data, ctl_table *table, #undef TMPBUFLEN } -static int do_proc_dointvec(ctl_table *table, int write, struct file *filp, +static int do_proc_dointvec(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(int *negp, unsigned long *lvalp, int *valp, int write, void *data), @@ -1861,7 +1838,7 @@ static int do_proc_dointvec(ctl_table *table, int write, struct file *filp, * * Returns 0 on success. */ -int proc_dointvec(ctl_table *table, int write, struct file *filp, +int proc_dointvec(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, @@ -1897,11 +1874,12 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp, return 0; } +#ifdef CONFIG_SECURITY_CAPABILITIES /* * init may raise the set. */ - -int proc_dointvec_bset(ctl_table *table, int write, struct file *filp, + +int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int op; @@ -1910,15 +1888,16 @@ int proc_dointvec_bset(ctl_table *table, int write, struct file *filp, return -EPERM; } - op = is_init(current) ? OP_SET : OP_AND; + op = is_global_init(current) ? OP_SET : OP_AND; return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, do_proc_dointvec_bset_conv,&op); } +#endif /* def CONFIG_SECURITY_CAPABILITIES */ /* * Taint values can only be increased */ -static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp, +static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { int op; @@ -1977,7 +1956,7 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp, * * Returns 0 on success. 
*/ -int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp, +int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { struct do_proc_dointvec_minmax_conv_param param = { @@ -1988,7 +1967,7 @@ int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp, do_proc_dointvec_minmax_conv, ¶m); } -static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write, +static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos, @@ -2093,7 +2072,7 @@ static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write, #undef TMPBUFLEN } -static int do_proc_doulongvec_minmax(ctl_table *table, int write, +static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos, @@ -2121,7 +2100,7 @@ static int do_proc_doulongvec_minmax(ctl_table *table, int write, * * Returns 0 on success. */ -int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp, +int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l); @@ -2145,7 +2124,7 @@ int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp, * * Returns 0 on success. */ -int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write, +int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -2238,7 +2217,7 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp, * * Returns 0 on success. */ -int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp, +int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, @@ -2261,7 +2240,7 @@ int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp, * * Returns 0 on success. */ -int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp, +int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, @@ -2285,21 +2264,21 @@ int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp, * * Returns 0 on success. 
*/ -int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp, +int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table, write, filp, buffer, lenp, ppos, do_proc_dointvec_ms_jiffies_conv, NULL); } -static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, +static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { struct pid *new_pid; pid_t tmp; int r; - tmp = pid_nr(cad_pid); + tmp = pid_nr_ns(cad_pid, current->nsproxy->pid_ns); r = __do_proc_dointvec(&tmp, table, write, filp, buffer, lenp, ppos, NULL, NULL); @@ -2316,55 +2295,55 @@ static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, #else /* CONFIG_PROC_FS */ -int proc_dostring(ctl_table *table, int write, struct file *filp, +int proc_dostring(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_dointvec(ctl_table *table, int write, struct file *filp, +int proc_dointvec(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_dointvec_bset(ctl_table *table, int write, struct file *filp, +int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp, +int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp, +int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp, +int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp, +int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp, +int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } -int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write, +int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -2381,8 +2360,42 @@ int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write, * General sysctl support routines */ +/* The generic sysctl data routine (used if no strategy routine supplied) */ +int sysctl_data(struct ctl_table *table, int __user *name, int nlen, + void __user *oldval, size_t __user *oldlenp, + void __user *newval, size_t newlen) +{ + size_t len; + + /* Get out of I don't have a variable */ + if (!table->data || !table->maxlen) + return -ENOTDIR; + + if (oldval && oldlenp) { + if (get_user(len, oldlenp)) + return -EFAULT; + if (len) { + if (len > table->maxlen) + len = table->maxlen; + if (copy_to_user(oldval, table->data, len)) + return -EFAULT; + if 
(put_user(len, oldlenp)) + return -EFAULT; + } + } + + if (newval && newlen) { + if (newlen > table->maxlen) + newlen = table->maxlen; + + if (copy_from_user(table->data, newval, newlen)) + return -EFAULT; + } + return 1; +} + /* The generic string strategy routine: */ -int sysctl_string(ctl_table *table, int __user *name, int nlen, +int sysctl_string(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { @@ -2428,7 +2441,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen, * are between the minimum and maximum values given in the arrays * table->extra1 and table->extra2, respectively. */ -int sysctl_intvec(ctl_table *table, int __user *name, int nlen, +int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { @@ -2464,7 +2477,7 @@ int sysctl_intvec(ctl_table *table, int __user *name, int nlen, } /* Strategy function to convert jiffies to seconds */ -int sysctl_jiffies(ctl_table *table, int __user *name, int nlen, +int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { @@ -2498,7 +2511,7 @@ int sysctl_jiffies(ctl_table *table, int __user *name, int nlen, } /* Strategy function to convert jiffies to seconds */ -int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, +int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { @@ -2538,59 +2551,50 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, asmlinkage long sys_sysctl(struct __sysctl_args __user *args) { - static int msg_count; struct __sysctl_args tmp; - int name[CTL_MAXNAME]; - int i; + int error; - /* Read in the sysctl name for better debug message logging */ if (copy_from_user(&tmp, args, sizeof(tmp))) return -EFAULT; - if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME) - return -ENOTDIR; - for (i = 0; i < tmp.nlen; i++) - if (get_user(name[i], tmp.name + i)) - return -EFAULT; - /* Ignore accesses to kernel.version */ - if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION)) - goto out; + error = deprecated_sysctl_warning(&tmp); - if (msg_count < 5) { - msg_count++; - printk(KERN_INFO - "warning: process `%s' used the removed sysctl " - "system call with ", current->comm); - for (i = 0; i < tmp.nlen; i++) - printk("%d.", name[i]); - printk("\n"); - } -out: + /* If no error reading the parameters then just -ENOSYS ... 
*/ + if (!error) + error = -ENOSYS; + + return error; +} + +int sysctl_data(struct ctl_table *table, int __user *name, int nlen, + void __user *oldval, size_t __user *oldlenp, + void __user *newval, size_t newlen) +{ return -ENOSYS; } -int sysctl_string(ctl_table *table, int __user *name, int nlen, +int sysctl_string(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { return -ENOSYS; } -int sysctl_intvec(ctl_table *table, int __user *name, int nlen, +int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { return -ENOSYS; } -int sysctl_jiffies(ctl_table *table, int __user *name, int nlen, +int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { return -ENOSYS; } -int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, +int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen) { @@ -2599,6 +2603,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, #endif /* CONFIG_SYSCTL_SYSCALL */ +static int deprecated_sysctl_warning(struct __sysctl_args *args) +{ + static int msg_count; + int name[CTL_MAXNAME]; + int i; + + /* Read in the sysctl name for better debug message logging */ + for (i = 0; i < args->nlen; i++) + if (get_user(name[i], args->name + i)) + return -EFAULT; + + /* Ignore accesses to kernel.version */ + if ((args->nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION)) + return 0; + + if (msg_count < 5) { + msg_count++; + printk(KERN_INFO + "warning: process `%s' used the deprecated sysctl " + "system call with ", current->comm); + for (i = 0; i < args->nlen; i++) + printk("%d.", name[i]); + printk("\n"); + } + return 0; +} + /* * No sense putting this after each symbol definition, twice, * exception granted :-) @@ -2616,4 +2647,5 @@ EXPORT_SYMBOL(sysctl_intvec); EXPORT_SYMBOL(sysctl_jiffies); EXPORT_SYMBOL(sysctl_ms_jiffies); EXPORT_SYMBOL(sysctl_string); +EXPORT_SYMBOL(sysctl_data); EXPORT_SYMBOL(unregister_sysctl_table); diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c new file mode 100644 index 00000000000..3c9ef5a7d57 --- /dev/null +++ b/kernel/sysctl_check.c @@ -0,0 +1,1588 @@ +#include <linux/stat.h> +#include <linux/sysctl.h> +#include "../arch/s390/appldata/appldata.h" +#include "../fs/xfs/linux-2.6/xfs_sysctl.h" +#include <linux/sunrpc/debug.h> +#include <linux/string.h> +#include <net/ip_vs.h> + +struct trans_ctl_table { + int ctl_name; + const char *procname; + struct trans_ctl_table *child; +}; + +static struct trans_ctl_table trans_random_table[] = { + { RANDOM_POOLSIZE, "poolsize" }, + { RANDOM_ENTROPY_COUNT, "entropy_avail" }, + { RANDOM_READ_THRESH, "read_wakeup_threshold" }, + { RANDOM_WRITE_THRESH, "write_wakeup_threshold" }, + { RANDOM_BOOT_ID, "boot_id" }, + { RANDOM_UUID, "uuid" }, + {} +}; + +static struct trans_ctl_table trans_pty_table[] = { + { PTY_MAX, "max" }, + { PTY_NR, "nr" }, + {} +}; + +static struct trans_ctl_table trans_kern_table[] = { + { KERN_OSTYPE, "ostype" }, + { KERN_OSRELEASE, "osrelease" }, + /* KERN_OSREV not used */ + { KERN_VERSION, "version" }, + /* KERN_SECUREMASK not used */ + /* KERN_PROF not used */ + { KERN_NODENAME, "hostname" }, + { KERN_DOMAINNAME, "domainname" }, + +#ifdef CONFIG_SECURITY_CAPABILITIES + { 
KERN_CAP_BSET, "cap-bound" }, +#endif /* def CONFIG_SECURITY_CAPABILITIES */ + + { KERN_PANIC, "panic" }, + { KERN_REALROOTDEV, "real-root-dev" }, + + { KERN_SPARC_REBOOT, "reboot-cmd" }, + { KERN_CTLALTDEL, "ctrl-alt-del" }, + { KERN_PRINTK, "printk" }, + + /* KERN_NAMETRANS not used */ + /* KERN_PPC_HTABRECLAIM not used */ + /* KERN_PPC_ZEROPAGED not used */ + { KERN_PPC_POWERSAVE_NAP, "powersave-nap" }, + + { KERN_MODPROBE, "modprobe" }, + { KERN_SG_BIG_BUFF, "sg-big-buff" }, + { KERN_ACCT, "acct" }, + { KERN_PPC_L2CR, "l2cr" }, + + /* KERN_RTSIGNR not used */ + /* KERN_RTSIGMAX not used */ + + { KERN_SHMMAX, "shmmax" }, + { KERN_MSGMAX, "msgmax" }, + { KERN_MSGMNB, "msgmnb" }, + /* KERN_MSGPOOL not used*/ + { KERN_SYSRQ, "sysrq" }, + { KERN_MAX_THREADS, "threads-max" }, + { KERN_RANDOM, "random", trans_random_table }, + { KERN_SHMALL, "shmall" }, + { KERN_MSGMNI, "msgmni" }, + { KERN_SEM, "sem" }, + { KERN_SPARC_STOP_A, "stop-a" }, + { KERN_SHMMNI, "shmmni" }, + + { KERN_OVERFLOWUID, "overflowuid" }, + { KERN_OVERFLOWGID, "overflowgid" }, + + { KERN_HOTPLUG, "hotplug", }, + { KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" }, + + { KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" }, + { KERN_CORE_USES_PID, "core_uses_pid" }, + { KERN_TAINTED, "tainted" }, + { KERN_CADPID, "cad_pid" }, + { KERN_PIDMAX, "pid_max" }, + { KERN_CORE_PATTERN, "core_pattern" }, + { KERN_PANIC_ON_OOPS, "panic_on_oops" }, + { KERN_HPPA_PWRSW, "soft-power" }, + { KERN_HPPA_UNALIGNED, "unaligned-trap" }, + + { KERN_PRINTK_RATELIMIT, "printk_ratelimit" }, + { KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" }, + + { KERN_PTY, "pty", trans_pty_table }, + { KERN_NGROUPS_MAX, "ngroups_max" }, + { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" }, + { KERN_HZ_TIMER, "hz_timer" }, + { KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" }, + { KERN_BOOTLOADER_TYPE, "bootloader_type" }, + { KERN_RANDOMIZE, "randomize_va_space" }, + + { KERN_SPIN_RETRY, "spin_retry" }, + { KERN_ACPI_VIDEO_FLAGS, "acpi_video_flags" }, + { KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" }, + { KERN_COMPAT_LOG, "compat-log" }, + { KERN_MAX_LOCK_DEPTH, "max_lock_depth" }, + { KERN_NMI_WATCHDOG, "nmi_watchdog" }, + { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" }, + {} +}; + +static struct trans_ctl_table trans_vm_table[] = { + { VM_OVERCOMMIT_MEMORY, "overcommit_memory" }, + { VM_PAGE_CLUSTER, "page-cluster" }, + { VM_DIRTY_BACKGROUND, "dirty_background_ratio" }, + { VM_DIRTY_RATIO, "dirty_ratio" }, + { VM_DIRTY_WB_CS, "dirty_writeback_centisecs" }, + { VM_DIRTY_EXPIRE_CS, "dirty_expire_centisecs" }, + { VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" }, + { VM_OVERCOMMIT_RATIO, "overcommit_ratio" }, + /* VM_PAGEBUF unused */ + { VM_HUGETLB_PAGES, "nr_hugepages" }, + { VM_SWAPPINESS, "swappiness" }, + { VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" }, + { VM_MIN_FREE_KBYTES, "min_free_kbytes" }, + { VM_MAX_MAP_COUNT, "max_map_count" }, + { VM_LAPTOP_MODE, "laptop_mode" }, + { VM_BLOCK_DUMP, "block_dump" }, + { VM_HUGETLB_GROUP, "hugetlb_shm_group" }, + { VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" }, + { VM_LEGACY_VA_LAYOUT, "legacy_va_layout" }, + /* VM_SWAP_TOKEN_TIMEOUT unused */ + { VM_DROP_PAGECACHE, "drop_caches" }, + { VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" }, + { VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" }, + { VM_MIN_UNMAPPED, "min_unmapped_ratio" }, + { VM_PANIC_ON_OOM, "panic_on_oom" }, + { VM_VDSO_ENABLED, "vdso_enabled" }, + { VM_MIN_SLAB, "min_slab_ratio" }, + { VM_CMM_PAGES, "cmm_pages" }, + { 
VM_CMM_TIMED_PAGES, "cmm_timed_pages" }, + { VM_CMM_TIMEOUT, "cmm_timeout" }, + + {} +}; + +static struct trans_ctl_table trans_net_core_table[] = { + { NET_CORE_WMEM_MAX, "wmem_max" }, + { NET_CORE_RMEM_MAX, "rmem_max" }, + { NET_CORE_WMEM_DEFAULT, "wmem_default" }, + { NET_CORE_RMEM_DEFAULT, "rmem_default" }, + /* NET_CORE_DESTROY_DELAY unused */ + { NET_CORE_MAX_BACKLOG, "netdev_max_backlog" }, + /* NET_CORE_FASTROUTE unused */ + { NET_CORE_MSG_COST, "message_cost" }, + { NET_CORE_MSG_BURST, "message_burst" }, + { NET_CORE_OPTMEM_MAX, "optmem_max" }, + /* NET_CORE_HOT_LIST_LENGTH unused */ + /* NET_CORE_DIVERT_VERSION unused */ + /* NET_CORE_NO_CONG_THRESH unused */ + /* NET_CORE_NO_CONG unused */ + /* NET_CORE_LO_CONG unused */ + /* NET_CORE_MOD_CONG unused */ + { NET_CORE_DEV_WEIGHT, "dev_weight" }, + { NET_CORE_SOMAXCONN, "somaxconn" }, + { NET_CORE_BUDGET, "netdev_budget" }, + { NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" }, + { NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" }, + { NET_CORE_WARNINGS, "warnings" }, + {}, +}; + +static struct trans_ctl_table trans_net_unix_table[] = { + /* NET_UNIX_DESTROY_DELAY unused */ + /* NET_UNIX_DELETE_DELAY unused */ + { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv4_route_table[] = { + { NET_IPV4_ROUTE_FLUSH, "flush" }, + { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" }, + { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" }, + { NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" }, + { NET_IPV4_ROUTE_MAX_SIZE, "max_size" }, + { NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" }, + { NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" }, + { NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" }, + { NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" }, + { NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" }, + { NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" }, + { NET_IPV4_ROUTE_ERROR_COST, "error_cost" }, + { NET_IPV4_ROUTE_ERROR_BURST, "error_burst" }, + { NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" }, + { NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" }, + { NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" }, + { NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" }, + { NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" }, + { NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = { + { NET_IPV4_CONF_FORWARDING, "forwarding" }, + { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" }, + + { NET_IPV4_CONF_PROXY_ARP, "proxy_arp" }, + { NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" }, + { NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" }, + { NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" }, + { NET_IPV4_CONF_SHARED_MEDIA, "shared_media" }, + { NET_IPV4_CONF_RP_FILTER, "rp_filter" }, + { NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" }, + { NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" }, + { NET_IPV4_CONF_LOG_MARTIANS, "log_martians" }, + { NET_IPV4_CONF_TAG, "tag" }, + { NET_IPV4_CONF_ARPFILTER, "arp_filter" }, + { NET_IPV4_CONF_MEDIUM_ID, "medium_id" }, + { NET_IPV4_CONF_NOXFRM, "disable_xfrm" }, + { NET_IPV4_CONF_NOPOLICY, "disable_policy" }, + { NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" }, + + { NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" }, + { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, + { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, + { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv4_conf_table[] = { + { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table }, + { NET_PROTO_CONF_DEFAULT, 
"default", trans_net_ipv4_conf_vars_table }, + { 0, NULL, trans_net_ipv4_conf_vars_table }, + {} +}; + + +static struct trans_ctl_table trans_net_ipv4_vs_table[] = { + { NET_IPV4_VS_AMEMTHRESH, "amemthresh" }, + { NET_IPV4_VS_DEBUG_LEVEL, "debug_level" }, + { NET_IPV4_VS_AMDROPRATE, "am_droprate" }, + { NET_IPV4_VS_DROP_ENTRY, "drop_entry" }, + { NET_IPV4_VS_DROP_PACKET, "drop_packet" }, + { NET_IPV4_VS_SECURE_TCP, "secure_tcp" }, + { NET_IPV4_VS_TO_ES, "timeout_established" }, + { NET_IPV4_VS_TO_SS, "timeout_synsent" }, + { NET_IPV4_VS_TO_SR, "timeout_synrecv" }, + { NET_IPV4_VS_TO_FW, "timeout_finwait" }, + { NET_IPV4_VS_TO_TW, "timeout_timewait" }, + { NET_IPV4_VS_TO_CL, "timeout_close" }, + { NET_IPV4_VS_TO_CW, "timeout_closewait" }, + { NET_IPV4_VS_TO_LA, "timeout_lastack" }, + { NET_IPV4_VS_TO_LI, "timeout_listen" }, + { NET_IPV4_VS_TO_SA, "timeout_synack" }, + { NET_IPV4_VS_TO_UDP, "timeout_udp" }, + { NET_IPV4_VS_TO_ICMP, "timeout_icmp" }, + { NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" }, + { NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" }, + { NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" }, + { NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" }, + { NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" }, + { NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" }, + { NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" }, + {} +}; + +static struct trans_ctl_table trans_net_neigh_vars_table[] = { + { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" }, + { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" }, + { NET_NEIGH_APP_SOLICIT, "app_solicit" }, + { NET_NEIGH_RETRANS_TIME, "retrans_time" }, + { NET_NEIGH_REACHABLE_TIME, "base_reachable_time" }, + { NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" }, + { NET_NEIGH_GC_STALE_TIME, "gc_stale_time" }, + { NET_NEIGH_UNRES_QLEN, "unres_qlen" }, + { NET_NEIGH_PROXY_QLEN, "proxy_qlen" }, + { NET_NEIGH_ANYCAST_DELAY, "anycast_delay" }, + { NET_NEIGH_PROXY_DELAY, "proxy_delay" }, + { NET_NEIGH_LOCKTIME, "locktime" }, + { NET_NEIGH_GC_INTERVAL, "gc_interval" }, + { NET_NEIGH_GC_THRESH1, "gc_thresh1" }, + { NET_NEIGH_GC_THRESH2, "gc_thresh2" }, + { NET_NEIGH_GC_THRESH3, "gc_thresh3" }, + { NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" }, + { NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" }, + {} +}; + +static struct trans_ctl_table trans_net_neigh_table[] = { + { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table }, + { 0, NULL, trans_net_neigh_vars_table }, + {} +}; + +static struct trans_ctl_table trans_net_ipv4_netfilter_table[] = { + { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" }, + + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "ip_conntrack_tcp_timeout_syn_recv" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "ip_conntrack_tcp_timeout_established" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "ip_conntrack_tcp_timeout_fin_wait" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "ip_conntrack_tcp_timeout_close_wait" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "ip_conntrack_tcp_timeout_last_ack" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "ip_conntrack_tcp_timeout_time_wait" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "ip_conntrack_tcp_timeout_close" }, + + { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, "ip_conntrack_udp_timeout" }, + { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "ip_conntrack_udp_timeout_stream" }, + { NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, "ip_conntrack_icmp_timeout" }, + { NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, 
"ip_conntrack_generic_timeout" }, + + { NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" }, + { NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" }, + { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "ip_conntrack_tcp_timeout_max_retrans" }, + { NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" }, + { NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" }, + { NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" }, + + { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "ip_conntrack_sctp_timeout_closed" }, + { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "ip_conntrack_sctp_timeout_cookie_wait" }, + { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "ip_conntrack_sctp_timeout_cookie_echoed" }, + { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "ip_conntrack_sctp_timeout_established" }, + { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "ip_conntrack_sctp_timeout_shutdown_sent" }, + { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "ip_conntrack_sctp_timeout_shutdown_recd" }, + { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "ip_conntrack_sctp_timeout_shutdown_ack_sent" }, + + { NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" }, + { NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv4_table[] = { + { NET_IPV4_FORWARD, "ip_forward" }, + { NET_IPV4_DYNADDR, "ip_dynaddr" }, + + { NET_IPV4_CONF, "conf", trans_net_ipv4_conf_table }, + { NET_IPV4_NEIGH, "neigh", trans_net_neigh_table }, + { NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table }, + /* NET_IPV4_FIB_HASH unused */ + { NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table }, + { NET_IPV4_VS, "vs", trans_net_ipv4_vs_table }, + + { NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" }, + { NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" }, + { NET_IPV4_TCP_SACK, "tcp_sack" }, + { NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" }, + { NET_IPV4_DEFAULT_TTL, "ip_default_ttl" }, + /* NET_IPV4_AUTOCONFIG unused */ + { NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" }, + { NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" }, + { NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" }, + { NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" }, + { NET_IPV4_IPFRAG_TIME, "ipfrag_time" }, + /* NET_IPV4_TCP_MAX_KA_PROBES unused */ + { NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" }, + { NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" }, + { NET_IPV4_TCP_RETRIES1, "tcp_retries1" }, + { NET_IPV4_TCP_RETRIES2, "tcp_retries2" }, + { NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" }, + /* NET_IPV4_IP_MASQ_DEBUG unused */ + { NET_TCP_SYNCOOKIES, "tcp_syncookies" }, + { NET_TCP_STDURG, "tcp_stdurg" }, + { NET_TCP_RFC1337, "tcp_rfc1337" }, + /* NET_TCP_SYN_TAILDROP unused */ + { NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" }, + { NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" }, + { NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" }, + { NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" }, + /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */ + /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */ + /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */ + /* NET_IPV4_ICMP_PARAMPROB_RATE unused */ + /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */ + { NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" }, + { NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" }, + { NET_TCP_TW_RECYCLE, "tcp_tw_recycle" }, + /* NET_IPV4_ALWAYS_DEFRAG unused */ + { NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" }, + { 
NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" }, + { NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" }, + { NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" }, + { NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" }, + { NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" }, + { NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" }, + { NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" }, + { NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" }, + { NET_TCP_MAX_ORPHANS, "tcp_max_orphans" }, + { NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" }, + { NET_TCP_FACK, "tcp_fack" }, + { NET_TCP_REORDERING, "tcp_reordering" }, + { NET_TCP_ECN, "tcp_ecn" }, + { NET_TCP_DSACK, "tcp_dsack" }, + { NET_TCP_MEM, "tcp_mem" }, + { NET_TCP_WMEM, "tcp_wmem" }, + { NET_TCP_RMEM, "tcp_rmem" }, + { NET_TCP_APP_WIN, "tcp_app_win" }, + { NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" }, + { NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" }, + { NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" }, + { NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" }, + { NET_TCP_TW_REUSE, "tcp_tw_reuse" }, + { NET_TCP_FRTO, "tcp_frto" }, + { NET_TCP_LOW_LATENCY, "tcp_low_latency" }, + { NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" }, + { NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" }, + { NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" }, + /* NET_TCP_DEFAULT_WIN_SCALE unused */ + { NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" }, + { NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" }, + /* NET_TCP_BIC_BETA unused */ + { NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" }, + { NET_TCP_CONG_CONTROL, "tcp_congestion_control" }, + { NET_TCP_ABC, "tcp_abc" }, + { NET_IPV4_IPFRAG_MAX_DIST, "ipfrag_max_dist" }, + { NET_TCP_MTU_PROBING, "tcp_mtu_probing" }, + { NET_TCP_BASE_MSS, "tcp_base_mss" }, + { NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" }, + { NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" }, + { NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" }, + { NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" }, + { NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" }, + { NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" }, + { NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" }, + { NET_TCP_AVAIL_CONG_CONTROL, "tcp_available_congestion_control" }, + { NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" }, + { NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" }, + { NET_TCP_FRTO_RESPONSE, "tcp_frto_response" }, + { 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" }, + {} +}; + +static struct trans_ctl_table trans_net_ipx_table[] = { + { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" }, + /* NET_IPX_FORWARDING unused */ + {} +}; + +static struct trans_ctl_table trans_net_atalk_table[] = { + { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" }, + { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" }, + { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" }, + { NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" }, + {}, +}; + +static struct trans_ctl_table trans_net_netrom_table[] = { + { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" }, + { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" }, + { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" }, + { NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" }, + { NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" }, + { NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" }, + { NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" }, + { NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, 
"transport_requested_window_size" }, + { NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" }, + { NET_NETROM_ROUTING_CONTROL, "routing_control" }, + { NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" }, + { NET_NETROM_RESET, "reset" }, + {} +}; + +static struct trans_ctl_table trans_net_ax25_table[] = { + { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" }, + { NET_AX25_DEFAULT_MODE, "ax25_default_mode" }, + { NET_AX25_BACKOFF_TYPE, "backoff_type" }, + { NET_AX25_CONNECT_MODE, "connect_mode" }, + { NET_AX25_STANDARD_WINDOW, "standard_window_size" }, + { NET_AX25_EXTENDED_WINDOW, "extended_window_size" }, + { NET_AX25_T1_TIMEOUT, "t1_timeout" }, + { NET_AX25_T2_TIMEOUT, "t2_timeout" }, + { NET_AX25_T3_TIMEOUT, "t3_timeout" }, + { NET_AX25_IDLE_TIMEOUT, "idle_timeout" }, + { NET_AX25_N2, "maximum_retry_count" }, + { NET_AX25_PACLEN, "maximum_packet_length" }, + { NET_AX25_PROTOCOL, "protocol" }, + { NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" }, + {} +}; + +static struct trans_ctl_table trans_net_bridge_table[] = { + { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" }, + { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" }, + { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" }, + { NET_BRIDGE_NF_FILTER_VLAN_TAGGED, "bridge-nf-filter-vlan-tagged" }, + { NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, "bridge-nf-filter-pppoe-tagged" }, + {} +}; + +static struct trans_ctl_table trans_net_rose_table[] = { + { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" }, + { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" }, + { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" }, + { NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" }, + { NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" }, + { NET_ROSE_ROUTING_CONTROL, "routing_control" }, + { NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" }, + { NET_ROSE_MAX_VCS, "maximum_virtual_circuits" }, + { NET_ROSE_WINDOW_SIZE, "window_size" }, + { NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv6_conf_var_table[] = { + { NET_IPV6_FORWARDING, "forwarding" }, + { NET_IPV6_HOP_LIMIT, "hop_limit" }, + { NET_IPV6_MTU, "mtu" }, + { NET_IPV6_ACCEPT_RA, "accept_ra" }, + { NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" }, + { NET_IPV6_AUTOCONF, "autoconf" }, + { NET_IPV6_DAD_TRANSMITS, "dad_transmits" }, + { NET_IPV6_RTR_SOLICITS, "router_solicitations" }, + { NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" }, + { NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" }, + { NET_IPV6_USE_TEMPADDR, "use_tempaddr" }, + { NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" }, + { NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" }, + { NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" }, + { NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" }, + { NET_IPV6_MAX_ADDRESSES, "max_addresses" }, + { NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" }, + { NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" }, + { NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" }, + { NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" }, + { NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" }, + { NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" }, + { NET_IPV6_PROXY_NDP, "proxy_ndp" }, + { NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv6_conf_table[] = { + { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table }, + { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table }, + { 
0, NULL, trans_net_ipv6_conf_var_table }, + {} +}; + +static struct trans_ctl_table trans_net_ipv6_route_table[] = { + { NET_IPV6_ROUTE_FLUSH, "flush" }, + { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" }, + { NET_IPV6_ROUTE_MAX_SIZE, "max_size" }, + { NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" }, + { NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" }, + { NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" }, + { NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" }, + { NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" }, + { NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" }, + { NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv6_icmp_table[] = { + { NET_IPV6_ICMP_RATELIMIT, "ratelimit" }, + {} +}; + +static struct trans_ctl_table trans_net_ipv6_table[] = { + { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table }, + { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table }, + { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table }, + { NET_IPV6_ICMP, "icmp", trans_net_ipv6_icmp_table }, + { NET_IPV6_BINDV6ONLY, "bindv6only" }, + { NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" }, + { NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" }, + { NET_IPV6_IP6FRAG_TIME, "ip6frag_time" }, + { NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" }, + { NET_IPV6_MLD_MAX_MSF, "mld_max_msf" }, + { 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" }, + {} +}; + +static struct trans_ctl_table trans_net_x25_table[] = { + { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" }, + { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" }, + { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" }, + { NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" }, + { NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" }, + { NET_X25_FORWARD, "x25_forward" }, + {} +}; + +static struct trans_ctl_table trans_net_tr_table[] = { + { NET_TR_RIF_TIMEOUT, "rif_timeout" }, + {} +}; + + +static struct trans_ctl_table trans_net_decnet_conf_vars[] = { + { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" }, + { NET_DECNET_CONF_DEV_PRIORITY, "priority" }, + { NET_DECNET_CONF_DEV_T2, "t2" }, + { NET_DECNET_CONF_DEV_T3, "t3" }, + {} +}; + +static struct trans_ctl_table trans_net_decnet_conf[] = { + { 0, NULL, trans_net_decnet_conf_vars }, + {} +}; + +static struct trans_ctl_table trans_net_decnet_table[] = { + { NET_DECNET_CONF, "conf", trans_net_decnet_conf }, + { NET_DECNET_NODE_ADDRESS, "node_address" }, + { NET_DECNET_NODE_NAME, "node_name" }, + { NET_DECNET_DEFAULT_DEVICE, "default_device" }, + { NET_DECNET_TIME_WAIT, "time_wait" }, + { NET_DECNET_DN_COUNT, "dn_count" }, + { NET_DECNET_DI_COUNT, "di_count" }, + { NET_DECNET_DR_COUNT, "dr_count" }, + { NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" }, + { NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" }, + { NET_DECNET_MEM, "decnet_mem" }, + { NET_DECNET_RMEM, "decnet_rmem" }, + { NET_DECNET_WMEM, "decnet_wmem" }, + { NET_DECNET_DEBUG_LEVEL, "debug" }, + {} +}; + +static struct trans_ctl_table trans_net_sctp_table[] = { + { NET_SCTP_RTO_INITIAL, "rto_initial" }, + { NET_SCTP_RTO_MIN, "rto_min" }, + { NET_SCTP_RTO_MAX, "rto_max" }, + { NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" }, + { NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" }, + { NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" }, + { NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" }, + { NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" }, + { NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" }, + { NET_SCTP_HB_INTERVAL, "hb_interval" }, + { NET_SCTP_PRESERVE_ENABLE, 
"cookie_preserve_enable" }, + { NET_SCTP_MAX_BURST, "max_burst" }, + { NET_SCTP_ADDIP_ENABLE, "addip_enable" }, + { NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" }, + { NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" }, + { NET_SCTP_SACK_TIMEOUT, "sack_timeout" }, + { NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" }, + {} +}; + +static struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = { + { NET_LLC2_ACK_TIMEOUT, "ack" }, + { NET_LLC2_P_TIMEOUT, "p" }, + { NET_LLC2_REJ_TIMEOUT, "rej" }, + { NET_LLC2_BUSY_TIMEOUT, "busy" }, + {} +}; + +static struct trans_ctl_table trans_net_llc_station_table[] = { + { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" }, + {} +}; + +static struct trans_ctl_table trans_net_llc_llc2_table[] = { + { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table }, + {} +}; + +static struct trans_ctl_table trans_net_llc_table[] = { + { NET_LLC2, "llc2", trans_net_llc_llc2_table }, + { NET_LLC_STATION, "station", trans_net_llc_station_table }, + {} +}; + +static struct trans_ctl_table trans_net_netfilter_table[] = { + { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "nf_conntrack_tcp_timeout_established" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "nf_conntrack_tcp_timeout_fin_wait" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "nf_conntrack_tcp_timeout_close_wait" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "nf_conntrack_tcp_timeout_last_ack" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "nf_conntrack_tcp_timeout_time_wait" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "nf_conntrack_tcp_timeout_close" }, + { NET_NF_CONNTRACK_UDP_TIMEOUT, "nf_conntrack_udp_timeout" }, + { NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "nf_conntrack_udp_timeout_stream" }, + { NET_NF_CONNTRACK_ICMP_TIMEOUT, "nf_conntrack_icmp_timeout" }, + { NET_NF_CONNTRACK_GENERIC_TIMEOUT, "nf_conntrack_generic_timeout" }, + { NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" }, + { NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" }, + { NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "nf_conntrack_tcp_timeout_max_retrans" }, + { NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" }, + { NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" }, + { NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" }, + { NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "nf_conntrack_sctp_timeout_closed" }, + { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "nf_conntrack_sctp_timeout_cookie_wait" }, + { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "nf_conntrack_sctp_timeout_cookie_echoed" }, + { NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "nf_conntrack_sctp_timeout_established" }, + { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "nf_conntrack_sctp_timeout_shutdown_sent" }, + { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "nf_conntrack_sctp_timeout_shutdown_recd" }, + { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "nf_conntrack_sctp_timeout_shutdown_ack_sent" }, + { NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" }, + { NET_NF_CONNTRACK_ICMPV6_TIMEOUT, "nf_conntrack_icmpv6_timeout" }, + { NET_NF_CONNTRACK_FRAG6_TIMEOUT, "nf_conntrack_frag6_timeout" }, + { NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" }, + { NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" }, + { NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" }, + + {} +}; + +static struct trans_ctl_table trans_net_dccp_table[] = { + { 
NET_DCCP_DEFAULT, "default" }, + {} +}; + +static struct trans_ctl_table trans_net_irda_table[] = { + { NET_IRDA_DISCOVERY, "discovery" }, + { NET_IRDA_DEVNAME, "devname" }, + { NET_IRDA_DEBUG, "debug" }, + { NET_IRDA_FAST_POLL, "fast_poll_increase" }, + { NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" }, + { NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" }, + { NET_IRDA_SLOT_TIMEOUT, "slot_timeout" }, + { NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" }, + { NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" }, + { NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" }, + { NET_IRDA_MAX_TX_WINDOW, "max_tx_window" }, + { NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" }, + { NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" }, + { NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" }, + {} +}; + +static struct trans_ctl_table trans_net_table[] = { + { NET_CORE, "core", trans_net_core_table }, + /* NET_ETHER not used */ + /* NET_802 not used */ + { NET_UNIX, "unix", trans_net_unix_table }, + { NET_IPV4, "ipv4", trans_net_ipv4_table }, + { NET_IPX, "ipx", trans_net_ipx_table }, + { NET_ATALK, "atalk", trans_net_atalk_table }, + { NET_NETROM, "netrom", trans_net_netrom_table }, + { NET_AX25, "ax25", trans_net_ax25_table }, + { NET_BRIDGE, "bridge", trans_net_bridge_table }, + { NET_ROSE, "rose", trans_net_rose_table }, + { NET_IPV6, "ipv6", trans_net_ipv6_table }, + { NET_X25, "x25", trans_net_x25_table }, + { NET_TR, "tr", trans_net_tr_table }, + { NET_DECNET, "decnet", trans_net_decnet_table }, + /* NET_ECONET not used */ + { NET_SCTP, "sctp", trans_net_sctp_table }, + { NET_LLC, "llc", trans_net_llc_table }, + { NET_NETFILTER, "netfilter", trans_net_netfilter_table }, + { NET_DCCP, "dccp", trans_net_dccp_table }, + { NET_IRDA, "irda", trans_net_irda_table }, + { 2089, "nf_conntrack_max" }, + {} +}; + +static struct trans_ctl_table trans_fs_quota_table[] = { + { FS_DQ_LOOKUPS, "lookups" }, + { FS_DQ_DROPS, "drops" }, + { FS_DQ_READS, "reads" }, + { FS_DQ_WRITES, "writes" }, + { FS_DQ_CACHE_HITS, "cache_hits" }, + { FS_DQ_ALLOCATED, "allocated_dquots" }, + { FS_DQ_FREE, "free_dquots" }, + { FS_DQ_SYNCS, "syncs" }, + { FS_DQ_WARNINGS, "warnings" }, + {} +}; + +static struct trans_ctl_table trans_fs_xfs_table[] = { + { XFS_RESTRICT_CHOWN, "restrict_chown" }, + { XFS_SGID_INHERIT, "irix_sgid_inherit" }, + { XFS_SYMLINK_MODE, "irix_symlink_mode" }, + { XFS_PANIC_MASK, "panic_mask" }, + + { XFS_ERRLEVEL, "error_level" }, + { XFS_SYNCD_TIMER, "xfssyncd_centisecs" }, + { XFS_INHERIT_SYNC, "inherit_sync" }, + { XFS_INHERIT_NODUMP, "inherit_nodump" }, + { XFS_INHERIT_NOATIME, "inherit_noatime" }, + { XFS_BUF_TIMER, "xfsbufd_centisecs" }, + { XFS_BUF_AGE, "age_buffer_centisecs" }, + { XFS_INHERIT_NOSYM, "inherit_nosymlinks" }, + { XFS_ROTORSTEP, "rotorstep" }, + { XFS_INHERIT_NODFRG, "inherit_nodefrag" }, + { XFS_FILESTREAM_TIMER, "filestream_centisecs" }, + { XFS_STATS_CLEAR, "stats_clear" }, + {} +}; + +static struct trans_ctl_table trans_fs_ocfs2_nm_table[] = { + { 1, "hb_ctl_path" }, + {} +}; + +static struct trans_ctl_table trans_fs_ocfs2_table[] = { + { 1, "nm", trans_fs_ocfs2_nm_table }, + {} +}; + +static struct trans_ctl_table trans_inotify_table[] = { + { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" }, + { INOTIFY_MAX_USER_WATCHES, "max_user_watches" }, + { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" }, + {} +}; + +static struct trans_ctl_table trans_fs_table[] = { + { FS_NRINODE, "inode-nr" }, + { FS_STATINODE, "inode-state" }, + /* FS_MAXINODE unused */ + /* FS_NRDQUOT unused */ + /* FS_MAXDQUOT unused */ + { 
FS_NRFILE, "file-nr" }, + { FS_MAXFILE, "file-max" }, + { FS_DENTRY, "dentry-state" }, + /* FS_NRSUPER unused */ + /* FS_MAXUPSER unused */ + { FS_OVERFLOWUID, "overflowuid" }, + { FS_OVERFLOWGID, "overflowgid" }, + { FS_LEASES, "leases-enable" }, + { FS_DIR_NOTIFY, "dir-notify-enable" }, + { FS_LEASE_TIME, "lease-break-time" }, + { FS_DQSTATS, "quota", trans_fs_quota_table }, + { FS_XFS, "xfs", trans_fs_xfs_table }, + { FS_AIO_NR, "aio-nr" }, + { FS_AIO_MAX_NR, "aio-max-nr" }, + { FS_INOTIFY, "inotify", trans_inotify_table }, + { FS_OCFS2, "ocfs2", trans_fs_ocfs2_table }, + { KERN_SETUID_DUMPABLE, "suid_dumpable" }, + {} +}; + +static struct trans_ctl_table trans_debug_table[] = { + {} +}; + +static struct trans_ctl_table trans_cdrom_table[] = { + { DEV_CDROM_INFO, "info" }, + { DEV_CDROM_AUTOCLOSE, "autoclose" }, + { DEV_CDROM_AUTOEJECT, "autoeject" }, + { DEV_CDROM_DEBUG, "debug" }, + { DEV_CDROM_LOCK, "lock" }, + { DEV_CDROM_CHECK_MEDIA, "check_media" }, + {} +}; + +static struct trans_ctl_table trans_ipmi_table[] = { + { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" }, + {} +}; + +static struct trans_ctl_table trans_mac_hid_files[] = { + /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */ + /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */ + { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" }, + { DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" }, + { DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" }, + /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */ + {} +}; + +static struct trans_ctl_table trans_raid_table[] = { + { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" }, + { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" }, + {} +}; + +static struct trans_ctl_table trans_scsi_table[] = { + { DEV_SCSI_LOGGING_LEVEL, "logging_level" }, + {} +}; + +static struct trans_ctl_table trans_parport_default_table[] = { + { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" }, + { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" }, + {} +}; + +static struct trans_ctl_table trans_parport_device_table[] = { + { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" }, + {} +}; + +static struct trans_ctl_table trans_parport_devices_table[] = { + { DEV_PARPORT_DEVICES_ACTIVE, "active" }, + { 0, NULL, trans_parport_device_table }, + {} +}; + +static struct trans_ctl_table trans_parport_parport_table[] = { + { DEV_PARPORT_SPINTIME, "spintime" }, + { DEV_PARPORT_BASE_ADDR, "base-addr" }, + { DEV_PARPORT_IRQ, "irq" }, + { DEV_PARPORT_DMA, "dma" }, + { DEV_PARPORT_MODES, "modes" }, + { DEV_PARPORT_DEVICES, "devices", trans_parport_devices_table }, + { DEV_PARPORT_AUTOPROBE, "autoprobe" }, + { DEV_PARPORT_AUTOPROBE + 1, "autoprobe0" }, + { DEV_PARPORT_AUTOPROBE + 2, "autoprobe1" }, + { DEV_PARPORT_AUTOPROBE + 3, "autoprobe2" }, + { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" }, + {} +}; +static struct trans_ctl_table trans_parport_table[] = { + { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table }, + { 0, NULL, trans_parport_parport_table }, + {} +}; + +static struct trans_ctl_table trans_dev_table[] = { + { DEV_CDROM, "cdrom", trans_cdrom_table }, + /* DEV_HWMON unused */ + { DEV_PARPORT, "parport", trans_parport_table }, + { DEV_RAID, "raid", trans_raid_table }, + { DEV_MAC_HID, "mac_hid", trans_mac_hid_files }, + { DEV_SCSI, "scsi", trans_scsi_table }, + { DEV_IPMI, "ipmi", trans_ipmi_table }, + {} +}; + +static struct trans_ctl_table trans_bus_isa_table[] = { + { BUS_ISA_MEM_BASE, "membase" }, + { BUS_ISA_PORT_BASE, "portbase" }, + { BUS_ISA_PORT_SHIFT, "portshift" }, + {} +}; + +static 
struct trans_ctl_table trans_bus_table[] = { + { CTL_BUS_ISA, "isa", trans_bus_isa_table }, + {} +}; + +static struct trans_ctl_table trans_arlan_conf_table0[] = { + { 1, "spreadingCode" }, + { 2, "channelNumber" }, + { 3, "scramblingDisable" }, + { 4, "txAttenuation" }, + { 5, "systemId" }, + { 6, "maxDatagramSize" }, + { 7, "maxFrameSize" }, + { 8, "maxRetries" }, + { 9, "receiveMode" }, + { 10, "priority" }, + { 11, "rootOrRepeater" }, + { 12, "SID" }, + { 13, "registrationMode" }, + { 14, "registrationFill" }, + { 15, "localTalkAddress" }, + { 16, "codeFormat" }, + { 17, "numChannels" }, + { 18, "channel1" }, + { 19, "channel2" }, + { 20, "channel3" }, + { 21, "channel4" }, + { 22, "txClear" }, + { 23, "txRetries" }, + { 24, "txRouting" }, + { 25, "txScrambled" }, + { 26, "rxParameter" }, + { 27, "txTimeoutMs" }, + { 28, "waitCardTimeout" }, + { 29, "channelSet" }, + { 30, "name" }, + { 31, "waitTime" }, + { 32, "lParameter" }, + { 33, "_15" }, + { 34, "headerSize" }, + { 36, "tx_delay_ms" }, + { 37, "retries" }, + { 38, "ReTransmitPacketMaxSize" }, + { 39, "waitReTransmitPacketMaxSize" }, + { 40, "fastReTransCount" }, + { 41, "driverRetransmissions" }, + { 42, "txAckTimeoutMs" }, + { 43, "registrationInterrupts" }, + { 44, "hardwareType" }, + { 45, "radioType" }, + { 46, "writeEEPROM" }, + { 47, "writeRadioType" }, + { 48, "entry_exit_debug" }, + { 49, "debug" }, + { 50, "in_speed" }, + { 51, "out_speed" }, + { 52, "in_speed10" }, + { 53, "out_speed10" }, + { 54, "in_speed_max" }, + { 55, "out_speed_max" }, + { 56, "measure_rate" }, + { 57, "pre_Command_Wait" }, + { 58, "rx_tweak1" }, + { 59, "rx_tweak2" }, + { 60, "tx_queue_len" }, + + { 150, "arlan0-txRing" }, + { 151, "arlan0-rxRing" }, + { 152, "arlan0-18" }, + { 153, "arlan0-ring" }, + { 154, "arlan0-shm-cpy" }, + { 155, "config0" }, + { 156, "reset0" }, + {} +}; + +static struct trans_ctl_table trans_arlan_conf_table1[] = { + { 1, "spreadingCode" }, + { 2, "channelNumber" }, + { 3, "scramblingDisable" }, + { 4, "txAttenuation" }, + { 5, "systemId" }, + { 6, "maxDatagramSize" }, + { 7, "maxFrameSize" }, + { 8, "maxRetries" }, + { 9, "receiveMode" }, + { 10, "priority" }, + { 11, "rootOrRepeater" }, + { 12, "SID" }, + { 13, "registrationMode" }, + { 14, "registrationFill" }, + { 15, "localTalkAddress" }, + { 16, "codeFormat" }, + { 17, "numChannels" }, + { 18, "channel1" }, + { 19, "channel2" }, + { 20, "channel3" }, + { 21, "channel4" }, + { 22, "txClear" }, + { 23, "txRetries" }, + { 24, "txRouting" }, + { 25, "txScrambled" }, + { 26, "rxParameter" }, + { 27, "txTimeoutMs" }, + { 28, "waitCardTimeout" }, + { 29, "channelSet" }, + { 30, "name" }, + { 31, "waitTime" }, + { 32, "lParameter" }, + { 33, "_15" }, + { 34, "headerSize" }, + { 36, "tx_delay_ms" }, + { 37, "retries" }, + { 38, "ReTransmitPacketMaxSize" }, + { 39, "waitReTransmitPacketMaxSize" }, + { 40, "fastReTransCount" }, + { 41, "driverRetransmissions" }, + { 42, "txAckTimeoutMs" }, + { 43, "registrationInterrupts" }, + { 44, "hardwareType" }, + { 45, "radioType" }, + { 46, "writeEEPROM" }, + { 47, "writeRadioType" }, + { 48, "entry_exit_debug" }, + { 49, "debug" }, + { 50, "in_speed" }, + { 51, "out_speed" }, + { 52, "in_speed10" }, + { 53, "out_speed10" }, + { 54, "in_speed_max" }, + { 55, "out_speed_max" }, + { 56, "measure_rate" }, + { 57, "pre_Command_Wait" }, + { 58, "rx_tweak1" }, + { 59, "rx_tweak2" }, + { 60, "tx_queue_len" }, + + { 150, "arlan1-txRing" }, + { 151, "arlan1-rxRing" }, + { 152, "arlan1-18" }, + { 153, "arlan1-ring" }, + { 154, "arlan1-shm-cpy" 
}, + { 155, "config1" }, + { 156, "reset1" }, + {} +}; + +static struct trans_ctl_table trans_arlan_conf_table2[] = { + { 1, "spreadingCode" }, + { 2, "channelNumber" }, + { 3, "scramblingDisable" }, + { 4, "txAttenuation" }, + { 5, "systemId" }, + { 6, "maxDatagramSize" }, + { 7, "maxFrameSize" }, + { 8, "maxRetries" }, + { 9, "receiveMode" }, + { 10, "priority" }, + { 11, "rootOrRepeater" }, + { 12, "SID" }, + { 13, "registrationMode" }, + { 14, "registrationFill" }, + { 15, "localTalkAddress" }, + { 16, "codeFormat" }, + { 17, "numChannels" }, + { 18, "channel1" }, + { 19, "channel2" }, + { 20, "channel3" }, + { 21, "channel4" }, + { 22, "txClear" }, + { 23, "txRetries" }, + { 24, "txRouting" }, + { 25, "txScrambled" }, + { 26, "rxParameter" }, + { 27, "txTimeoutMs" }, + { 28, "waitCardTimeout" }, + { 29, "channelSet" }, + { 30, "name" }, + { 31, "waitTime" }, + { 32, "lParameter" }, + { 33, "_15" }, + { 34, "headerSize" }, + { 36, "tx_delay_ms" }, + { 37, "retries" }, + { 38, "ReTransmitPacketMaxSize" }, + { 39, "waitReTransmitPacketMaxSize" }, + { 40, "fastReTransCount" }, + { 41, "driverRetransmissions" }, + { 42, "txAckTimeoutMs" }, + { 43, "registrationInterrupts" }, + { 44, "hardwareType" }, + { 45, "radioType" }, + { 46, "writeEEPROM" }, + { 47, "writeRadioType" }, + { 48, "entry_exit_debug" }, + { 49, "debug" }, + { 50, "in_speed" }, + { 51, "out_speed" }, + { 52, "in_speed10" }, + { 53, "out_speed10" }, + { 54, "in_speed_max" }, + { 55, "out_speed_max" }, + { 56, "measure_rate" }, + { 57, "pre_Command_Wait" }, + { 58, "rx_tweak1" }, + { 59, "rx_tweak2" }, + { 60, "tx_queue_len" }, + + { 150, "arlan2-txRing" }, + { 151, "arlan2-rxRing" }, + { 152, "arlan2-18" }, + { 153, "arlan2-ring" }, + { 154, "arlan2-shm-cpy" }, + { 155, "config2" }, + { 156, "reset2" }, + {} +}; + +static struct trans_ctl_table trans_arlan_conf_table3[] = { + { 1, "spreadingCode" }, + { 2, "channelNumber" }, + { 3, "scramblingDisable" }, + { 4, "txAttenuation" }, + { 5, "systemId" }, + { 6, "maxDatagramSize" }, + { 7, "maxFrameSize" }, + { 8, "maxRetries" }, + { 9, "receiveMode" }, + { 10, "priority" }, + { 11, "rootOrRepeater" }, + { 12, "SID" }, + { 13, "registrationMode" }, + { 14, "registrationFill" }, + { 15, "localTalkAddress" }, + { 16, "codeFormat" }, + { 17, "numChannels" }, + { 18, "channel1" }, + { 19, "channel2" }, + { 20, "channel3" }, + { 21, "channel4" }, + { 22, "txClear" }, + { 23, "txRetries" }, + { 24, "txRouting" }, + { 25, "txScrambled" }, + { 26, "rxParameter" }, + { 27, "txTimeoutMs" }, + { 28, "waitCardTimeout" }, + { 29, "channelSet" }, + { 30, "name" }, + { 31, "waitTime" }, + { 32, "lParameter" }, + { 33, "_15" }, + { 34, "headerSize" }, + { 36, "tx_delay_ms" }, + { 37, "retries" }, + { 38, "ReTransmitPacketMaxSize" }, + { 39, "waitReTransmitPacketMaxSize" }, + { 40, "fastReTransCount" }, + { 41, "driverRetransmissions" }, + { 42, "txAckTimeoutMs" }, + { 43, "registrationInterrupts" }, + { 44, "hardwareType" }, + { 45, "radioType" }, + { 46, "writeEEPROM" }, + { 47, "writeRadioType" }, + { 48, "entry_exit_debug" }, + { 49, "debug" }, + { 50, "in_speed" }, + { 51, "out_speed" }, + { 52, "in_speed10" }, + { 53, "out_speed10" }, + { 54, "in_speed_max" }, + { 55, "out_speed_max" }, + { 56, "measure_rate" }, + { 57, "pre_Command_Wait" }, + { 58, "rx_tweak1" }, + { 59, "rx_tweak2" }, + { 60, "tx_queue_len" }, + + { 150, "arlan3-txRing" }, + { 151, "arlan3-rxRing" }, + { 152, "arlan3-18" }, + { 153, "arlan3-ring" }, + { 154, "arlan3-shm-cpy" }, + { 155, "config3" }, + { 156, "reset3" }, 
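Each arlanN table above maps the arlan driver's binary sysctl numbers onto the procnames exposed under /proc/sys, and an entry's optional third field points at a child table, so the tables form a tree rooted in trans_root_table further down; sysctl_binary_lookup() walks that tree to pair a binary ctl_name path with its procname path. A rough userspace-style sketch of the same walk, using simplified stand-in types and made-up numbering rather than the kernel's own tables:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the kernel's struct trans_ctl_table. */
struct trans_tbl {
	int ctl_name;                   /* binary sysctl number; 0 terminates a table */
	const char *procname;           /* name of the /proc/sys entry */
	const struct trans_tbl *child;  /* non-NULL for directories */
};

/* Hypothetical numbering, for illustration only. */
static const struct trans_tbl demo_arlan0[] = {
	{ 1, "spreadingCode" },
	{ 5, "systemId" },
	{}
};
static const struct trans_tbl demo_arlan[] = {
	{ 1, "arlan0", demo_arlan0 },
	{}
};
static const struct trans_tbl demo_root[] = {
	{ 10, "arlan", demo_arlan },
	{}
};

/* Translate a binary path such as { 10, 1, 5 } into "arlan/arlan0/systemId". */
static int translate(const int *name, int nlen, char *buf, size_t len)
{
	const struct trans_tbl *tbl = demo_root;

	buf[0] = '\0';
	for (int i = 0; i < nlen; i++) {
		const struct trans_tbl *e;

		for (e = tbl; e->ctl_name || e->procname; e++)
			if (e->ctl_name == name[i])
				break;
		if (!e->ctl_name && !e->procname)
			return -1;              /* no translation known */
		if (i)
			strncat(buf, "/", len - strlen(buf) - 1);
		strncat(buf, e->procname, len - strlen(buf) - 1);
		tbl = e->child;
		if (!tbl && i != nlen - 1)
			return -1;              /* path descends below a leaf */
	}
	return 0;
}

int main(void)
{
	int path[] = { 10, 1, 5 };
	char buf[128];

	if (translate(path, 3, buf, sizeof(buf)) == 0)
		printf("/proc/sys/%s\n", buf);  /* prints /proc/sys/arlan/arlan0/systemId */
	return 0;
}

The kernel side also uses the tree in the opposite direction: sysctl_check_bin_path() below consults it to verify that a newly registered table's procname and ctl_name agree with the established binary path.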
+ {} +}; + +static struct trans_ctl_table trans_arlan_table[] = { + { 1, "arlan0", trans_arlan_conf_table0 }, + { 2, "arlan1", trans_arlan_conf_table1 }, + { 3, "arlan2", trans_arlan_conf_table2 }, + { 4, "arlan3", trans_arlan_conf_table3 }, + {} +}; + +static struct trans_ctl_table trans_appldata_table[] = { + { CTL_APPLDATA_TIMER, "timer" }, + { CTL_APPLDATA_INTERVAL, "interval" }, + { CTL_APPLDATA_OS, "os" }, + { CTL_APPLDATA_NET_SUM, "net_sum" }, + { CTL_APPLDATA_MEM, "mem" }, + {} + +}; + +static struct trans_ctl_table trans_s390dbf_table[] = { + { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" }, + { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" }, + {} +}; + +static struct trans_ctl_table trans_sunrpc_table[] = { + { CTL_RPCDEBUG, "rpc_debug" }, + { CTL_NFSDEBUG, "nfs_debug" }, + { CTL_NFSDDEBUG, "nfsd_debug" }, + { CTL_NLMDEBUG, "nlm_debug" }, + { CTL_SLOTTABLE_UDP, "udp_slot_table_entries" }, + { CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" }, + { CTL_MIN_RESVPORT, "min_resvport" }, + { CTL_MAX_RESVPORT, "max_resvport" }, + {} +}; + +static struct trans_ctl_table trans_pm_table[] = { + { 1 /* CTL_PM_SUSPEND */, "suspend" }, + { 2 /* CTL_PM_CMODE */, "cmode" }, + { 3 /* CTL_PM_P0 */, "p0" }, + { 4 /* CTL_PM_CM */, "cm" }, + {} +}; + +static struct trans_ctl_table trans_frv_table[] = { + { 1, "cache-mode" }, + { 2, "pin-cxnr" }, + {} +}; + +static struct trans_ctl_table trans_root_table[] = { + { CTL_KERN, "kernel", trans_kern_table }, + { CTL_VM, "vm", trans_vm_table }, + { CTL_NET, "net", trans_net_table }, + /* CTL_PROC not used */ + { CTL_FS, "fs", trans_fs_table }, + { CTL_DEBUG, "debug", trans_debug_table }, + { CTL_DEV, "dev", trans_dev_table }, + { CTL_BUS, "bus", trans_bus_table }, + { CTL_ABI, "abi" }, + /* CTL_CPU not used */ + { CTL_ARLAN, "arlan", trans_arlan_table }, + { CTL_APPLDATA, "appldata", trans_appldata_table }, + { CTL_S390DBF, "s390dbf", trans_s390dbf_table }, + { CTL_SUNRPC, "sunrpc", trans_sunrpc_table }, + { CTL_PM, "pm", trans_pm_table }, + { CTL_FRV, "frv", trans_frv_table }, + {} +}; + + + + +static int sysctl_depth(struct ctl_table *table) +{ + struct ctl_table *tmp; + int depth; + + depth = 0; + for (tmp = table; tmp->parent; tmp = tmp->parent) + depth++; + + return depth; +} + +static struct ctl_table *sysctl_parent(struct ctl_table *table, int n) +{ + int i; + + for (i = 0; table && i < n; i++) + table = table->parent; + + return table; +} + +static struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table) +{ + struct ctl_table *test; + struct trans_ctl_table *ref; + int depth, cur_depth; + + depth = sysctl_depth(table); + + cur_depth = depth; + ref = trans_root_table; +repeat: + test = sysctl_parent(table, cur_depth); + for (; ref->ctl_name || ref->procname || ref->child; ref++) { + int match = 0; + + if (cur_depth && !ref->child) + continue; + + if (test->procname && ref->procname && + (strcmp(test->procname, ref->procname) == 0)) + match++; + + if (test->ctl_name && ref->ctl_name && + (test->ctl_name == ref->ctl_name)) + match++; + + if (!ref->ctl_name && !ref->procname) + match++; + + if (match) { + if (cur_depth != 0) { + cur_depth--; + ref = ref->child; + goto repeat; + } + goto out; + } + } + ref = NULL; +out: + return ref; +} + +static void sysctl_print_path(struct ctl_table *table) +{ + struct ctl_table *tmp; + int depth, i; + depth = sysctl_depth(table); + if (table->procname) { + for (i = depth; i >= 0; i--) { + tmp = sysctl_parent(table, i); + printk("/%s", tmp->procname?tmp->procname:""); + } + } + printk(" "); + if 
(table->ctl_name) { + for (i = depth; i >= 0; i--) { + tmp = sysctl_parent(table, i); + printk(".%d", tmp->ctl_name); + } + } +} + +static void sysctl_repair_table(struct ctl_table *table) +{ + /* Don't complain about the classic default + * sysctl strategy routine. Maybe later we + * can get the tables fixed and complain about + * this. + */ + if (table->ctl_name && table->procname && + (table->proc_handler == proc_dointvec) && + (!table->strategy)) { + table->strategy = sysctl_data; + } +} + +static struct ctl_table *sysctl_check_lookup(struct ctl_table *table) +{ + struct ctl_table_header *head; + struct ctl_table *ref, *test; + int depth, cur_depth; + + depth = sysctl_depth(table); + + for (head = sysctl_head_next(NULL); head; + head = sysctl_head_next(head)) { + cur_depth = depth; + ref = head->ctl_table; +repeat: + test = sysctl_parent(table, cur_depth); + for (; ref->ctl_name || ref->procname; ref++) { + int match = 0; + if (cur_depth && !ref->child) + continue; + + if (test->procname && ref->procname && + (strcmp(test->procname, ref->procname) == 0)) + match++; + + if (test->ctl_name && ref->ctl_name && + (test->ctl_name == ref->ctl_name)) + match++; + + if (match) { + if (cur_depth != 0) { + cur_depth--; + ref = ref->child; + goto repeat; + } + goto out; + } + } + } + ref = NULL; +out: + sysctl_head_finish(head); + return ref; +} + +static void set_fail(const char **fail, struct ctl_table *table, const char *str) +{ + if (*fail) { + printk(KERN_ERR "sysctl table check failed: "); + sysctl_print_path(table); + printk(" %s\n", *fail); + } + *fail = str; +} + +static int sysctl_check_dir(struct ctl_table *table) +{ + struct ctl_table *ref; + int error; + + error = 0; + ref = sysctl_check_lookup(table); + if (ref) { + int match = 0; + if ((!table->procname && !ref->procname) || + (table->procname && ref->procname && + (strcmp(table->procname, ref->procname) == 0))) + match++; + + if ((!table->ctl_name && !ref->ctl_name) || + (table->ctl_name && ref->ctl_name && + (table->ctl_name == ref->ctl_name))) + match++; + + if (match != 2) { + printk(KERN_ERR "%s: failed: ", __func__); + sysctl_print_path(table); + printk(" ref: "); + sysctl_print_path(ref); + printk("\n"); + error = -EINVAL; + } + } + return error; +} + +static void sysctl_check_leaf(struct ctl_table *table, const char **fail) +{ + struct ctl_table *ref; + + ref = sysctl_check_lookup(table); + if (ref && (ref != table)) + set_fail(fail, table, "Sysctl already exists"); +} + +static void sysctl_check_bin_path(struct ctl_table *table, const char **fail) +{ + struct trans_ctl_table *ref; + + ref = sysctl_binary_lookup(table); + if (table->ctl_name && !ref) + set_fail(fail, table, "Unknown sysctl binary path"); + if (ref) { + if (ref->procname && + (!table->procname || + (strcmp(table->procname, ref->procname) != 0))) + set_fail(fail, table, "procname does not match binary path procname"); + + if (ref->ctl_name && table->ctl_name && + (table->ctl_name != ref->ctl_name)) + set_fail(fail, table, "ctl_name does not match binary path ctl_name"); + } +} + +int sysctl_check_table(struct ctl_table *table) +{ + int error = 0; + for (; table->ctl_name || table->procname; table++) { + const char *fail = NULL; + + sysctl_repair_table(table); + if (table->parent) { + if (table->procname && !table->parent->procname) + set_fail(&fail, table, "Parent without procname"); + if (table->ctl_name && !table->parent->ctl_name) + set_fail(&fail, table, "Parent without ctl_name"); + } + if (!table->procname) + set_fail(&fail, table, "No procname"); + if 
(table->child) { + if (table->data) + set_fail(&fail, table, "Directory with data?"); + if (table->maxlen) + set_fail(&fail, table, "Directory with maxlen?"); + if ((table->mode & (S_IRUGO|S_IXUGO)) != table->mode) + set_fail(&fail, table, "Writable sysctl directory"); + if (table->proc_handler) + set_fail(&fail, table, "Directory with proc_handler"); + if (table->strategy) + set_fail(&fail, table, "Directory with strategy"); + if (table->extra1) + set_fail(&fail, table, "Directory with extra1"); + if (table->extra2) + set_fail(&fail, table, "Directory with extra2"); + if (sysctl_check_dir(table)) + set_fail(&fail, table, "Inconsistent directory names"); + } else { + if ((table->strategy == sysctl_data) || + (table->strategy == sysctl_string) || + (table->strategy == sysctl_intvec) || + (table->strategy == sysctl_jiffies) || + (table->strategy == sysctl_ms_jiffies) || + (table->proc_handler == proc_dostring) || + (table->proc_handler == proc_dointvec) || +#ifdef CONFIG_SECURITY_CAPABILITIES + (table->proc_handler == proc_dointvec_bset) || +#endif /* def CONFIG_SECURITY_CAPABILITIES */ + (table->proc_handler == proc_dointvec_minmax) || + (table->proc_handler == proc_dointvec_jiffies) || + (table->proc_handler == proc_dointvec_userhz_jiffies) || + (table->proc_handler == proc_dointvec_ms_jiffies) || + (table->proc_handler == proc_doulongvec_minmax) || + (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { + if (!table->data) + set_fail(&fail, table, "No data"); + if (!table->maxlen) + set_fail(&fail, table, "No maxlen"); + } + if ((table->proc_handler == proc_doulongvec_minmax) || + (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) { + if (table->maxlen > sizeof (unsigned long)) { + if (!table->extra1) + set_fail(&fail, table, "No min"); + if (!table->extra2) + set_fail(&fail, table, "No max"); + } + } +#ifdef CONFIG_SYSCTL_SYSCALL + if (table->ctl_name && !table->strategy) + set_fail(&fail, table, "Missing strategy"); +#endif +#if 0 + if (!table->ctl_name && table->strategy) + set_fail(&fail, table, "Strategy without ctl_name"); +#endif +#ifdef CONFIG_PROC_FS + if (table->procname && !table->proc_handler) + set_fail(&fail, table, "No proc_handler"); +#endif +#if 0 + if (!table->procname && table->proc_handler) + set_fail(&fail, table, "proc_handler without procname"); +#endif + sysctl_check_leaf(table, &fail); + } + sysctl_check_bin_path(table, &fail); + if (fail) { + set_fail(&fail, table, NULL); + error = -EINVAL; + } + if (table->child) + error |= sysctl_check_table(table->child); + } + return error; +} diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 7d4d7f9c1bb..9f360f68aad 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c @@ -22,6 +22,10 @@ #include <linux/delayacct.h> #include <linux/cpumask.h> #include <linux/percpu.h> +#include <linux/cgroupstats.h> +#include <linux/cgroup.h> +#include <linux/fs.h> +#include <linux/file.h> #include <net/genetlink.h> #include <asm/atomic.h> @@ -49,6 +53,11 @@ __read_mostly = { [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; +static struct nla_policy +cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] __read_mostly = { + [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, +}; + struct listener { struct list_head list; pid_t pid; @@ -372,6 +381,51 @@ err: return NULL; } +static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info) +{ + int rc = 0; + struct sk_buff *rep_skb; + struct cgroupstats *stats; + struct 
nlattr *na; + size_t size; + u32 fd; + struct file *file; + int fput_needed; + + na = info->attrs[CGROUPSTATS_CMD_ATTR_FD]; + if (!na) + return -EINVAL; + + fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]); + file = fget_light(fd, &fput_needed); + if (file) { + size = nla_total_size(sizeof(struct cgroupstats)); + + rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb, + size); + if (rc < 0) + goto err; + + na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS, + sizeof(struct cgroupstats)); + stats = nla_data(na); + memset(stats, 0, sizeof(*stats)); + + rc = cgroupstats_build(stats, file->f_dentry); + if (rc < 0) + goto err; + + fput_light(file, fput_needed); + return send_reply(rep_skb, info->snd_pid); + } + +err: + if (file) + fput_light(file, fput_needed); + nlmsg_free(rep_skb); + return rc; +} + static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) { int rc = 0; @@ -522,6 +576,12 @@ static struct genl_ops taskstats_ops = { .policy = taskstats_cmd_get_policy, }; +static struct genl_ops cgroupstats_ops = { + .cmd = CGROUPSTATS_CMD_GET, + .doit = cgroupstats_user_cmd, + .policy = cgroupstats_cmd_get_policy, +}; + /* Needed early in initialization */ void __init taskstats_init_early(void) { @@ -546,8 +606,15 @@ static int __init taskstats_init(void) if (rc < 0) goto err; + rc = genl_register_ops(&family, &cgroupstats_ops); + if (rc < 0) + goto err_cgroup_ops; + family_registered = 1; + printk("registered taskstats version %d\n", TASKSTATS_GENL_VERSION); return 0; +err_cgroup_ops: + genl_unregister_ops(&family, &taskstats_ops); err: genl_unregister_family(&family); return rc; diff --git a/kernel/time.c b/kernel/time.c index 2d5b6a68213..09d3c45c4da 100644 --- a/kernel/time.c +++ b/kernel/time.c @@ -9,9 +9,9 @@ */ /* * Modification history kernel/time.c - * + * * 1993-09-02 Philip Gladstone - * Created file with time related functions from sched.c and adjtimex() + * Created file with time related functions from sched.c and adjtimex() * 1993-10-08 Torsten Duwe * adjtime interface update and CMOS clock write code * 1995-08-13 Torsten Duwe @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/timex.h> #include <linux/capability.h> +#include <linux/clocksource.h> #include <linux/errno.h> #include <linux/syscalls.h> #include <linux/security.h> @@ -38,7 +39,7 @@ #include <asm/uaccess.h> #include <asm/unistd.h> -/* +/* * The timezone where the local system is located. Used as a default by some * programs who obtain this value by using gettimeofday. */ @@ -71,7 +72,7 @@ asmlinkage long sys_time(time_t __user * tloc) * why not move it into the appropriate arch directory (for those * architectures that need it). */ - + asmlinkage long sys_stime(time_t __user *tptr) { struct timespec tv; @@ -110,10 +111,10 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us /* * Adjust the time obtained from the CMOS to be UTC time instead of * local time. - * + * * This is ugly, but preferable to the alternatives. Otherwise we * would either need to write a program to do it in /etc/rc (and risk - * confusion if the program gets run more than once; it would also be + * confusion if the program gets run more than once; it would also be * hard to make the program warp the clock precisely n hours) or * compile in the timezone information into the kernel. Bad, bad.... * @@ -158,6 +159,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz) if (tz) { /* SMP safe, global irq locking makes it work. 
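The cgroupstats_user_cmd() handler added to kernel/taskstats.c above rides on the existing taskstats genetlink family: userspace opens a cgroup directory, passes its file descriptor as CGROUPSTATS_CMD_ATTR_FD in a CGROUPSTATS_CMD_GET request, and gets back a CGROUPSTATS_CMD_NEW reply carrying a struct cgroupstats of per-task-state counts. A sketch of such a client using libnl-3 genl helpers; the library calls, the cgroup mount point and the omitted reply parsing are assumptions of this sketch, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/taskstats.h>
#include <linux/cgroupstats.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family, fd, err;

	genl_connect(sk);
	/* cgroupstats is registered as an extra op on the TASKSTATS family. */
	family = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);

	/* Assumed cgroup hierarchy mount point; adjust for the local setup. */
	fd = open("/dev/cgroup/mygroup", O_RDONLY);
	if (family < 0 || fd < 0)
		return 1;

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    CGROUPSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
	nla_put_u32(msg, CGROUPSTATS_CMD_ATTR_FD, fd);

	err = nl_send_auto(sk, msg);
	if (err < 0)
		fprintf(stderr, "send failed: %s\n", nl_geterror(err));

	/*
	 * A real client would now nl_recvmsgs() the CGROUPSTATS_CMD_NEW reply
	 * and pull struct cgroupstats out of the
	 * CGROUPSTATS_TYPE_CGROUP_STATS attribute.
	 */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}

(As shown in the hunk, rep_skb is only set up inside the if (file) branch while the err path unconditionally calls nlmsg_free(rep_skb), which is worth keeping in mind when reading the error handling.)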
*/ sys_tz = *tz; + update_vsyscall_tz(); if (firsttime) { firsttime = 0; if (!tv) diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 51b6a6a6158..c8a9d13874d 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c @@ -207,15 +207,12 @@ static inline void clocksource_resume_watchdog(void) { } */ void clocksource_resume(void) { - struct list_head *tmp; + struct clocksource *cs; unsigned long flags; spin_lock_irqsave(&clocksource_lock, flags); - list_for_each(tmp, &clocksource_list) { - struct clocksource *cs; - - cs = list_entry(tmp, struct clocksource, list); + list_for_each_entry(cs, &clocksource_list, list) { if (cs->resume) cs->resume(); } @@ -369,7 +366,6 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, const char *buf, size_t count) { struct clocksource *ovr = NULL; - struct list_head *tmp; size_t ret = count; int len; @@ -389,12 +385,11 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, len = strlen(override_name); if (len) { + struct clocksource *cs; + ovr = clocksource_override; /* try to select it: */ - list_for_each(tmp, &clocksource_list) { - struct clocksource *cs; - - cs = list_entry(tmp, struct clocksource, list); + list_for_each_entry(cs, &clocksource_list, list) { if (strlen(cs->name) == len && !strcmp(cs->name, override_name)) ovr = cs; @@ -422,14 +417,11 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev, static ssize_t sysfs_show_available_clocksources(struct sys_device *dev, char *buf) { - struct list_head *tmp; + struct clocksource *src; char *curr = buf; spin_lock_irq(&clocksource_lock); - list_for_each(tmp, &clocksource_list) { - struct clocksource *src; - - src = list_entry(tmp, struct clocksource, list); + list_for_each_entry(src, &clocksource_list, list) { curr += sprintf(curr, "%s ", src->name); } spin_unlock_irq(&clocksource_lock); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index ce89ffb474d..10a1347597f 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -153,6 +153,7 @@ void tick_nohz_stop_sched_tick(void) unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags; struct tick_sched *ts; ktime_t last_update, expires, now, delta; + struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; int cpu; local_irq_save(flags); @@ -302,11 +303,26 @@ void tick_nohz_stop_sched_tick(void) out: ts->next_jiffies = next_jiffies; ts->last_jiffies = last_jiffies; + ts->sleep_length = ktime_sub(dev->next_event, now); end: local_irq_restore(flags); } /** + * tick_nohz_get_sleep_length - return the length of the current sleep + * + * Called from power state control code with interrupts disabled + */ +ktime_t tick_nohz_get_sleep_length(void) +{ + struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); + + return ts->sleep_length; +} + +EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length); + +/** * nohz_restart_sched_tick - restart the idle tick from the idle task * * Restart the idle tick when the CPU is woken up from idle diff --git a/kernel/timer.c b/kernel/timer.c index 6ce1952eea7..fb4e67d5dd6 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -26,6 +26,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/swap.h> +#include <linux/pid_namespace.h> #include <linux/notifier.h> #include <linux/thread_info.h> #include <linux/time.h> @@ -817,7 +818,7 @@ unsigned long next_timer_interrupt(void) #endif /* - * Called from the timer interrupt handler to charge one tick to the current + * Called from the timer interrupt 
handler to charge one tick to the current * process. user_tick is 1 if the tick is user time, 0 for system. */ void update_process_times(int user_tick) @@ -826,10 +827,13 @@ void update_process_times(int user_tick) int cpu = smp_processor_id(); /* Note: this timer irq context must be accounted for as well. */ - if (user_tick) + if (user_tick) { account_user_time(p, jiffies_to_cputime(1)); - else + account_user_time_scaled(p, jiffies_to_cputime(1)); + } else { account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1)); + account_system_time_scaled(p, jiffies_to_cputime(1)); + } run_local_timers(); if (rcu_pending(cpu)) rcu_check_callbacks(cpu, user_tick); @@ -953,7 +957,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds) */ asmlinkage long sys_getpid(void) { - return current->tgid; + return task_tgid_vnr(current); } /* @@ -967,7 +971,7 @@ asmlinkage long sys_getppid(void) int pid; rcu_read_lock(); - pid = rcu_dereference(current->real_parent)->tgid; + pid = task_ppid_nr_ns(current, current->nsproxy->pid_ns); rcu_read_unlock(); return pid; @@ -1099,7 +1103,7 @@ EXPORT_SYMBOL(schedule_timeout_uninterruptible); /* Thread ID - the internal kernel "pid" */ asmlinkage long sys_gettid(void) { - return current->pid; + return task_pid_vnr(current); } /** diff --git a/kernel/tsacct.c b/kernel/tsacct.c index c122131a122..4ab1b584961 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c @@ -62,6 +62,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) rcu_read_unlock(); stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; + stats->ac_utimescaled = + cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC; + stats->ac_stimescaled = + cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC; stats->ac_minflt = tsk->min_flt; stats->ac_majflt = tsk->maj_flt; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e080d1d744c..52d5e7c9a8e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -32,6 +32,7 @@ #include <linux/freezer.h> #include <linux/kallsyms.h> #include <linux/debug_locks.h> +#include <linux/lockdep.h> /* * The per-CPU workqueue (if single thread, we always use the first @@ -61,6 +62,9 @@ struct workqueue_struct { const char *name; int singlethread; int freezeable; /* Freeze threads during suspend */ +#ifdef CONFIG_LOCKDEP + struct lockdep_map lockdep_map; +#endif }; /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove @@ -250,6 +254,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) struct work_struct *work = list_entry(cwq->worklist.next, struct work_struct, entry); work_func_t f = work->func; +#ifdef CONFIG_LOCKDEP + /* + * It is permissible to free the struct work_struct + * from inside the function that is called from it, + * this we need to take into account for lockdep too. + * To avoid bogus "held lock freed" warnings as well + * as problems when looking into work->lockdep_map, + * make a copy and use that here. 
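In other words, a work function may be the last user of its own work_struct: once f(work) returns, the lockdep_map embedded in it can already sit in freed memory, so the release has to be done against the stack copy taken here. A minimal sketch of the self-freeing pattern that makes this necessary; the my_req names are illustrative and not part of the patch:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Illustrative only: a work item that frees itself from its own handler. */
struct my_req {
	struct work_struct work;
	/* ... request payload ... */
};

static void my_req_func(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	/* ... process the request ... */

	kfree(req);	/* the work_struct, lockdep_map included, is gone now */
}

static int my_req_submit(struct workqueue_struct *wq)
{
	struct my_req *req = kmalloc(sizeof(*req), GFP_KERNEL);

	if (!req)
		return -ENOMEM;
	INIT_WORK(&req->work, my_req_func);
	queue_work(wq, &req->work);
	return 0;
}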
+	 */
+	struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
@@ -257,13 +272,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
+		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 		f(work);
+		lock_release(&lockdep_map, 1, _THIS_IP_);
+		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
 					"%s/0x%08x/%d\n",
 					current->comm, preempt_count(),
-					current->pid);
+					task_pid_nr(current));
 			printk(KERN_ERR " last function: ");
 			print_symbol("%s\n", (unsigned long)f);
 			debug_show_held_locks(current);
@@ -376,6 +395,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 	might_sleep();
+	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
 	for_each_cpu_mask(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
@@ -446,6 +467,9 @@ static void wait_on_work(struct work_struct *work)
 	might_sleep();
+	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+
 	cwq = get_wq_data(work);
 	if (!cwq)
 		return;
@@ -695,8 +719,10 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	}
 }
-struct workqueue_struct *__create_workqueue(const char *name,
-						int singlethread, int freezeable)
+struct workqueue_struct *__create_workqueue_key(const char *name,
+						int singlethread,
+						int freezeable,
+						struct lock_class_key *key)
 {
 	struct workqueue_struct *wq;
 	struct cpu_workqueue_struct *cwq;
@@ -713,6 +739,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 	wq->name = name;
+	lockdep_init_map(&wq->lockdep_map, name, key, 0);
 	wq->singlethread = singlethread;
 	wq->freezeable = freezeable;
 	INIT_LIST_HEAD(&wq->list);
@@ -741,7 +768,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 	return wq;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
@@ -752,6 +779,9 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	if (cwq->thread == NULL)
 		return;
+	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+
 	flush_cpu_workqueue(cwq);
 	/*
 	 * If the caller is CPU_DEAD and cwq->worklist was not empty, |
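Taken together these annotations give every workqueue and work item a lockdep pseudo-lock: __create_workqueue_key() keys the workqueue's class off the lock_class_key that the create_workqueue() wrapper macros are expected to supply, run_workqueue() brackets each handler invocation with acquire/release pairs, and flush_workqueue()/wait_on_work() acquire the same classes on the waiting side. Lockdep can then report, for example, a flush issued while holding a lock that a queued work item also takes, instead of the box deadlocking at some later time. A sketch of that pattern with illustrative names (my_wq, my_mutex and my_work are assumptions of this example, not code from the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(my_mutex);
static struct workqueue_struct *my_wq;

static void my_work_func(struct work_struct *work)
{
	mutex_lock(&my_mutex);		/* ordering: wq pseudo-lock -> my_mutex */
	/* ... */
	mutex_unlock(&my_mutex);
}
static DECLARE_WORK(my_work, my_work_func);

static void my_sync_path(void)
{
	mutex_lock(&my_mutex);
	/*
	 * Ordering here is my_mutex -> wq pseudo-lock: flush_workqueue()
	 * acquires the workqueue's lockdep_map while my_mutex is held.
	 * The inversion against my_work_func() is now reported by lockdep
	 * rather than surfacing as an occasional silent deadlock.
	 */
	flush_workqueue(my_wq);
	mutex_unlock(&my_mutex);
}

static int __init my_init(void)
{
	/*
	 * create_workqueue() presumably supplies a static lock_class_key
	 * to __create_workqueue_key() behind the scenes.
	 */
	my_wq = create_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	my_sync_path();
	return 0;
}
module_init(my_init);
MODULE_LICENSE("GPL");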