author     Russell King <rmk@dyn-67.arm.linux.org.uk>    2006-01-09 19:18:33 +0000
committer  Russell King <rmk+kernel@arm.linux.org.uk>    2006-01-09 19:18:33 +0000
commit     0a3a98f6dd4e8f4d928a09302c0d1c56f2192ac3 (patch)
tree       92f55e374a84d06ce8213a4540454760fdecf137 /kernel
parent     8ef12c9f01afba47c2d33bb939085111ca0d0f7d (diff)
parent     5367f2d67c7d0bf1faae90e6e7b4e2ac3c9b5e0f (diff)
Merge Linus' tree.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c         2
-rw-r--r--  kernel/cpuset.c      537
-rw-r--r--  kernel/exit.c          4
-rw-r--r--  kernel/fork.c         33
-rw-r--r--  kernel/irq/proc.c      2
-rw-r--r--  kernel/module.c       56
-rw-r--r--  kernel/pid.c          22
-rw-r--r--  kernel/printk.c        6
-rw-r--r--  kernel/ptrace.c       77
-rw-r--r--  kernel/rcupdate.c     19
-rw-r--r--  kernel/rcutorture.c   99
-rw-r--r--  kernel/sched.c         7
-rw-r--r--  kernel/signal.c      137
-rw-r--r--  kernel/sys.c          62
-rw-r--r--  kernel/sys_ni.c       22
-rw-r--r--  kernel/sysctl.c       22
-rw-r--r--  kernel/timer.c         1
-rw-r--r--  kernel/workqueue.c    40
18 files changed, 908 insertions(+), 240 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 32fa03ad198..d13ab7d2d89 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -267,7 +267,7 @@ static int audit_set_failure(int state, uid_t loginuid)
return old;
}
-int kauditd_thread(void *dummy)
+static int kauditd_thread(void *dummy)
{
struct sk_buff *skb;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 7430640f981..eab64e23bca 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -39,6 +39,7 @@
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
+#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -54,7 +55,23 @@
#include <asm/atomic.h>
#include <asm/semaphore.h>
-#define CPUSET_SUPER_MAGIC 0x27e0eb
+#define CPUSET_SUPER_MAGIC 0x27e0eb
+
+/*
+ * Tracks how many cpusets are currently defined in system.
+ * When there is only one cpuset (the root cpuset) we can
+ * short circuit some hooks.
+ */
+int number_of_cpusets __read_mostly;
+
+/* See "Frequency meter" comments, below. */
+
+struct fmeter {
+ int cnt; /* unprocessed events count */
+ int val; /* most recent output value */
+ time_t time; /* clock (secs) when val computed */
+ spinlock_t lock; /* guards read or write of above */
+};
struct cpuset {
unsigned long flags; /* "unsigned long" so bitops work */
@@ -80,13 +97,16 @@ struct cpuset {
* Copy of global cpuset_mems_generation as of the most
* recent time this cpuset changed its mems_allowed.
*/
- int mems_generation;
+ int mems_generation;
+
+ struct fmeter fmeter; /* memory_pressure filter */
};
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
CS_MEM_EXCLUSIVE,
+ CS_MEMORY_MIGRATE,
CS_REMOVED,
CS_NOTIFY_ON_RELEASE
} cpuset_flagbits_t;
@@ -112,6 +132,11 @@ static inline int notify_on_release(const struct cpuset *cs)
return !!test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
}
+static inline int is_memory_migrate(const struct cpuset *cs)
+{
+ return !!test_bit(CS_MEMORY_MIGRATE, &cs->flags);
+}
+
/*
* Increment this atomic integer everytime any cpuset changes its
* mems_allowed value. Users of cpusets can track this generation
@@ -137,13 +162,10 @@ static struct cpuset top_cpuset = {
.count = ATOMIC_INIT(0),
.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
.children = LIST_HEAD_INIT(top_cpuset.children),
- .parent = NULL,
- .dentry = NULL,
- .mems_generation = 0,
};
static struct vfsmount *cpuset_mount;
-static struct super_block *cpuset_sb = NULL;
+static struct super_block *cpuset_sb;
/*
* We have two global cpuset semaphores below. They can nest.
@@ -227,6 +249,11 @@ static struct super_block *cpuset_sb = NULL;
* a tasks cpuset pointer we use task_lock(), which acts on a spinlock
* (task->alloc_lock) already in the task_struct routinely used for
* such matters.
+ *
+ * P.S. One more locking exception. RCU is used to guard the
+ * update of a tasks cpuset pointer by attach_task() and the
+ * access of task->cpuset->mems_generation via that pointer in
+ * the routine cpuset_update_task_memory_state().
*/
static DECLARE_MUTEX(manage_sem);
@@ -304,7 +331,7 @@ static void cpuset_d_remove_dir(struct dentry *dentry)
spin_lock(&dcache_lock);
node = dentry->d_subdirs.next;
while (node != &dentry->d_subdirs) {
- struct dentry *d = list_entry(node, struct dentry, d_child);
+ struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
list_del_init(node);
if (d->d_inode) {
d = dget_locked(d);
@@ -316,7 +343,7 @@ static void cpuset_d_remove_dir(struct dentry *dentry)
}
node = dentry->d_subdirs.next;
}
- list_del_init(&dentry->d_child);
+ list_del_init(&dentry->d_u.d_child);
spin_unlock(&dcache_lock);
remove_dir(dentry);
}
@@ -570,20 +597,43 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
BUG_ON(!nodes_intersects(*pmask, node_online_map));
}
-/*
- * Refresh current tasks mems_allowed and mems_generation from current
- * tasks cpuset.
+/**
+ * cpuset_update_task_memory_state - update task memory placement
*
- * Call without callback_sem or task_lock() held. May be called with
- * or without manage_sem held. Will acquire task_lock() and might
- * acquire callback_sem during call.
+ * If the current tasks cpusets mems_allowed changed behind our
+ * backs, update current->mems_allowed, mems_generation and task NUMA
+ * mempolicy to the new value.
+ *
+ * Task mempolicy is updated by rebinding it relative to the
+ * current->cpuset if a task has its memory placement changed.
+ * Do not call this routine if in_interrupt().
*
- * The task_lock() is required to dereference current->cpuset safely.
- * Without it, we could pick up the pointer value of current->cpuset
- * in one instruction, and then attach_task could give us a different
- * cpuset, and then the cpuset we had could be removed and freed,
- * and then on our next instruction, we could dereference a no longer
- * valid cpuset pointer to get its mems_generation field.
+ * Call without callback_sem or task_lock() held. May be called
+ * with or without manage_sem held. Doesn't need task_lock to guard
+ * against another task changing a non-NULL cpuset pointer to NULL,
+ * as that is only done by a task on itself, and if the current task
+ * is here, it is not simultaneously in the exit code NULL'ing its
+ * cpuset pointer. This routine also might acquire callback_sem and
+ * current->mm->mmap_sem during call.
+ *
+ * Reading current->cpuset->mems_generation doesn't need task_lock
+ * to guard the current->cpuset derefence, because it is guarded
+ * from concurrent freeing of current->cpuset by attach_task(),
+ * using RCU.
+ *
+ * The rcu_dereference() is technically probably not needed,
+ * as I don't actually mind if I see a new cpuset pointer but
+ * an old value of mems_generation. However this really only
+ * matters on alpha systems using cpusets heavily. If I dropped
+ * that rcu_dereference(), it would save them a memory barrier.
+ * For all other arch's, rcu_dereference is a no-op anyway, and for
+ * alpha systems not using cpusets, another planned optimization,
+ * avoiding the rcu critical section for tasks in the root cpuset
+ * which is statically allocated, so can't vanish, will make this
+ * irrelevant. Better to use RCU as intended, than to engage in
+ * some cute trick to save a memory barrier that is impossible to
+ * test, for alpha systems using cpusets heavily, which might not
+ * even exist.
*
* This routine is needed to update the per-task mems_allowed data,
* within the tasks context, when it is trying to allocate memory
@@ -591,27 +641,31 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
* task has been modifying its cpuset.
*/
-static void refresh_mems(void)
+void cpuset_update_task_memory_state()
{
int my_cpusets_mem_gen;
+ struct task_struct *tsk = current;
+ struct cpuset *cs;
- task_lock(current);
- my_cpusets_mem_gen = current->cpuset->mems_generation;
- task_unlock(current);
-
- if (current->cpuset_mems_generation != my_cpusets_mem_gen) {
- struct cpuset *cs;
- nodemask_t oldmem = current->mems_allowed;
+ if (tsk->cpuset == &top_cpuset) {
+ /* Don't need rcu for top_cpuset. It's never freed. */
+ my_cpusets_mem_gen = top_cpuset.mems_generation;
+ } else {
+ rcu_read_lock();
+ cs = rcu_dereference(tsk->cpuset);
+ my_cpusets_mem_gen = cs->mems_generation;
+ rcu_read_unlock();
+ }
+ if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
down(&callback_sem);
- task_lock(current);
- cs = current->cpuset;
- guarantee_online_mems(cs, &current->mems_allowed);
- current->cpuset_mems_generation = cs->mems_generation;
- task_unlock(current);
+ task_lock(tsk);
+ cs = tsk->cpuset; /* Maybe changed when task not locked */
+ guarantee_online_mems(cs, &tsk->mems_allowed);
+ tsk->cpuset_mems_generation = cs->mems_generation;
+ task_unlock(tsk);
up(&callback_sem);
- if (!nodes_equal(oldmem, current->mems_allowed))
- numa_policy_rebind(&oldmem, &current->mems_allowed);
+ mpol_rebind_task(tsk, &tsk->mems_allowed);
}
}
@@ -766,36 +820,150 @@ static int update_cpumask(struct cpuset *cs, char *buf)
}
/*
+ * Handle user request to change the 'mems' memory placement
+ * of a cpuset. Needs to validate the request, update the
+ * cpusets mems_allowed and mems_generation, and for each
+ * task in the cpuset, rebind any vma mempolicies and if
+ * the cpuset is marked 'memory_migrate', migrate the tasks
+ * pages to the new memory.
+ *
* Call with manage_sem held. May take callback_sem during call.
+ * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
+ * lock each such tasks mm->mmap_sem, scan its vma's and rebind
+ * their mempolicies to the cpusets new mems_allowed.
*/
static int update_nodemask(struct cpuset *cs, char *buf)
{
struct cpuset trialcs;
+ nodemask_t oldmem;
+ struct task_struct *g, *p;
+ struct mm_struct **mmarray;
+ int i, n, ntasks;
+ int migrate;
+ int fudge;
int retval;
trialcs = *cs;
retval = nodelist_parse(buf, trialcs.mems_allowed);
if (retval < 0)
- return retval;
+ goto done;
nodes_and(trialcs.mems_allowed, trialcs.mems_allowed, node_online_map);
- if (nodes_empty(trialcs.mems_allowed))
- return -ENOSPC;
+ oldmem = cs->mems_allowed;
+ if (nodes_equal(oldmem, trialcs.mems_allowed)) {
+ retval = 0; /* Too easy - nothing to do */
+ goto done;
+ }
+ if (nodes_empty(trialcs.mems_allowed)) {
+ retval = -ENOSPC;
+ goto done;
+ }
retval = validate_change(cs, &trialcs);
- if (retval == 0) {
- down(&callback_sem);
- cs->mems_allowed = trialcs.mems_allowed;
- atomic_inc(&cpuset_mems_generation);
- cs->mems_generation = atomic_read(&cpuset_mems_generation);
- up(&callback_sem);
+ if (retval < 0)
+ goto done;
+
+ down(&callback_sem);
+ cs->mems_allowed = trialcs.mems_allowed;
+ atomic_inc(&cpuset_mems_generation);
+ cs->mems_generation = atomic_read(&cpuset_mems_generation);
+ up(&callback_sem);
+
+ set_cpuset_being_rebound(cs); /* causes mpol_copy() rebind */
+
+ fudge = 10; /* spare mmarray[] slots */
+ fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
+ retval = -ENOMEM;
+
+ /*
+ * Allocate mmarray[] to hold mm reference for each task
+ * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
+ * tasklist_lock. We could use GFP_ATOMIC, but with a
+ * few more lines of code, we can retry until we get a big
+ * enough mmarray[] w/o using GFP_ATOMIC.
+ */
+ while (1) {
+ ntasks = atomic_read(&cs->count); /* guess */
+ ntasks += fudge;
+ mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
+ if (!mmarray)
+ goto done;
+ write_lock_irq(&tasklist_lock); /* block fork */
+ if (atomic_read(&cs->count) <= ntasks)
+ break; /* got enough */
+ write_unlock_irq(&tasklist_lock); /* try again */
+ kfree(mmarray);
}
+
+ n = 0;
+
+ /* Load up mmarray[] with mm reference for each task in cpuset. */
+ do_each_thread(g, p) {
+ struct mm_struct *mm;
+
+ if (n >= ntasks) {
+ printk(KERN_WARNING
+ "Cpuset mempolicy rebind incomplete.\n");
+ continue;
+ }
+ if (p->cpuset != cs)
+ continue;
+ mm = get_task_mm(p);
+ if (!mm)
+ continue;
+ mmarray[n++] = mm;
+ } while_each_thread(g, p);
+ write_unlock_irq(&tasklist_lock);
+
+ /*
+ * Now that we've dropped the tasklist spinlock, we can
+ * rebind the vma mempolicies of each mm in mmarray[] to their
+ * new cpuset, and release that mm. The mpol_rebind_mm()
+ * call takes mmap_sem, which we couldn't take while holding
+ * tasklist_lock. Forks can happen again now - the mpol_copy()
+ * cpuset_being_rebound check will catch such forks, and rebind
+ * their vma mempolicies too. Because we still hold the global
+ * cpuset manage_sem, we know that no other rebind effort will
+ * be contending for the global variable cpuset_being_rebound.
+ * It's ok if we rebind the same mm twice; mpol_rebind_mm()
+ * is idempotent. Also migrate pages in each mm to new nodes.
+ */
+ migrate = is_memory_migrate(cs);
+ for (i = 0; i < n; i++) {
+ struct mm_struct *mm = mmarray[i];
+
+ mpol_rebind_mm(mm, &cs->mems_allowed);
+ if (migrate) {
+ do_migrate_pages(mm, &oldmem, &cs->mems_allowed,
+ MPOL_MF_MOVE_ALL);
+ }
+ mmput(mm);
+ }
+
+ /* We're done rebinding vma's to this cpusets new mems_allowed. */
+ kfree(mmarray);
+ set_cpuset_being_rebound(NULL);
+ retval = 0;
+done:
return retval;
}
/*
+ * Call with manage_sem held.
+ */
+
+static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
+{
+ if (simple_strtoul(buf, NULL, 10) != 0)
+ cpuset_memory_pressure_enabled = 1;
+ else
+ cpuset_memory_pressure_enabled = 0;
+ return 0;
+}
+
+/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
- * CS_NOTIFY_ON_RELEASE)
+ * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE)
* cs: the cpuset to update
* buf: the buffer where we read the 0 or 1
*
@@ -834,6 +1002,104 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
}
/*
+ * Frequency meter - How fast is some event occuring?
+ *
+ * These routines manage a digitally filtered, constant time based,
+ * event frequency meter. There are four routines:
+ * fmeter_init() - initialize a frequency meter.
+ * fmeter_markevent() - called each time the event happens.
+ * fmeter_getrate() - returns the recent rate of such events.
+ * fmeter_update() - internal routine used to update fmeter.
+ *
+ * A common data structure is passed to each of these routines,
+ * which is used to keep track of the state required to manage the
+ * frequency meter and its digital filter.
+ *
+ * The filter works on the number of events marked per unit time.
+ * The filter is single-pole low-pass recursive (IIR). The time unit
+ * is 1 second. Arithmetic is done using 32-bit integers scaled to
+ * simulate 3 decimal digits of precision (multiplied by 1000).
+ *
+ * With an FM_COEF of 933, and a time base of 1 second, the filter
+ * has a half-life of 10 seconds, meaning that if the events quit
+ * happening, then the rate returned from the fmeter_getrate()
+ * will be cut in half each 10 seconds, until it converges to zero.
+ *
+ * It is not worth doing a real infinitely recursive filter. If more
+ * than FM_MAXTICKS ticks have elapsed since the last filter event,
+ * just compute FM_MAXTICKS ticks worth, by which point the level
+ * will be stable.
+ *
+ * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
+ * arithmetic overflow in the fmeter_update() routine.
+ *
+ * Given the simple 32 bit integer arithmetic used, this meter works
+ * best for reporting rates between one per millisecond (msec) and
+ * one per 32 (approx) seconds. At constant rates faster than one
+ * per msec it maxes out at values just under 1,000,000. At constant
+ * rates between one per msec, and one per second it will stabilize
+ * to a value N*1000, where N is the rate of events per second.
+ * At constant rates between one per second and one per 32 seconds,
+ * it will be choppy, moving up on the seconds that have an event,
+ * and then decaying until the next event. At rates slower than
+ * about one in 32 seconds, it decays all the way back to zero between
+ * each event.
+ */
+
+#define FM_COEF 933 /* coefficient for half-life of 10 secs */
+#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
+#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
+#define FM_SCALE 1000 /* faux fixed point scale */
+
+/* Initialize a frequency meter */
+static void fmeter_init(struct fmeter *fmp)
+{
+ fmp->cnt = 0;
+ fmp->val = 0;
+ fmp->time = 0;
+ spin_lock_init(&fmp->lock);
+}
+
+/* Internal meter update - process cnt events and update value */
+static void fmeter_update(struct fmeter *fmp)
+{
+ time_t now = get_seconds();
+ time_t ticks = now - fmp->time;
+
+ if (ticks == 0)
+ return;
+
+ ticks = min(FM_MAXTICKS, ticks);
+ while (ticks-- > 0)
+ fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
+ fmp->time = now;
+
+ fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
+ fmp->cnt = 0;
+}
+
+/* Process any previous ticks, then bump cnt by one (times scale). */
+static void fmeter_markevent(struct fmeter *fmp)
+{
+ spin_lock(&fmp->lock);
+ fmeter_update(fmp);
+ fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
+ spin_unlock(&fmp->lock);
+}
+
+/* Process any previous ticks, then return current value. */
+static int fmeter_getrate(struct fmeter *fmp)
+{
+ int val;
+
+ spin_lock(&fmp->lock);
+ fmeter_update(fmp);
+ val = fmp->val;
+ spin_unlock(&fmp->lock);
+ return val;
+}
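The half-life claim in the frequency meter comment above is easy to check outside the kernel. The following standalone sketch (an illustration only, not part of the patch) repeats the decay step from fmeter_update() ten times on a fully charged value:

/*
 * Standalone check of the "half-life of 10 seconds" claim: apply the
 * fmeter_update() decay step, val = (FM_COEF * val) / FM_SCALE, ten
 * times to the meter's ceiling value.
 */
#include <stdio.h>

#define FM_COEF  933		/* coefficient for half-life of 10 secs */
#define FM_SCALE 1000		/* faux fixed point scale */

int main(void)
{
	int val = 1000000;	/* ceiling reached at one event per msec */
	int tick;

	for (tick = 1; tick <= 10; tick++)
		val = (FM_COEF * val) / FM_SCALE;

	printf("value after 10 idle seconds: %d\n", val);	/* ~499820 */
	return 0;
}

The same step also explains the documented steady state: at a constant N events per second, cnt is N*1000 each tick, so val settles where val = (933*val + 67*N*1000)/1000, i.e. at N*1000.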
+
+/*
* Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly
* writing the path of the old cpuset in 'ppathbuf' if it needs to be
* notified on release.
@@ -848,6 +1114,8 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
struct task_struct *tsk;
struct cpuset *oldcs;
cpumask_t cpus;
+ nodemask_t from, to;
+ struct mm_struct *mm;
if (sscanf(pidbuf, "%d", &pid) != 1)
return -EIO;
@@ -887,14 +1155,27 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
return -ESRCH;
}
atomic_inc(&cs->count);
- tsk->cpuset = cs;
+ rcu_assign_pointer(tsk->cpuset, cs);
task_unlock(tsk);
guarantee_online_cpus(cs, &cpus);
set_cpus_allowed(tsk, cpus);
+ from = oldcs->mems_allowed;
+ to = cs->mems_allowed;
+
up(&callback_sem);
+
+ mm = get_task_mm(tsk);
+ if (mm) {
+ mpol_rebind_mm(mm, &to);
+ mmput(mm);
+ }
+
+ if (is_memory_migrate(cs))
+ do_migrate_pages(tsk->mm, &from, &to, MPOL_MF_MOVE_ALL);
put_task_struct(tsk);
+ synchronize_rcu();
if (atomic_dec_and_test(&oldcs->count))
check_for_release(oldcs, ppathbuf);
return 0;
@@ -905,11 +1186,14 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
typedef enum {
FILE_ROOT,
FILE_DIR,
+ FILE_MEMORY_MIGRATE,
FILE_CPULIST,
FILE_MEMLIST,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_NOTIFY_ON_RELEASE,
+ FILE_MEMORY_PRESSURE_ENABLED,
+ FILE_MEMORY_PRESSURE,
FILE_TASKLIST,
} cpuset_filetype_t;
@@ -960,6 +1244,15 @@ static ssize_t cpuset_common_file_write(struct file *file, const char __user *us
case FILE_NOTIFY_ON_RELEASE:
retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
break;
+ case FILE_MEMORY_MIGRATE:
+ retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
+ break;
+ case FILE_MEMORY_PRESSURE_ENABLED:
+ retval = update_memory_pressure_enabled(cs, buffer);
+ break;
+ case FILE_MEMORY_PRESSURE:
+ retval = -EACCES;
+ break;
case FILE_TASKLIST:
retval = attach_task(cs, buffer, &pathbuf);
break;
@@ -1060,6 +1353,15 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
case FILE_NOTIFY_ON_RELEASE:
*s++ = notify_on_release(cs) ? '1' : '0';
break;
+ case FILE_MEMORY_MIGRATE:
+ *s++ = is_memory_migrate(cs) ? '1' : '0';
+ break;
+ case FILE_MEMORY_PRESSURE_ENABLED:
+ *s++ = cpuset_memory_pressure_enabled ? '1' : '0';
+ break;
+ case FILE_MEMORY_PRESSURE:
+ s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
+ break;
default:
retval = -EINVAL;
goto out;
@@ -1178,7 +1480,7 @@ static int cpuset_create_file(struct dentry *dentry, int mode)
/*
* cpuset_create_dir - create a directory for an object.
- * cs: the cpuset we create the directory for.
+ * cs: the cpuset we create the directory for.
* It must have a valid ->parent field
* And we are going to fill its ->dentry field.
* name: The name to give to the cpuset directory. Will be copied.
@@ -1408,6 +1710,21 @@ static struct cftype cft_notify_on_release = {
.private = FILE_NOTIFY_ON_RELEASE,
};
+static struct cftype cft_memory_migrate = {
+ .name = "memory_migrate",
+ .private = FILE_MEMORY_MIGRATE,
+};
+
+static struct cftype cft_memory_pressure_enabled = {
+ .name = "memory_pressure_enabled",
+ .private = FILE_MEMORY_PRESSURE_ENABLED,
+};
+
+static struct cftype cft_memory_pressure = {
+ .name = "memory_pressure",
+ .private = FILE_MEMORY_PRESSURE,
+};
+
static int cpuset_populate_dir(struct dentry *cs_dentry)
{
int err;
@@ -1422,6 +1739,10 @@ static int cpuset_populate_dir(struct dentry *cs_dentry)
return err;
if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0)
return err;
+ if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0)
+ return err;
+ if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0)
+ return err;
if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0)
return err;
return 0;
@@ -1446,7 +1767,7 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
return -ENOMEM;
down(&manage_sem);
- refresh_mems();
+ cpuset_update_task_memory_state();
cs->flags = 0;
if (notify_on_release(parent))
set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
@@ -1457,11 +1778,13 @@ static long cpuset_create(struct cpuset *parent, const char *name, int mode)
INIT_LIST_HEAD(&cs->children);
atomic_inc(&cpuset_mems_generation);
cs->mems_generation = atomic_read(&cpuset_mems_generation);
+ fmeter_init(&cs->fmeter);
cs->parent = parent;
down(&callback_sem);
list_add(&cs->sibling, &cs->parent->children);
+ number_of_cpusets++;
up(&callback_sem);
err = cpuset_create_dir(cs, name, mode);
@@ -1503,7 +1826,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
/* the vfs holds both inode->i_sem already */
down(&manage_sem);
- refresh_mems();
+ cpuset_update_task_memory_state();
if (atomic_read(&cs->count) > 0) {
up(&manage_sem);
return -EBUSY;
@@ -1524,6 +1847,7 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
spin_unlock(&d->d_lock);
cpuset_d_remove_dir(d);
dput(d);
+ number_of_cpusets--;
up(&callback_sem);
if (list_empty(&parent->children))
check_for_release(parent, &pathbuf);
@@ -1532,6 +1856,21 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry)
return 0;
}
+/*
+ * cpuset_init_early - just enough so that the calls to
+ * cpuset_update_task_memory_state() in early init code
+ * are harmless.
+ */
+
+int __init cpuset_init_early(void)
+{
+ struct task_struct *tsk = current;
+
+ tsk->cpuset = &top_cpuset;
+ tsk->cpuset->mems_generation = atomic_read(&cpuset_mems_generation);
+ return 0;
+}
+
/**
* cpuset_init - initialize cpusets at system boot
*
@@ -1546,6 +1885,7 @@ int __init cpuset_init(void)
top_cpuset.cpus_allowed = CPU_MASK_ALL;
top_cpuset.mems_allowed = NODE_MASK_ALL;
+ fmeter_init(&top_cpuset.fmeter);
atomic_inc(&cpuset_mems_generation);
top_cpuset.mems_generation = atomic_read(&cpuset_mems_generation);
@@ -1566,7 +1906,11 @@ int __init cpuset_init(void)
root->d_inode->i_nlink++;
top_cpuset.dentry = root;
root->d_inode->i_op = &cpuset_dir_inode_operations;
+ number_of_cpusets = 1;
err = cpuset_populate_dir(root);
+ /* memory_pressure_enabled is in root cpuset only */
+ if (err == 0)
+ err = cpuset_add_file(root, &cft_memory_pressure_enabled);
out:
return err;
}
@@ -1632,15 +1976,13 @@ void cpuset_fork(struct task_struct *child)
*
* We don't need to task_lock() this reference to tsk->cpuset,
* because tsk is already marked PF_EXITING, so attach_task() won't
- * mess with it.
+ * mess with it, or task is a failed fork, never visible to attach_task.
**/
void cpuset_exit(struct task_struct *tsk)
{
struct cpuset *cs;
- BUG_ON(!(tsk->flags & PF_EXITING));
-
cs = tsk->cpuset;
tsk->cpuset = NULL;
@@ -1667,14 +2009,14 @@ void cpuset_exit(struct task_struct *tsk)
* tasks cpuset.
**/
-cpumask_t cpuset_cpus_allowed(const struct task_struct *tsk)
+cpumask_t cpuset_cpus_allowed(struct task_struct *tsk)
{
cpumask_t mask;
down(&callback_sem);
- task_lock((struct task_struct *)tsk);
+ task_lock(tsk);
guarantee_online_cpus(tsk->cpuset, &mask);
- task_unlock((struct task_struct *)tsk);
+ task_unlock(tsk);
up(&callback_sem);
return mask;
@@ -1686,43 +2028,26 @@ void cpuset_init_current_mems_allowed(void)
}
/**
- * cpuset_update_current_mems_allowed - update mems parameters to new values
- *
- * If the current tasks cpusets mems_allowed changed behind our backs,
- * update current->mems_allowed and mems_generation to the new value.
- * Do not call this routine if in_interrupt().
+ * cpuset_mems_allowed - return mems_allowed mask from a tasks cpuset.
+ * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
*
- * Call without callback_sem or task_lock() held. May be called
- * with or without manage_sem held. Unless exiting, it will acquire
- * task_lock(). Also might acquire callback_sem during call to
- * refresh_mems().
- */
+ * Description: Returns the nodemask_t mems_allowed of the cpuset
+ * attached to the specified @tsk. Guaranteed to return some non-empty
+ * subset of node_online_map, even if this means going outside the
+ * tasks cpuset.
+ **/
-void cpuset_update_current_mems_allowed(void)
+nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
- struct cpuset *cs;
- int need_to_refresh = 0;
+ nodemask_t mask;
- task_lock(current);
- cs = current->cpuset;
- if (!cs)
- goto done;
- if (current->cpuset_mems_generation != cs->mems_generation)
- need_to_refresh = 1;
-done:
- task_unlock(current);
- if (need_to_refresh)
- refresh_mems();
-}
+ down(&callback_sem);
+ task_lock(tsk);
+ guarantee_online_mems(tsk->cpuset, &mask);
+ task_unlock(tsk);
+ up(&callback_sem);
-/**
- * cpuset_restrict_to_mems_allowed - limit nodes to current mems_allowed
- * @nodes: pointer to a node bitmap that is and-ed with mems_allowed
- */
-void cpuset_restrict_to_mems_allowed(unsigned long *nodes)
-{
- bitmap_and(nodes, nodes, nodes_addr(current->mems_allowed),
- MAX_NUMNODES);
+ return mask;
}
/**
@@ -1795,7 +2120,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
* GFP_USER - only nodes in current tasks mems allowed ok.
**/
-int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
+int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
int node; /* node that zone z is on */
const struct cpuset *cs; /* current cpuset ancestors */
@@ -1867,6 +2192,42 @@ done:
}
/*
+ * Collection of memory_pressure is suppressed unless
+ * this flag is enabled by writing "1" to the special
+ * cpuset file 'memory_pressure_enabled' in the root cpuset.
+ */
+
+int cpuset_memory_pressure_enabled __read_mostly;
+
+/**
+ * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
+ *
+ * Keep a running average of the rate of synchronous (direct)
+ * page reclaim efforts initiated by tasks in each cpuset.
+ *
+ * This represents the rate at which some task in the cpuset
+ * ran low on memory on all nodes it was allowed to use, and
+ * had to enter the kernels page reclaim code in an effort to
+ * create more free memory by tossing clean pages or swapping
+ * or writing dirty pages.
+ *
+ * Display to user space in the per-cpuset read-only file
+ * "memory_pressure". Value displayed is an integer
+ * representing the recent rate of entry into the synchronous
+ * (direct) page reclaim by any task attached to the cpuset.
+ **/
+
+void __cpuset_memory_pressure_bump(void)
+{
+ struct cpuset *cs;
+
+ task_lock(current);
+ cs = current->cpuset;
+ fmeter_markevent(&cs->fmeter);
+ task_unlock(current);
+}
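For reference, the new per-cpuset "memory_pressure" file is meant to be read from user space. A minimal reader is sketched below (illustration only, not part of the patch); it assumes the conventional /dev/cpuset mount point, i.e. that the cpuset filesystem has already been mounted (for example with "mount -t cpuset cpuset /dev/cpuset") and that memory_pressure_enabled has been set to 1 in the root cpuset, otherwise the meter stays at zero.

/*
 * Userspace sketch: sample the root cpuset's "memory_pressure" file.
 * The /dev/cpuset path is an assumption; adjust it to wherever the
 * cpuset filesystem is mounted.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/dev/cpuset/memory_pressure", "r");
	int rate;

	if (!f) {
		perror("fopen /dev/cpuset/memory_pressure");
		return 1;
	}
	if (fscanf(f, "%d", &rate) == 1)
		printf("recent direct reclaim rate: %d (events/sec, scaled by 1000)\n", rate);
	fclose(f);
	return 0;
}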
+
+/*
* proc_cpuset_show()
* - Print tasks cpuset path into seq_file.
* - Used for /proc/<pid>/cpuset.
diff --git a/kernel/exit.c b/kernel/exit.c
index ee515683b92..caceabf3f23 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -72,7 +72,6 @@ repeat:
__ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
- __exit_sighand(p);
/*
* Note that the fastpath in sys_times depends on __exit_signal having
* updated the counters before a task is removed from the tasklist of
@@ -258,7 +257,7 @@ static inline void reparent_to_init(void)
void __set_special_pids(pid_t session, pid_t pgrp)
{
- struct task_struct *curr = current;
+ struct task_struct *curr = current->group_leader;
if (curr->signal->session != session) {
detach_pid(curr, PIDTYPE_SID);
@@ -926,7 +925,6 @@ do_group_exit(int exit_code)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
- sig->flags = SIGNAL_GROUP_EXIT;
sig->group_exit_code = exit_code;
zap_other_threads(current);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index fb8572a4229..72e3252c676 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -743,6 +743,14 @@ int unshare_files(void)
EXPORT_SYMBOL(unshare_files);
+void sighand_free_cb(struct rcu_head *rhp)
+{
+ struct sighand_struct *sp;
+
+ sp = container_of(rhp, struct sighand_struct, rcu);
+ kmem_cache_free(sighand_cachep, sp);
+}
+
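The callback above recovers the enclosing sighand_struct from its embedded rcu_head with container_of(); the kernel/sched.c hunk further down adds the analogous __put_task_struct_cb() for task_struct. A self-contained userspace sketch of that pattern follows (illustration only; the stub types are stand-ins, not kernel structures):

/*
 * container_of() pattern: an rcu_head-like member embedded in a larger
 * object lets the callback recover the enclosing structure from a
 * pointer to the member.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head_stub { void (*func)(struct rcu_head_stub *); };

struct sighand_stub {
	int count;
	struct rcu_head_stub rcu;	/* embedded callback head */
};

static void free_cb(struct rcu_head_stub *rhp)
{
	struct sighand_stub *sp = container_of(rhp, struct sighand_stub, rcu);

	printf("freeing sighand_stub with count=%d\n", sp->count);
}

int main(void)
{
	struct sighand_stub s = { .count = 1 };

	s.rcu.func = free_cb;
	s.rcu.func(&s.rcu);	/* in the kernel, call_rcu() invokes this after a grace period */
	return 0;
}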
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
struct sighand_struct *sig;
@@ -752,7 +760,7 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
- tsk->sighand = sig;
+ rcu_assign_pointer(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
spin_lock_init(&sig->siglock);
@@ -803,9 +811,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
sig->it_prof_expires = cputime_zero;
sig->it_prof_incr = cputime_zero;
- sig->tty = current->signal->tty;
- sig->pgrp = process_group(current);
- sig->session = current->signal->session;
sig->leader = 0; /* session leadership doesn't inherit */
sig->tty_old_pgrp = 0;
@@ -964,12 +969,13 @@ static task_t *copy_process(unsigned long clone_flags,
p->io_context = NULL;
p->io_wait = NULL;
p->audit_context = NULL;
+ cpuset_fork(p);
#ifdef CONFIG_NUMA
p->mempolicy = mpol_copy(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
- goto bad_fork_cleanup;
+ goto bad_fork_cleanup_cpuset;
}
#endif
@@ -1127,25 +1133,19 @@ static task_t *copy_process(unsigned long clone_flags,
attach_pid(p, PIDTYPE_PID, p->pid);
attach_pid(p, PIDTYPE_TGID, p->tgid);
if (thread_group_leader(p)) {
+ p->signal->tty = current->signal->tty;
+ p->signal->pgrp = process_group(current);
+ p->signal->session = current->signal->session;
attach_pid(p, PIDTYPE_PGID, process_group(p));
attach_pid(p, PIDTYPE_SID, p->signal->session);
if (p->pid)
__get_cpu_var(process_counts)++;
}
- if (!current->signal->tty && p->signal->tty)
- p->signal->tty = NULL;
-
nr_threads++;
total_forks++;
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
- cpuset_fork(p);
- retval = 0;
-
-fork_out:
- if (retval)
- return ERR_PTR(retval);
return p;
bad_fork_cleanup_namespace:
@@ -1172,7 +1172,9 @@ bad_fork_cleanup_security:
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
mpol_free(p->mempolicy);
+bad_fork_cleanup_cpuset:
#endif
+ cpuset_exit(p);
bad_fork_cleanup:
if (p->binfmt)
module_put(p->binfmt->module);
@@ -1184,7 +1186,8 @@ bad_fork_cleanup_count:
free_uid(p->user);
bad_fork_free:
free_task(p);
- goto fork_out;
+fork_out:
+ return ERR_PTR(retval);
}
struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 8a64a4844cd..d03b5eef8ce 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -10,6 +10,8 @@
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
+#include "internals.h"
+
static struct proc_dir_entry *root_irq_dir, *irq_dir[NR_IRQS];
#ifdef CONFIG_SMP
diff --git a/kernel/module.c b/kernel/module.c
index 4b06bbad49c..e4276046a1b 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -496,15 +496,15 @@ static void module_unload_free(struct module *mod)
}
#ifdef CONFIG_MODULE_FORCE_UNLOAD
-static inline int try_force(unsigned int flags)
+static inline int try_force_unload(unsigned int flags)
{
int ret = (flags & O_TRUNC);
if (ret)
- add_taint(TAINT_FORCED_MODULE);
+ add_taint(TAINT_FORCED_RMMOD);
return ret;
}
#else
-static inline int try_force(unsigned int flags)
+static inline int try_force_unload(unsigned int flags)
{
return 0;
}
@@ -524,7 +524,7 @@ static int __try_stop_module(void *_sref)
/* If it's not unused, quit unless we are told to block. */
if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) {
- if (!(*sref->forced = try_force(sref->flags)))
+ if (!(*sref->forced = try_force_unload(sref->flags)))
return -EWOULDBLOCK;
}
@@ -609,7 +609,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
/* If it has an init func, it must have an exit func to unload */
if ((mod->init != NULL && mod->exit == NULL)
|| mod->unsafe) {
- forced = try_force(flags);
+ forced = try_force_unload(flags);
if (!forced) {
/* This module can't be removed */
ret = -EBUSY;
@@ -958,7 +958,6 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
unsigned long ret;
const unsigned long *crc;
- spin_lock_irq(&modlist_lock);
ret = __find_symbol(name, &owner, &crc, mod->license_gplok);
if (ret) {
/* use_module can fail due to OOM, or module unloading */
@@ -966,7 +965,6 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
!use_module(mod, owner))
ret = 0;
}
- spin_unlock_irq(&modlist_lock);
return ret;
}
@@ -1204,6 +1202,39 @@ void *__symbol_get(const char *symbol)
}
EXPORT_SYMBOL_GPL(__symbol_get);
+/*
+ * Ensure that an exported symbol [global namespace] does not already exist
+ * in the Kernel or in some other modules exported symbol table.
+ */
+static int verify_export_symbols(struct module *mod)
+{
+ const char *name = NULL;
+ unsigned long i, ret = 0;
+ struct module *owner;
+ const unsigned long *crc;
+
+ for (i = 0; i < mod->num_syms; i++)
+ if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
+ name = mod->syms[i].name;
+ ret = -ENOEXEC;
+ goto dup;
+ }
+
+ for (i = 0; i < mod->num_gpl_syms; i++)
+ if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
+ name = mod->gpl_syms[i].name;
+ ret = -ENOEXEC;
+ goto dup;
+ }
+
+dup:
+ if (ret)
+ printk(KERN_ERR "%s: exports duplicate symbol %s (owned by %s)\n",
+ mod->name, name, module_name(owner));
+
+ return ret;
+}
+
/* Change all symbols so that sh_value encodes the pointer directly. */
static int simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
@@ -1715,6 +1746,11 @@ static struct module *load_module(void __user *umod,
/* Set up license info based on the info section */
set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
+ if (strcmp(mod->name, "ndiswrapper") == 0)
+ add_taint(TAINT_PROPRIETARY_MODULE);
+ if (strcmp(mod->name, "driverloader") == 0)
+ add_taint(TAINT_PROPRIETARY_MODULE);
+
#ifdef CONFIG_MODULE_UNLOAD
/* Set up MODINFO_ATTR fields */
setup_modinfo(mod, sechdrs, infoindex);
@@ -1767,6 +1803,12 @@ static struct module *load_module(void __user *umod,
goto cleanup;
}
+ /* Find duplicate symbols */
+ err = verify_export_symbols(mod);
+
+ if (err < 0)
+ goto cleanup;
+
/* Set up and sort exception table */
mod->num_exentries = sechdrs[exindex].sh_size / sizeof(*mod->extable);
mod->extable = extable = (void *)sechdrs[exindex].sh_addr;
diff --git a/kernel/pid.c b/kernel/pid.c
index edba31c681a..1acc0724699 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -136,7 +136,7 @@ struct pid * fastcall find_pid(enum pid_type type, int nr)
struct hlist_node *elem;
struct pid *pid;
- hlist_for_each_entry(pid, elem,
+ hlist_for_each_entry_rcu(pid, elem,
&pid_hash[type][pid_hashfn(nr)], pid_chain) {
if (pid->nr == nr)
return pid;
@@ -150,15 +150,15 @@ int fastcall attach_pid(task_t *task, enum pid_type type, int nr)
task_pid = &task->pids[type];
pid = find_pid(type, nr);
+ task_pid->nr = nr;
if (pid == NULL) {
- hlist_add_head(&task_pid->pid_chain,
- &pid_hash[type][pid_hashfn(nr)]);
INIT_LIST_HEAD(&task_pid->pid_list);
+ hlist_add_head_rcu(&task_pid->pid_chain,
+ &pid_hash[type][pid_hashfn(nr)]);
} else {
INIT_HLIST_NODE(&task_pid->pid_chain);
- list_add_tail(&task_pid->pid_list, &pid->pid_list);
+ list_add_tail_rcu(&task_pid->pid_list, &pid->pid_list);
}
- task_pid->nr = nr;
return 0;
}
@@ -170,20 +170,20 @@ static fastcall int __detach_pid(task_t *task, enum pid_type type)
pid = &task->pids[type];
if (!hlist_unhashed(&pid->pid_chain)) {
- hlist_del(&pid->pid_chain);
- if (list_empty(&pid->pid_list))
+ if (list_empty(&pid->pid_list)) {
nr = pid->nr;
- else {
+ hlist_del_rcu(&pid->pid_chain);
+ } else {
pid_next = list_entry(pid->pid_list.next,
struct pid, pid_list);
/* insert next pid from pid_list to hash */
- hlist_add_head(&pid_next->pid_chain,
- &pid_hash[type][pid_hashfn(pid_next->nr)]);
+ hlist_replace_rcu(&pid->pid_chain,
+ &pid_next->pid_chain);
}
}
- list_del(&pid->pid_list);
+ list_del_rcu(&pid->pid_list);
pid->nr = 0;
return nr;
diff --git a/kernel/printk.c b/kernel/printk.c
index 5287be83e3e..2251be80cd2 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -569,7 +569,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
p[1] <= '7' && p[2] == '>') {
loglev_char = p[1];
p += 3;
- printed_len += 3;
+ printed_len -= 3;
} else {
loglev_char = default_message_loglevel
+ '0';
@@ -584,7 +584,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
for (tp = tbuf; tp < tbuf + tlen; tp++)
emit_log_char(*tp);
- printed_len += tlen - 3;
+ printed_len += tlen;
} else {
if (p[0] != '<' || p[1] < '0' ||
p[1] > '7' || p[2] != '>') {
@@ -592,8 +592,8 @@ asmlinkage int vprintk(const char *fmt, va_list args)
emit_log_char(default_message_loglevel
+ '0');
emit_log_char('>');
+ printed_len += 3;
}
- printed_len += 3;
}
log_level_unknown = 0;
if (!*p)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 656476eedb1..cceaf09ac41 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -408,54 +408,62 @@ int ptrace_request(struct task_struct *child, long request,
return ret;
}
-#ifndef __ARCH_SYS_PTRACE
-static int ptrace_get_task_struct(long request, long pid,
- struct task_struct **childp)
+/**
+ * ptrace_traceme -- helper for PTRACE_TRACEME
+ *
+ * Performs checks and sets PT_PTRACED.
+ * Should be used by all ptrace implementations for PTRACE_TRACEME.
+ */
+int ptrace_traceme(void)
{
- struct task_struct *child;
int ret;
/*
- * Callers use child == NULL as an indication to exit early even
- * when the return value is 0, so make sure it is non-NULL here.
+ * Are we already being traced?
+ */
+ if (current->ptrace & PT_PTRACED)
+ return -EPERM;
+ ret = security_ptrace(current->parent, current);
+ if (ret)
+ return -EPERM;
+ /*
+ * Set the ptrace bit in the process ptrace flags.
*/
- *childp = NULL;
+ current->ptrace |= PT_PTRACED;
+ return 0;
+}
- if (request == PTRACE_TRACEME) {
- /*
- * Are we already being traced?
- */
- if (current->ptrace & PT_PTRACED)
- return -EPERM;
- ret = security_ptrace(current->parent, current);
- if (ret)
- return -EPERM;
- /*
- * Set the ptrace bit in the process ptrace flags.
- */
- current->ptrace |= PT_PTRACED;
- return 0;
- }
+/**
+ * ptrace_get_task_struct -- grab a task struct reference for ptrace
+ * @pid: process id to grab a task_struct reference of
+ *
+ * This function is a helper for ptrace implementations. It checks
+ * permissions and then grabs a task struct for use of the actual
+ * ptrace implementation.
+ *
+ * Returns the task_struct for @pid or an ERR_PTR() on failure.
+ */
+struct task_struct *ptrace_get_task_struct(pid_t pid)
+{
+ struct task_struct *child;
/*
- * You may not mess with init
+ * Tracing init is not allowed.
*/
if (pid == 1)
- return -EPERM;
+ return ERR_PTR(-EPERM);
- ret = -ESRCH;
read_lock(&tasklist_lock);
child = find_task_by_pid(pid);
if (child)
get_task_struct(child);
read_unlock(&tasklist_lock);
if (!child)
- return -ESRCH;
-
- *childp = child;
- return 0;
+ return ERR_PTR(-ESRCH);
+ return child;
}
+#ifndef __ARCH_SYS_PTRACE
asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
struct task_struct *child;
@@ -465,9 +473,16 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
* This lock_kernel fixes a subtle race with suid exec
*/
lock_kernel();
- ret = ptrace_get_task_struct(request, pid, &child);
- if (!child)
+ if (request == PTRACE_TRACEME) {
+ ret = ptrace_traceme();
goto out;
+ }
+
+ child = ptrace_get_task_struct(pid);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
+ goto out;
+ }
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 48d3bce465b..30b0bba0385 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
@@ -45,7 +46,6 @@
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
-#include <linux/rcuref.h>
#include <linux/cpu.h>
/* Definition for rcupdate control block. */
@@ -61,9 +61,9 @@ struct rcu_state {
/* for current batch to proceed. */
};
-static struct rcu_state rcu_state ____cacheline_maxaligned_in_smp =
+static struct rcu_state rcu_state ____cacheline_internodealigned_in_smp =
{.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
-static struct rcu_state rcu_bh_state ____cacheline_maxaligned_in_smp =
+static struct rcu_state rcu_bh_state ____cacheline_internodealigned_in_smp =
{.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
@@ -73,19 +73,6 @@ DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int maxbatch = 10000;
-#ifndef __HAVE_ARCH_CMPXCHG
-/*
- * We use an array of spinlocks for the rcurefs -- similar to ones in sparc
- * 32 bit atomic_t implementations, and a hash function similar to that
- * for our refcounting needs.
- * Can't help multiprocessors which donot have cmpxchg :(
- */
-
-spinlock_t __rcuref_hash[RCUREF_HASH_SIZE] = {
- [0 ... (RCUREF_HASH_SIZE-1)] = SPIN_LOCK_UNLOCKED
-};
-#endif
-
/**
* call_rcu - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 49fbbeff201..773219907dd 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -39,7 +39,6 @@
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
-#include <linux/rcuref.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
@@ -49,9 +48,11 @@
MODULE_LICENSE("GPL");
static int nreaders = -1; /* # reader threads, defaults to 4*ncpus */
-static int stat_interval = 0; /* Interval between stats, in seconds. */
+static int stat_interval; /* Interval between stats, in seconds. */
/* Defaults to "only at end of test". */
-static int verbose = 0; /* Print more debug info. */
+static int verbose; /* Print more debug info. */
+static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
+static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
MODULE_PARM(nreaders, "i");
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
@@ -59,6 +60,10 @@ MODULE_PARM(stat_interval, "i");
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
MODULE_PARM(verbose, "i");
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
+MODULE_PARM(test_no_idle_hz, "i");
+MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
+MODULE_PARM(shuffle_interval, "i");
+MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
#define TORTURE_FLAG "rcutorture: "
#define PRINTK_STRING(s) \
do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
@@ -73,6 +78,7 @@ static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
+static struct task_struct *shuffler_task;
#define RCU_TORTURE_PIPE_LEN 10
@@ -103,7 +109,7 @@ atomic_t n_rcu_torture_error;
/*
* Allocate an element from the rcu_tortures pool.
*/
-struct rcu_torture *
+static struct rcu_torture *
rcu_torture_alloc(void)
{
struct list_head *p;
@@ -376,12 +382,77 @@ rcu_torture_stats(void *arg)
return 0;
}
+static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
+
+/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
+ * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
+ */
+void rcu_torture_shuffle_tasks(void)
+{
+ cpumask_t tmp_mask = CPU_MASK_ALL;
+ int i;
+
+ lock_cpu_hotplug();
+
+ /* No point in shuffling if there is only one online CPU (ex: UP) */
+ if (num_online_cpus() == 1) {
+ unlock_cpu_hotplug();
+ return;
+ }
+
+ if (rcu_idle_cpu != -1)
+ cpu_clear(rcu_idle_cpu, tmp_mask);
+
+ set_cpus_allowed(current, tmp_mask);
+
+ if (reader_tasks != NULL) {
+ for (i = 0; i < nrealreaders; i++)
+ if (reader_tasks[i])
+ set_cpus_allowed(reader_tasks[i], tmp_mask);
+ }
+
+ if (writer_task)
+ set_cpus_allowed(writer_task, tmp_mask);
+
+ if (stats_task)
+ set_cpus_allowed(stats_task, tmp_mask);
+
+ if (rcu_idle_cpu == -1)
+ rcu_idle_cpu = num_online_cpus() - 1;
+ else
+ rcu_idle_cpu--;
+
+ unlock_cpu_hotplug();
+}
+
+/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
+ * system to become idle at a time and cut off its timer ticks. This is meant
+ * to test the support for such tickless idle CPU in RCU.
+ */
+static int
+rcu_torture_shuffle(void *arg)
+{
+ VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
+ do {
+ schedule_timeout_interruptible(shuffle_interval * HZ);
+ rcu_torture_shuffle_tasks();
+ } while (!kthread_should_stop());
+ VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
+ return 0;
+}
+
static void
rcu_torture_cleanup(void)
{
int i;
fullstop = 1;
+ if (shuffler_task != NULL) {
+ VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
+ kthread_stop(shuffler_task);
+ }
+ shuffler_task = NULL;
+
if (writer_task != NULL) {
VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
kthread_stop(writer_task);
@@ -430,9 +501,11 @@ rcu_torture_init(void)
nrealreaders = nreaders;
else
nrealreaders = 2 * num_online_cpus();
- printk(KERN_ALERT TORTURE_FLAG
- "--- Start of test: nreaders=%d stat_interval=%d verbose=%d\n",
- nrealreaders, stat_interval, verbose);
+ printk(KERN_ALERT TORTURE_FLAG "--- Start of test: nreaders=%d "
+ "stat_interval=%d verbose=%d test_no_idle_hz=%d "
+ "shuffle_interval = %d\n",
+ nrealreaders, stat_interval, verbose, test_no_idle_hz,
+ shuffle_interval);
fullstop = 0;
/* Set up the freelist. */
@@ -502,6 +575,18 @@ rcu_torture_init(void)
goto unwind;
}
}
+ if (test_no_idle_hz) {
+ rcu_idle_cpu = num_online_cpus() - 1;
+ /* Create the shuffler thread */
+ shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
+ "rcu_torture_shuffle");
+ if (IS_ERR(shuffler_task)) {
+ firsterr = PTR_ERR(shuffler_task);
+ VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
+ shuffler_task = NULL;
+ goto unwind;
+ }
+ }
return 0;
unwind:
diff --git a/kernel/sched.c b/kernel/sched.c
index 6f46c94cc29..92733091154 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -176,6 +176,13 @@ static unsigned int task_timeslice(task_t *p)
#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
< (long long) (sd)->cache_hot_time)
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+ __put_task_struct(container_of(rhp, struct task_struct, rcu));
+}
+
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+
/*
* These are the runqueue data structures:
*/
diff --git a/kernel/signal.c b/kernel/signal.c
index d7611f189ef..08aa5b263f3 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -329,13 +329,20 @@ void __exit_sighand(struct task_struct *tsk)
/* Ok, we're done with the signal handlers */
tsk->sighand = NULL;
if (atomic_dec_and_test(&sighand->count))
- kmem_cache_free(sighand_cachep, sighand);
+ sighand_free(sighand);
}
void exit_sighand(struct task_struct *tsk)
{
write_lock_irq(&tasklist_lock);
- __exit_sighand(tsk);
+ rcu_read_lock();
+ if (tsk->sighand != NULL) {
+ struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
+ spin_lock(&sighand->siglock);
+ __exit_sighand(tsk);
+ spin_unlock(&sighand->siglock);
+ }
+ rcu_read_unlock();
write_unlock_irq(&tasklist_lock);
}
@@ -345,19 +352,20 @@ void exit_sighand(struct task_struct *tsk)
void __exit_signal(struct task_struct *tsk)
{
struct signal_struct * sig = tsk->signal;
- struct sighand_struct * sighand = tsk->sighand;
+ struct sighand_struct * sighand;
if (!sig)
BUG();
if (!atomic_read(&sig->count))
BUG();
+ rcu_read_lock();
+ sighand = rcu_dereference(tsk->sighand);
spin_lock(&sighand->siglock);
posix_cpu_timers_exit(tsk);
if (atomic_dec_and_test(&sig->count)) {
posix_cpu_timers_exit_group(tsk);
- if (tsk == sig->curr_target)
- sig->curr_target = next_thread(tsk);
tsk->signal = NULL;
+ __exit_sighand(tsk);
spin_unlock(&sighand->siglock);
flush_sigqueue(&sig->shared_pending);
} else {
@@ -389,9 +397,11 @@ void __exit_signal(struct task_struct *tsk)
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
sig->sched_time += tsk->sched_time;
+ __exit_sighand(tsk);
spin_unlock(&sighand->siglock);
sig = NULL; /* Marker for below. */
}
+ rcu_read_unlock();
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
if (sig) {
@@ -613,6 +623,33 @@ void signal_wake_up(struct task_struct *t, int resume)
* Returns 1 if any signals were found.
*
* All callers must be holding the siglock.
+ *
+ * This version takes a sigset mask and looks at all signals,
+ * not just those in the first mask word.
+ */
+static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
+{
+ struct sigqueue *q, *n;
+ sigset_t m;
+
+ sigandsets(&m, mask, &s->signal);
+ if (sigisemptyset(&m))
+ return 0;
+
+ signandsets(&s->signal, &s->signal, mask);
+ list_for_each_entry_safe(q, n, &s->list, list) {
+ if (sigismember(mask, q->info.si_signo)) {
+ list_del_init(&q->list);
+ __sigqueue_free(q);
+ }
+ }
+ return 1;
+}
+/*
+ * Remove signals in mask from the pending set and queue.
+ * Returns 1 if any signals were found.
+ *
+ * All callers must be holding the siglock.
*/
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
@@ -1080,18 +1117,29 @@ void zap_other_threads(struct task_struct *p)
}
/*
- * Must be called with the tasklist_lock held for reading!
+ * Must be called under rcu_read_lock() or with tasklist_lock read-held.
*/
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
unsigned long flags;
+ struct sighand_struct *sp;
int ret;
+retry:
ret = check_kill_permission(sig, info, p);
- if (!ret && sig && p->sighand) {
- spin_lock_irqsave(&p->sighand->siglock, flags);
+ if (!ret && sig && (sp = rcu_dereference(p->sighand))) {
+ spin_lock_irqsave(&sp->siglock, flags);
+ if (p->sighand != sp) {
+ spin_unlock_irqrestore(&sp->siglock, flags);
+ goto retry;
+ }
+ if ((atomic_read(&sp->count) == 0) ||
+ (atomic_read(&p->usage) == 0)) {
+ spin_unlock_irqrestore(&sp->siglock, flags);
+ return -ESRCH;
+ }
ret = __group_send_sig_info(sig, info, p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ spin_unlock_irqrestore(&sp->siglock, flags);
}
return ret;
@@ -1136,14 +1184,21 @@ int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
int error;
+ int acquired_tasklist_lock = 0;
struct task_struct *p;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
+ if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) {
+ read_lock(&tasklist_lock);
+ acquired_tasklist_lock = 1;
+ }
p = find_task_by_pid(pid);
error = -ESRCH;
if (p)
error = group_send_sig_info(sig, info, p);
- read_unlock(&tasklist_lock);
+ if (unlikely(acquired_tasklist_lock))
+ read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return error;
}
@@ -1163,8 +1218,7 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
ret = -ESRCH;
goto out_unlock;
}
- if ((!info || ((unsigned long)info != 1 &&
- (unsigned long)info != 2 && SI_FROMUSER(info)))
+ if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
&& (euid != p->suid) && (euid != p->uid)
&& (uid != p->suid) && (uid != p->uid)) {
ret = -EPERM;
@@ -1355,16 +1409,54 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
unsigned long flags;
int ret = 0;
+ struct sighand_struct *sh;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
- read_lock(&tasklist_lock);
+
+ /*
+ * The rcu based delayed sighand destroy makes it possible to
+ * run this without tasklist lock held. The task struct itself
+ * cannot go away as create_timer did get_task_struct().
+ *
+ * We return -1, when the task is marked exiting, so
+ * posix_timer_event can redirect it to the group leader
+ */
+ rcu_read_lock();
if (unlikely(p->flags & PF_EXITING)) {
ret = -1;
goto out_err;
}
- spin_lock_irqsave(&p->sighand->siglock, flags);
+retry:
+ sh = rcu_dereference(p->sighand);
+
+ spin_lock_irqsave(&sh->siglock, flags);
+ if (p->sighand != sh) {
+ /* We raced with exec() in a multithreaded process... */
+ spin_unlock_irqrestore(&sh->siglock, flags);
+ goto retry;
+ }
+
+ /*
+ * We do the check here again to handle the following scenario:
+ *
+ * CPU 0                          CPU 1
+ * send_sigqueue
+ * check PF_EXITING
+ *                                interrupt exit code running
+ *                                __exit_signal
+ *                                lock sighand->siglock
+ *                                unlock sighand->siglock
+ * lock sh->siglock
+ * add(tsk->pending)              flush_sigqueue(tsk->pending)
+ *
+ */
+
+ if (unlikely(p->flags & PF_EXITING)) {
+ ret = -1;
+ goto out;
+ }
if (unlikely(!list_empty(&q->list))) {
/*
@@ -1388,9 +1480,9 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
signal_wake_up(p, sig == SIGKILL);
out:
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
+ spin_unlock_irqrestore(&sh->siglock, flags);
out_err:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return ret;
}
@@ -1402,7 +1494,9 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
int ret = 0;
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
+
read_lock(&tasklist_lock);
+ /* Since it_lock is held, p->sighand cannot be NULL. */
spin_lock_irqsave(&p->sighand->siglock, flags);
handle_stop_signal(sig, p);
@@ -1436,7 +1530,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
out:
spin_unlock_irqrestore(&p->sighand->siglock, flags);
read_unlock(&tasklist_lock);
- return(ret);
+ return ret;
}
/*
@@ -2338,6 +2432,7 @@ int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
struct k_sigaction *k;
+ sigset_t mask;
if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
return -EINVAL;
@@ -2385,9 +2480,11 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
*k = *act;
sigdelsetmask(&k->sa.sa_mask,
sigmask(SIGKILL) | sigmask(SIGSTOP));
- rm_from_queue(sigmask(sig), &t->signal->shared_pending);
+ sigemptyset(&mask);
+ sigaddset(&mask, sig);
+ rm_from_queue_full(&mask, &t->signal->shared_pending);
do {
- rm_from_queue(sigmask(sig), &t->pending);
+ rm_from_queue_full(&mask, &t->pending);
recalc_sigpending_tsk(t);
t = next_thread(t);
} while (t != current);
diff --git a/kernel/sys.c b/kernel/sys.c
index eecf84526af..b6941e06d5d 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -489,6 +489,12 @@ asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user
magic2 != LINUX_REBOOT_MAGIC2C))
return -EINVAL;
+ /* Instead of trying to make the power_off code look like
+ * halt when pm_power_off is not set do it the easy way.
+ */
+ if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
+ cmd = LINUX_REBOOT_CMD_HALT;
+
lock_kernel();
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
@@ -1084,10 +1090,11 @@ asmlinkage long sys_times(struct tms __user * tbuf)
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
{
struct task_struct *p;
+ struct task_struct *group_leader = current->group_leader;
int err = -EINVAL;
if (!pid)
- pid = current->pid;
+ pid = group_leader->pid;
if (!pgid)
pgid = pid;
if (pgid < 0)
@@ -1107,16 +1114,16 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
if (!thread_group_leader(p))
goto out;
- if (p->parent == current || p->real_parent == current) {
+ if (p->real_parent == group_leader) {
err = -EPERM;
- if (p->signal->session != current->signal->session)
+ if (p->signal->session != group_leader->signal->session)
goto out;
err = -EACCES;
if (p->did_exec)
goto out;
} else {
err = -ESRCH;
- if (p != current)
+ if (p != group_leader)
goto out;
}
@@ -1128,7 +1135,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
struct task_struct *p;
do_each_task_pid(pgid, PIDTYPE_PGID, p) {
- if (p->signal->session == current->signal->session)
+ if (p->signal->session == group_leader->signal->session)
goto ok_pgid;
} while_each_task_pid(pgid, PIDTYPE_PGID, p);
goto out;
@@ -1208,24 +1215,22 @@ asmlinkage long sys_getsid(pid_t pid)
asmlinkage long sys_setsid(void)
{
+ struct task_struct *group_leader = current->group_leader;
struct pid *pid;
int err = -EPERM;
- if (!thread_group_leader(current))
- return -EINVAL;
-
down(&tty_sem);
write_lock_irq(&tasklist_lock);
- pid = find_pid(PIDTYPE_PGID, current->pid);
+ pid = find_pid(PIDTYPE_PGID, group_leader->pid);
if (pid)
goto out;
- current->signal->leader = 1;
- __set_special_pids(current->pid, current->pid);
- current->signal->tty = NULL;
- current->signal->tty_old_pgrp = 0;
- err = process_group(current);
+ group_leader->signal->leader = 1;
+ __set_special_pids(group_leader->pid, group_leader->pid);
+ group_leader->signal->tty = NULL;
+ group_leader->signal->tty_old_pgrp = 0;
+ err = process_group(group_leader);
out:
write_unlock_irq(&tasklist_lock);
up(&tty_sem);
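
sys_setpgid() and sys_setsid() above now operate on current->group_leader, since session and process-group membership belong to the whole thread group rather than to one thread, which is why the old thread_group_leader(current) check can go. From userspace the calls look the same; a small example of the semantics (a forked child, not being a group leader, can start a new session):

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child < 0)
		return 1;
	if (child == 0) {
		/* Not a group leader after fork(), so setsid() succeeds and
		 * makes this process session and process-group leader. */
		pid_t sid = setsid();

		printf("child %d: sid=%d pgid=%d\n",
		       (int)getpid(), (int)sid, (int)getpgid(0));
		_exit(0);
	}
	waitpid(child, NULL, 0);
	return 0;
}
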
@@ -1687,7 +1692,10 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
if (unlikely(!p->signal))
return;
+ utime = stime = cputime_zero;
+
switch (who) {
+ case RUSAGE_BOTH:
case RUSAGE_CHILDREN:
spin_lock_irqsave(&p->sighand->siglock, flags);
utime = p->signal->cutime;
@@ -1697,22 +1705,11 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_minflt = p->signal->cmin_flt;
r->ru_majflt = p->signal->cmaj_flt;
spin_unlock_irqrestore(&p->sighand->siglock, flags);
- cputime_to_timeval(utime, &r->ru_utime);
- cputime_to_timeval(stime, &r->ru_stime);
- break;
+
+ if (who == RUSAGE_CHILDREN)
+ break;
+
case RUSAGE_SELF:
- spin_lock_irqsave(&p->sighand->siglock, flags);
- utime = stime = cputime_zero;
- goto sum_group;
- case RUSAGE_BOTH:
- spin_lock_irqsave(&p->sighand->siglock, flags);
- utime = p->signal->cutime;
- stime = p->signal->cstime;
- r->ru_nvcsw = p->signal->cnvcsw;
- r->ru_nivcsw = p->signal->cnivcsw;
- r->ru_minflt = p->signal->cmin_flt;
- r->ru_majflt = p->signal->cmaj_flt;
- sum_group:
utime = cputime_add(utime, p->signal->utime);
stime = cputime_add(stime, p->signal->stime);
r->ru_nvcsw += p->signal->nvcsw;
@@ -1729,13 +1726,14 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
r->ru_majflt += t->maj_flt;
t = next_thread(t);
} while (t != p);
- spin_unlock_irqrestore(&p->sighand->siglock, flags);
- cputime_to_timeval(utime, &r->ru_utime);
- cputime_to_timeval(stime, &r->ru_stime);
break;
+
default:
BUG();
}
+
+ cputime_to_timeval(utime, &r->ru_utime);
+ cputime_to_timeval(stime, &r->ru_stime);
}
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
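
The k_getrusage() rewrite above folds the three cases into one accumulation path: RUSAGE_BOTH falls through the children sums into the self sums, RUSAGE_CHILDREN breaks out early, and the cputime-to-timeval conversion happens once at the end. A toy sketch of that control flow; the enum and field names are illustrative, not the kernel's accounting fields:

#include <stdio.h>

enum who { R_SELF, R_CHILDREN, R_BOTH };

struct acct { long self, children; };

static long usage_sum(const struct acct *a, enum who which)
{
	long total = 0;

	switch (which) {
	case R_BOTH:
	case R_CHILDREN:
		total += a->children;	/* the c* fields in the kernel */
		if (which == R_CHILDREN)
			break;
		/* fall through to also count the caller's own usage */
	case R_SELF:
		total += a->self;
		break;
	}
	return total;	/* the shared cputime_to_timeval() step goes here */
}

int main(void)
{
	struct acct a = { .self = 7, .children = 5 };

	printf("SELF=%ld CHILDREN=%ld BOTH=%ld\n",
	       usage_sum(&a, R_SELF), usage_sum(&a, R_CHILDREN),
	       usage_sum(&a, R_BOTH));	/* SELF=7 CHILDREN=5 BOTH=12 */
	return 0;
}
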
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 1ab2370e2ef..bd3b9bfcfce 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -82,6 +82,28 @@ cond_syscall(compat_sys_socketcall);
cond_syscall(sys_inotify_init);
cond_syscall(sys_inotify_add_watch);
cond_syscall(sys_inotify_rm_watch);
+cond_syscall(sys_migrate_pages);
+cond_syscall(sys_chown16);
+cond_syscall(sys_fchown16);
+cond_syscall(sys_getegid16);
+cond_syscall(sys_geteuid16);
+cond_syscall(sys_getgid16);
+cond_syscall(sys_getgroups16);
+cond_syscall(sys_getresgid16);
+cond_syscall(sys_getresuid16);
+cond_syscall(sys_getuid16);
+cond_syscall(sys_lchown16);
+cond_syscall(sys_setfsgid16);
+cond_syscall(sys_setfsuid16);
+cond_syscall(sys_setgid16);
+cond_syscall(sys_setgroups16);
+cond_syscall(sys_setregid16);
+cond_syscall(sys_setresgid16);
+cond_syscall(sys_setresuid16);
+cond_syscall(sys_setreuid16);
+cond_syscall(sys_setuid16);
+cond_syscall(sys_vm86old);
+cond_syscall(sys_vm86);
/* arch-specific weak syscall entries */
cond_syscall(sys_pciconfig_read);
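
The new cond_syscall() lines above cover sys_migrate_pages, the 16-bit uid/gid calls and the vm86 entries. cond_syscall() makes each symbol a weak reference that the linker resolves to sys_ni_syscall() (returning -ENOSYS) whenever the architecture or config does not build the real implementation. A userspace sketch of the same weak-alias trick using the GCC attribute on Linux; the sys_vm86 name here is only a stand-in:

#include <errno.h>
#include <stdio.h>

/* The "not implemented" stub, playing the role of sys_ni_syscall(). */
long not_implemented(void)
{
	return -ENOSYS;
}

/* Roughly what cond_syscall(sys_vm86) arranges: if nothing else defines
 * sys_vm86, this weak alias to the stub is what the linker picks. */
long sys_vm86(void) __attribute__((weak, alias("not_implemented")));

int main(void)
{
	printf("sys_vm86() -> %ld\n", sys_vm86());	/* -38, i.e. -ENOSYS */
	return 0;
}
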
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index a85047bb573..03b0598f236 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -68,6 +68,8 @@ extern int min_free_kbytes;
extern int printk_ratelimit_jiffies;
extern int printk_ratelimit_burst;
extern int pid_max_min, pid_max_max;
+extern int sysctl_drop_caches;
+extern int percpu_pagelist_fraction;
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
int unknown_nmi_panic;
@@ -78,6 +80,7 @@ extern int proc_unknown_nmi_panic(ctl_table *, int, struct file *,
/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
static int maxolduid = 65535;
static int minolduid;
+static int min_percpu_pagelist_fract = 8;
static int ngroups_max = NGROUPS_MAX;
@@ -775,6 +778,15 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_intvec,
},
{
+ .ctl_name = VM_DROP_PAGECACHE,
+ .procname = "drop_caches",
+ .data = &sysctl_drop_caches,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = drop_caches_sysctl_handler,
+ .strategy = &sysctl_intvec,
+ },
+ {
.ctl_name = VM_MIN_FREE_KBYTES,
.procname = "min_free_kbytes",
.data = &min_free_kbytes,
@@ -784,6 +796,16 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_intvec,
.extra1 = &zero,
},
+ {
+ .ctl_name = VM_PERCPU_PAGELIST_FRACTION,
+ .procname = "percpu_pagelist_fraction",
+ .data = &percpu_pagelist_fraction,
+ .maxlen = sizeof(percpu_pagelist_fraction),
+ .mode = 0644,
+ .proc_handler = &percpu_pagelist_fraction_sysctl_handler,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_percpu_pagelist_fract,
+ },
#ifdef CONFIG_MMU
{
.ctl_name = VM_MAX_MAP_COUNT,
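
The sysctl additions above expose /proc/sys/vm/drop_caches (handled by drop_caches_sysctl_handler) and /proc/sys/vm/percpu_pagelist_fraction (with a floor of 8 supplied through extra1). A usage-side sketch that writes the drop_caches knob from userspace; it needs root and a kernel carrying this patch, and the value semantics (1: clean pagecache, 2: reclaimable slab, 3: both) come from the wider patch series, not from this hunk:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/drop_caches", "w");

	if (!f) {
		perror("/proc/sys/vm/drop_caches");
		return 1;
	}
	fputs("1\n", f);	/* 1: drop clean pagecache (2: slab, 3: both) */
	fclose(f);
	return 0;
}
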
diff --git a/kernel/timer.c b/kernel/timer.c
index fd74268d866..074b4bd5cfd 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -33,6 +33,7 @@
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
+#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2bd5aee1c73..82c4fa70595 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,7 +29,8 @@
#include <linux/kthread.h>
/*
- * The per-CPU workqueue (if single thread, we always use cpu 0's).
+ * The per-CPU workqueue (if single thread, we always use the first
+ * possible cpu).
*
* The sequence counters are for flush_scheduled_work(). It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
@@ -69,6 +70,8 @@ struct workqueue_struct {
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
+static int singlethread_cpu;
+
/* If it's single threaded, it isn't in the list of workqueues. */
static inline int is_single_threaded(struct workqueue_struct *wq)
{
@@ -102,7 +105,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
if (!test_and_set_bit(0, &work->pending)) {
if (unlikely(is_single_threaded(wq)))
- cpu = any_online_cpu(cpu_online_map);
+ cpu = singlethread_cpu;
BUG_ON(!list_empty(&work->entry));
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
ret = 1;
@@ -118,7 +121,7 @@ static void delayed_work_timer_fn(unsigned long __data)
int cpu = smp_processor_id();
if (unlikely(is_single_threaded(wq)))
- cpu = any_online_cpu(cpu_online_map);
+ cpu = singlethread_cpu;
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -267,7 +270,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
if (is_single_threaded(wq)) {
/* Always use first cpu's area. */
- flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
+ flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, singlethread_cpu));
} else {
int cpu;
@@ -315,12 +318,17 @@ struct workqueue_struct *__create_workqueue(const char *name,
return NULL;
wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
+ if (!wq->cpu_wq) {
+ kfree(wq);
+ return NULL;
+ }
+
wq->name = name;
/* We don't need the distraction of CPUs appearing and vanishing. */
lock_cpu_hotplug();
if (singlethread) {
INIT_LIST_HEAD(&wq->list);
- p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+ p = create_workqueue_thread(wq, singlethread_cpu);
if (!p)
destroy = 1;
else
@@ -374,7 +382,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
/* We don't need the distraction of CPUs appearing and vanishing. */
lock_cpu_hotplug();
if (is_single_threaded(wq))
- cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
+ cleanup_workqueue_thread(wq, singlethread_cpu);
else {
for_each_online_cpu(cpu)
cleanup_workqueue_thread(wq, cpu);
@@ -419,6 +427,25 @@ int schedule_delayed_work_on(int cpu,
return ret;
}
+int schedule_on_each_cpu(void (*func) (void *info), void *info)
+{
+ int cpu;
+ struct work_struct *work;
+
+ work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+
+ if (!work)
+ return -ENOMEM;
+ for_each_online_cpu(cpu) {
+ INIT_WORK(work + cpu, func, info);
+ __queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+ work + cpu);
+ }
+ flush_workqueue(keventd_wq);
+ kfree(work);
+ return 0;
+}
+
void flush_scheduled_work(void)
{
flush_workqueue(keventd_wq);
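
schedule_on_each_cpu(), added above, queues one work item per online CPU on keventd and flushes the queue before returning, giving callers a simple "run this in process context on every CPU" helper. A hypothetical out-of-tree module sketch against the signature in this patch (callback plus a void *info argument), assuming the symbol is reachable from modules:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void say_hello(void *info)
{
	/* Runs once per online CPU, in that CPU's keventd thread. */
	printk(KERN_INFO "hello from cpu %d (info=%s)\n",
	       smp_processor_id(), (char *)info);
}

static int __init hello_init(void)
{
	/* Queues the work on each CPU and waits for all of it to finish. */
	return schedule_on_each_cpu(say_hello, "demo");
}

static void __exit hello_exit(void)
{
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");
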
@@ -543,6 +570,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
void init_workqueues(void)
{
+ singlethread_cpu = first_cpu(cpu_possible_map);
hotcpu_notifier(workqueue_cpu_callback, 0);
keventd_wq = create_workqueue("events");
BUG_ON(!keventd_wq);
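
The singlethread_cpu variable initialized here replaces the repeated any_online_cpu(cpu_online_map) lookups: single-threaded workqueues are tied once, at init time, to the first possible CPU's per-cpu area. Nothing changes for callers of the API; a hypothetical module sketch using the single-threaded workqueue interfaces touched by this patch (2.6-era DECLARE_WORK with a void *data argument):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(void *data)
{
	/* Runs on the workqueue's single worker thread. */
	printk(KERN_INFO "single-threaded work ran (data=%p)\n", data);
}

static DECLARE_WORK(demo_work, demo_fn, NULL);

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	flush_workqueue(demo_wq);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
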