author		David S. Miller <davem@davemloft.net>	2010-05-12 00:05:35 -0700
committer	David S. Miller <davem@davemloft.net>	2010-05-12 00:05:35 -0700
commit		278554bd6579206921f5d8a523649a7a57f8850d (patch)
tree		4e6c527daf0910e455b3aa72e2c96b0479e430be /kernel
parent		5a147e8bf982f9dd414c1dd751fe02c1942506b2 (diff)
parent		cea0d767c29669bf89f86e4aee46ef462d2ebae8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/net/wireless/ath/ar9170/usb.c
	drivers/scsi/iscsi_tcp.c
	net/ipv4/ipmr.c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c            | 17
-rw-r--r--  kernel/cgroup.c          | 62
-rw-r--r--  kernel/cgroup_freezer.c  |  5
-rw-r--r--  kernel/cred.c            |  4
-rw-r--r--  kernel/fork.c            |  2
-rw-r--r--  kernel/kexec.c           |  6
-rw-r--r--  kernel/perf_event.c      |  2
-rw-r--r--  kernel/rcupdate.c        | 11
-rw-r--r--  kernel/sched.c           | 18
-rw-r--r--  kernel/sched_debug.c     |  2
-rw-r--r--  kernel/sys.c             |  2
-rw-r--r--  kernel/workqueue.c       |  2
12 files changed, 97 insertions(+), 36 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index 24f8c81fc48..e4c0e1fee9b 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -353,17 +353,18 @@ restart:
void acct_exit_ns(struct pid_namespace *ns)
{
- struct bsd_acct_struct *acct;
+ struct bsd_acct_struct *acct = ns->bacct;
- spin_lock(&acct_lock);
- acct = ns->bacct;
- if (acct != NULL) {
- if (acct->file != NULL)
- acct_file_reopen(acct, NULL, NULL);
+ if (acct == NULL)
+ return;
- kfree(acct);
- }
+ del_timer_sync(&acct->timer);
+ spin_lock(&acct_lock);
+ if (acct->file != NULL)
+ acct_file_reopen(acct, NULL, NULL);
spin_unlock(&acct_lock);
+
+ kfree(acct);
}
/*
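For context on the reordering in this hunk: kfree() of an object whose timer handler may still be running is a use-after-free, so the timer must be stopped with del_timer_sync() before the free, and del_timer_sync() must not be called while holding a lock the handler itself takes. A minimal sketch of the general pattern, with hypothetical names (struct my_obj, my_lock), assuming the handler acquires the same spinlock:

#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_obj {
	struct timer_list timer;
	void *resource;
};

static DEFINE_SPINLOCK(my_lock);

static void my_obj_teardown(struct my_obj *obj)
{
	/*
	 * Waits for a running handler to finish. The handler takes
	 * my_lock (assumption of this sketch), so this must happen
	 * before my_lock is acquired; doing it under the lock would
	 * deadlock against a concurrently running handler.
	 */
	del_timer_sync(&obj->timer);

	spin_lock(&my_lock);
	obj->resource = NULL;		/* detach under the lock */
	spin_unlock(&my_lock);

	kfree(obj);			/* no handler can touch obj now */
}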
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2769e13980..6d870f2d122 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1646,7 +1646,9 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
char *start;
- struct dentry *dentry = rcu_dereference(cgrp->dentry);
+ struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
+ rcu_read_lock_held() ||
+ cgroup_lock_is_held());
if (!dentry || cgrp == dummytop) {
/*
@@ -1662,13 +1664,17 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
*--start = '\0';
for (;;) {
int len = dentry->d_name.len;
+
if ((start -= len) < buf)
return -ENAMETOOLONG;
- memcpy(start, cgrp->dentry->d_name.name, len);
+ memcpy(start, dentry->d_name.name, len);
cgrp = cgrp->parent;
if (!cgrp)
break;
- dentry = rcu_dereference(cgrp->dentry);
+
+ dentry = rcu_dereference_check(cgrp->dentry,
+ rcu_read_lock_held() ||
+ cgroup_lock_is_held());
if (!cgrp->parent)
continue;
if (--start < buf)
@@ -4429,7 +4435,15 @@ __setup("cgroup_disable=", cgroup_disable);
*/
unsigned short css_id(struct cgroup_subsys_state *css)
{
- struct css_id *cssid = rcu_dereference(css->id);
+ struct css_id *cssid;
+
+ /*
+ * This css_id() returns the correct value when someone holds a refcnt
+ * on this css or when it is called under rcu_read_lock(). Once css->id
+ * is allocated, it is unchanged until freed.
+ */
+ cssid = rcu_dereference_check(css->id,
+ rcu_read_lock_held() || atomic_read(&css->refcnt));
if (cssid)
return cssid->id;
@@ -4439,7 +4453,10 @@ EXPORT_SYMBOL_GPL(css_id);
unsigned short css_depth(struct cgroup_subsys_state *css)
{
- struct css_id *cssid = rcu_dereference(css->id);
+ struct css_id *cssid;
+
+ cssid = rcu_dereference_check(css->id,
+ rcu_read_lock_held() || atomic_read(&css->refcnt));
if (cssid)
return cssid->depth;
@@ -4447,15 +4464,36 @@ unsigned short css_depth(struct cgroup_subsys_state *css)
}
EXPORT_SYMBOL_GPL(css_depth);
+/**
+ * css_is_ancestor - test whether "root" css is an ancestor of "child"
+ * @child: the css to be tested.
+ * @root: the css supposed to be an ancestor of the child.
+ *
+ * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
+ * this function reads css->id, it uses rcu_dereference() and rcu_read_lock().
+ * But, considering usual usage, the csses should be valid objects after the
+ * test. Assuming that the caller will act on the child if this returns true,
+ * the caller must hold a reference count on "child". If "child" is a valid
+ * object and this returns true, "root" is valid, too.
+ */
+
bool css_is_ancestor(struct cgroup_subsys_state *child,
const struct cgroup_subsys_state *root)
{
- struct css_id *child_id = rcu_dereference(child->id);
- struct css_id *root_id = rcu_dereference(root->id);
+ struct css_id *child_id;
+ struct css_id *root_id;
+ bool ret = true;
- if (!child_id || !root_id || (child_id->depth < root_id->depth))
- return false;
- return child_id->stack[root_id->depth] == root_id->id;
+ rcu_read_lock();
+ child_id = rcu_dereference(child->id);
+ root_id = rcu_dereference(root->id);
+ if (!child_id
+ || !root_id
+ || (child_id->depth < root_id->depth)
+ || (child_id->stack[root_id->depth] != root_id->id))
+ ret = false;
+ rcu_read_unlock();
+ return ret;
}
static void __free_css_id_cb(struct rcu_head *head)
@@ -4555,13 +4593,13 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
{
int subsys_id, i, depth = 0;
struct cgroup_subsys_state *parent_css, *child_css;
- struct css_id *child_id, *parent_id = NULL;
+ struct css_id *child_id, *parent_id;
subsys_id = ss->subsys_id;
parent_css = parent->subsys[subsys_id];
child_css = child->subsys[subsys_id];
- depth = css_depth(parent_css) + 1;
parent_id = parent_css->id;
+ depth = parent_id->depth;
child_id = get_new_cssid(ss, depth);
if (IS_ERR(child_id))
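The recurring change in this file: under CONFIG_PROVE_RCU, rcu_dereference() complains unless it runs inside an RCU read-side critical section, so call sites that are also legal under a lock switch to rcu_dereference_check() and name every context in which the dereference is safe. A minimal standalone sketch of the pattern, with hypothetical names (my_ptr, my_mutex, struct foo):

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct foo {
	int val;
};

static DEFINE_MUTEX(my_mutex);	/* updater-side lock */
static struct foo *my_ptr;	/* RCU-protected pointer */

/* Legal either under rcu_read_lock() or with my_mutex held. */
static int foo_val(void)
{
	struct foo *f = rcu_dereference_check(my_ptr,
					      rcu_read_lock_held() ||
					      lockdep_is_held(&my_mutex));

	return f ? f->val : -1;
}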
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index da5e1397553..e5c0244962b 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -205,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
* No lock is needed, since the task isn't on tasklist yet,
* so it can't be moved to another cgroup, which means the
* freezer won't be removed and will be valid during this
- * function call.
+ * function call. Nevertheless, apply an RCU read-side critical
+ * section to suppress RCU lockdep false positives.
*/
+ rcu_read_lock();
freezer = task_freezer(task);
+ rcu_read_unlock();
/*
* The root cgroup is non-freezable, so we can skip the
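The same idiom appears in the sched.c hunk further down: when the object is pinned by other means (here, the task is not yet on the tasklist), an accessor's internal rcu_dereference() still trips CONFIG_PROVE_RCU, so a nominal read-side critical section is taken purely to satisfy lockdep. A sketch; obj_state_of() and struct obj_state are hypothetical, and the accessor is assumed to use rcu_dereference() internally:

static struct obj_state *state_of(struct task_struct *task)
{
	struct obj_state *state;

	rcu_read_lock();		/* only to quiet CONFIG_PROVE_RCU */
	state = obj_state_of(task);
	rcu_read_unlock();

	/*
	 * Still usable after the unlock: "task" pins the state; RCU
	 * was never the lifetime guarantee here.
	 */
	return state;
}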
diff --git a/kernel/cred.c b/kernel/cred.c
index e1dbe9eef80..62af1816c23 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -398,6 +398,8 @@ struct cred *prepare_usermodehelper_creds(void)
error:
put_cred(new);
+ return NULL;
+
free_tgcred:
#ifdef CONFIG_KEYS
kfree(tgcred);
@@ -791,8 +793,6 @@ bool creds_are_invalid(const struct cred *cred)
{
if (cred->magic != CRED_MAGIC)
return true;
- if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
- return true;
#ifdef CONFIG_SECURITY_SELINUX
if (selinux_is_enabled()) {
if ((unsigned long) cred->security < PAGE_SIZE)
diff --git a/kernel/fork.c b/kernel/fork.c
index 44b0791b0a2..4c14942a0ee 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1114,8 +1114,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->bts = NULL;
- p->stack_start = stack_start;
-
/* Perform scheduler related setup. Assign this task to a CPU. */
sched_fork(p, clone_flags);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 87ebe8adc47..474a84715ea 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1134,11 +1134,9 @@ int crash_shrink_memory(unsigned long new_size)
free_reserved_phys_range(end, crashk_res.end);
- if (start == end) {
- crashk_res.end = end;
+ if (start == end)
release_resource(&crashk_res);
- } else
- crashk_res.end = end - 1;
+ crashk_res.end = end - 1;
unlock:
mutex_unlock(&kexec_mutex);
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 2f3fbf84215..3d1552d3c12 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -4897,7 +4897,7 @@ err_fput_free_put_context:
err_free_put_context:
if (err < 0)
- kfree(event);
+ free_event(event);
err_put_context:
if (err < 0)
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 03a7ea1579f..49d808e833b 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,3 +122,14 @@ void wakeme_after_rcu(struct rcu_head *head)
rcu = container_of(head, struct rcu_synchronize, head);
complete(&rcu->completion);
}
+
+#ifdef CONFIG_PROVE_RCU
+/*
+ * wrapper function to avoid #include problems.
+ */
+int rcu_my_thread_group_empty(void)
+{
+ return thread_group_empty(current);
+}
+EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
+#endif /* #ifdef CONFIG_PROVE_RCU */
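The wrapper exists so that a lockdep condition written in a header can ask "is current single-threaded?" without dragging sched.h's thread_group_empty() into every rcupdate.h user. A hedged sketch of the kind of check it enables (struct my_obj and obj_ptr() are hypothetical; the fdtable checks of this era used a condition of roughly this shape):

struct my_obj {			/* hypothetical per-process object */
	atomic_t refcnt;
	struct foo *ptr;	/* RCU-protected */
};

static struct foo *obj_ptr(struct my_obj *obj)
{
	/*
	 * Safe without rcu_read_lock() when no one else can update the
	 * pointer: either we hold the last reference, or the current
	 * process has no sibling threads to race with.
	 */
	return rcu_dereference_check(obj->ptr,
				     rcu_read_lock_held() ||
				     atomic_read(&obj->refcnt) == 1 ||
				     rcu_my_thread_group_empty());
}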
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de7..3c2a54f70ff 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -323,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
+ /*
+ * Strictly speaking this rcu_read_lock() is not needed since the
+ * task_group is tied to the cgroup, which in turn can never go away
+ * as long as there are tasks attached to it.
+ *
+ * However since task_group() uses task_subsys_state() which is an
+ * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
+ */
+ rcu_read_lock();
#ifdef CONFIG_FAIR_GROUP_SCHED
p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
p->se.parent = task_group(p)->se[cpu];
@@ -332,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
p->rt.rt_rq = task_group(p)->rt_rq[cpu];
p->rt.parent = task_group(p)->rt_se[cpu];
#endif
+ rcu_read_unlock();
}
#else
@@ -3780,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
* the mutex owner just released it and exited.
*/
if (probe_kernel_address(&owner->cpu, cpu))
- goto out;
+ return 0;
#else
cpu = owner->cpu;
#endif
@@ -3790,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
* the cpu field may no longer be valid.
*/
if (cpu >= nr_cpumask_bits)
- goto out;
+ return 0;
/*
* We need to validate that we can do a
* get_cpu() and that we have the percpu area.
*/
if (!cpu_online(cpu))
- goto out;
+ return 0;
rq = cpu_rq(cpu);
@@ -3816,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
cpu_relax();
}
-out:
+
return 1;
}
#endif
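The goto-to-return conversion above is a semantics fix rather than a cleanup: the removed "out:" label returned 1, so the early-exit paths (owner exited, bogus cpu number, cpu offline) told the caller to keep spinning. In the optimistic-spin loop of this era's __mutex_lock_common() (paraphrased from memory, so treat as a sketch), only a zero return lets the acquirer give up and block:

for (;;) {
	struct thread_info *owner;

	owner = ACCESS_ONCE(lock->owner);
	if (owner && !mutex_spin_on_owner(lock, owner))
		break;	/* 0 => stop spinning, fall back to sleeping */

	/* ...cmpxchg attempt on lock->count, cpu_relax(), etc... */
}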
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 9b49db14403..19be00ba612 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -114,7 +114,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
char path[64];
+ rcu_read_lock();
cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
+ rcu_read_unlock();
SEQ_printf(m, " %s", path);
}
#endif
diff --git a/kernel/sys.c b/kernel/sys.c
index 6d1a7e0f9d5..7cb426a5896 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1118,7 +1118,7 @@ DECLARE_RWSEM(uts_sem);
#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
- (current->personality == PER_LINUX32 && \
+ (personality(current->personality) == PER_LINUX32 && \
copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
sizeof(COMPAT_UTS_MACHINE)))
#else
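Why the sys.c one-liner matters: the personality word packs feature flags (ADDR_NO_RANDOMIZE, STICKY_TIMEOUTS, ...) above the low byte, so a task running as PER_LINUX32 with any flag set failed the raw equality test and missed the uname machine-string override. A minimal sketch (is_linux32() is a hypothetical helper):

#include <linux/personality.h>

static int is_linux32(unsigned int persona)
{
	/*
	 * A raw compare breaks once any flag bit is set, e.g.
	 * (PER_LINUX32 | ADDR_NO_RANDOMIZE) != PER_LINUX32.
	 * personality() masks down to the base persona (PER_MASK)
	 * before the comparison.
	 */
	return personality(persona) == PER_LINUX32;
}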
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dee48658805..5bfb213984b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -774,7 +774,7 @@ void flush_delayed_work(struct delayed_work *dwork)
{
if (del_timer_sync(&dwork->timer)) {
struct cpu_workqueue_struct *cwq;
- cwq = wq_per_cpu(keventd_wq, get_cpu());
+ cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
__queue_work(cwq, &dwork->work);
put_cpu();
}