author     Pavel Emelyanov <xemul@openvz.org>        2008-01-25 21:08:26 +0100
committer  Ingo Molnar <mingo@elte.hu>               2008-01-25 21:08:26 +0100
commit     8eb703e4f33488bf75829564d51d427e17f7cd4c
tree       ee520bf5f1b6c5b61f3b2b6b950ecb4108fc22fc  /kernel/user.c
parent     dc938520d2bf343b239795cfa24e4f44649358dc
uids: merge multiple error paths in alloc_uid() into one
There are already four error paths in alloc_uid() that perform incremental rollbacks, and it is time to merge them into a single unwind sequence. As a bonus, this saves a few lines of code: the diffstat shows a net reduction of seven lines.
It might have been better to fold this change into the previous patch, but when I sent a similar combined patch some time ago (an error-path fix together with a cleanup), I was asked to split such changes into two patches, so this one is kept separate.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
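The cleanup switches alloc_uid() to the usual kernel goto-unwind idiom: each failure branch jumps to a label that releases only what was acquired before the failing step, and the labels fall through in reverse order of acquisition, so every rollback line appears exactly once. A minimal user-space sketch of the same idiom follows; widget_create() and its fields are invented names for illustration only, not anything from kernel/user.c.

/*
 * Illustrative sketch of the goto-based error unwinding used by the patch.
 * Hypothetical example code, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct widget {                         /* stand-in for user_struct */
        char *name;
        int  *stats;
};

static struct widget *widget_create(const char *name)
{
        struct widget *w = malloc(sizeof(*w));

        if (!w)
                goto out;               /* nothing acquired yet */

        w->name = strdup(name);
        if (!w->name)
                goto out_free_widget;   /* undo step 1 only */

        w->stats = calloc(16, sizeof(*w->stats));
        if (!w->stats)
                goto out_free_name;     /* undo steps 2 and 1 */

        return w;                       /* single success path */

        /* Error unwinding, in reverse order of acquisition. */
out_free_name:
        free(w->name);
out_free_widget:
        free(w);
out:
        return NULL;
}

int main(void)
{
        struct widget *w = widget_create("example");

        printf("widget_create: %s\n", w ? "ok" : "failed");
        if (w) {
                free(w->stats);
                free(w->name);
                free(w);
        }
        return 0;
}

The label ordering is what keeps each path correct: a failure at step N lands on the label that undoes step N-1 and falls through to the earlier ones, which is the same structure the out_destoy_sched/out_put_keys/out_free_user/out_unlock chain gives alloc_uid() in the diff below.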
Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c  47
1 file changed, 20 insertions(+), 27 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index ab4fd706993..bc1c48d35cb 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -319,7 +319,7 @@ void free_uid(struct user_struct *up)
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
         struct hlist_head *hashent = uidhashentry(ns, uid);
-        struct user_struct *up;
+        struct user_struct *up, *new;
 
         /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
          * atomic.
@@ -331,13 +331,9 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
         spin_unlock_irq(&uidhash_lock);
 
         if (!up) {
-                struct user_struct *new;
-
                 new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
-                if (!new) {
-                        uids_mutex_unlock();
-                        return NULL;
-                }
+                if (!new)
+                        goto out_unlock;
 
                 new->uid = uid;
                 atomic_set(&new->__count, 1);
@@ -353,28 +349,14 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 #endif
                 new->locked_shm = 0;
 
-                if (alloc_uid_keyring(new, current) < 0) {
-                        kmem_cache_free(uid_cachep, new);
-                        uids_mutex_unlock();
-                        return NULL;
-                }
+                if (alloc_uid_keyring(new, current) < 0)
+                        goto out_free_user;
 
-                if (sched_create_user(new) < 0) {
-                        key_put(new->uid_keyring);
-                        key_put(new->session_keyring);
-                        kmem_cache_free(uid_cachep, new);
-                        uids_mutex_unlock();
-                        return NULL;
-                }
+                if (sched_create_user(new) < 0)
+                        goto out_put_keys;
 
-                if (uids_user_create(new)) {
-                        sched_destroy_user(new);
-                        key_put(new->uid_keyring);
-                        key_put(new->session_keyring);
-                        kmem_cache_free(uid_cachep, new);
-                        uids_mutex_unlock();
-                        return NULL;
-                }
+                if (uids_user_create(new))
+                        goto out_destoy_sched;
 
                 /*
                  * Before adding this, check whether we raced
@@ -402,6 +384,17 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
         uids_mutex_unlock();
 
         return up;
+
+out_destoy_sched:
+        sched_destroy_user(new);
+out_put_keys:
+        key_put(new->uid_keyring);
+        key_put(new->session_keyring);
+out_free_user:
+        kmem_cache_free(uid_cachep, new);
+out_unlock:
+        uids_mutex_unlock();
+        return NULL;
 }
 
 void switch_uid(struct user_struct *new_user)