Diffstat (limited to 'security/keys/keyring.c')
-rw-r--r-- | security/keys/keyring.c | 312
1 files changed, 181 insertions, 131 deletions
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index dd7cd0f8e13..ef03a82a013 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -17,9 +17,14 @@
 #include <linux/seq_file.h>
 #include <linux/err.h>
 #include <keys/keyring-type.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include "internal.h"
 
+#define rcu_dereference_locked_keyring(keyring)                         \
+        (rcu_dereference_protected(                                     \
+                (keyring)->payload.subscriptions,                       \
+                rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
+
 /*
  * when plumbing the depths of the key tree, this sets a hard limit set on how
  * deep we're willing to go
@@ -39,7 +44,7 @@ static inline unsigned keyring_hash(const char *desc)
         unsigned bucket = 0;
 
         for (; *desc; desc++)
-                bucket += (unsigned char) *desc;
+                bucket += (unsigned char)*desc;
 
         return bucket & (KEYRING_NAME_HASH_SIZE - 1);
 }
@@ -170,12 +175,10 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
 {
         struct keyring_list *klist;
 
-        if (keyring->description) {
+        if (keyring->description)
                 seq_puts(m, keyring->description);
-        }
-        else {
+        else
                 seq_puts(m, "[anon]");
-        }
 
         rcu_read_lock();
         klist = rcu_dereference(keyring->payload.subscriptions);
@@ -201,8 +204,7 @@ static long keyring_read(const struct key *keyring,
         int loop, ret;
 
         ret = 0;
-        klist = keyring->payload.subscriptions;
-
+        klist = rcu_dereference_locked_keyring(keyring);
         if (klist) {
                 /* calculate how much data we could return */
                 qty = klist->nkeys * sizeof(key_serial_t);
@@ -237,7 +239,7 @@ static long keyring_read(const struct key *keyring,
                 ret = qty;
         }
 
- error:
+error:
         return ret;
 
 } /* end keyring_read() */
@@ -306,7 +308,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
         key_check(keyring);
 
         /* top keyring must have search permission to begin the search */
-        err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
+        err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
         if (err < 0) {
                 key_ref = ERR_PTR(err);
                 goto error;
         }
@@ -508,7 +510,7 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
         rcu_read_unlock();
         return ERR_PTR(-ENOKEY);
 
- found:
+found:
         atomic_inc(&key->usage);
         rcu_read_unlock();
         return make_key_ref(key, possessed);
@@ -526,9 +528,8 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
         struct key *keyring;
         int bucket;
 
-        keyring = ERR_PTR(-EINVAL);
         if (!name)
-                goto error;
+                return ERR_PTR(-EINVAL);
 
         bucket = keyring_hash(name);
 
@@ -555,17 +556,18 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
                                         KEY_SEARCH) < 0)
                                 continue;
 
-                        /* we've got a match */
-                        atomic_inc(&keyring->usage);
-                        read_unlock(&keyring_name_lock);
-                        goto error;
+                        /* we've got a match but we might end up racing with
+                         * key_cleanup() if the keyring is currently 'dead'
+                         * (ie. it has a zero usage count) */
+                        if (!atomic_inc_not_zero(&keyring->usage))
+                                continue;
+                        goto out;
                 }
         }
 
-        read_unlock(&keyring_name_lock);
         keyring = ERR_PTR(-ENOKEY);
-
- error:
+out:
+        read_unlock(&keyring_name_lock);
         return keyring;
 
 } /* end find_keyring_by_name() */
@@ -598,7 +600,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
         sp = 0;
 
         /* start processing a new keyring */
- descend:
+descend:
         if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
                 goto not_this_keyring;
 
@@ -607,7 +609,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
                 goto not_this_keyring;
         kix = 0;
 
- ascend:
+ascend:
         /* iterate through the remaining keys in this keyring */
         for (; kix < keylist->nkeys; kix++) {
                 key = keylist->keys[kix];
@@ -633,7 +635,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
 
         /* the keyring we're looking at was disqualified or didn't contain a
          * matching key */
- not_this_keyring:
+not_this_keyring:
         if (sp > 0) {
                 /* resume the checking of a keyring higher up in the tree */
                 sp--;
@@ -644,34 +646,20 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
 
         ret = 0; /* no cycles detected */
 
- error:
+error:
         rcu_read_unlock();
         return ret;
 
- too_deep:
+too_deep:
         ret = -ELOOP;
         goto error;
 
- cycle_detected:
+cycle_detected:
         ret = -EDEADLK;
         goto error;
 
 } /* end keyring_detect_cycle() */
 
-/*****************************************************************************/
-/*
- * dispose of a keyring list after the RCU grace period
- */
-static void keyring_link_rcu_disposal(struct rcu_head *rcu)
-{
-        struct keyring_list *klist =
-                container_of(rcu, struct keyring_list, rcu);
-
-        kfree(klist);
-
-} /* end keyring_link_rcu_disposal() */
-
-/*****************************************************************************/
 /*
  * dispose of a keyring list after the RCU grace period, freeing the unlinked
  * key
@@ -681,56 +669,51 @@ static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
         struct keyring_list *klist =
                 container_of(rcu, struct keyring_list, rcu);
 
-        key_put(klist->keys[klist->delkey]);
+        if (klist->delkey != USHORT_MAX)
+                key_put(klist->keys[klist->delkey]);
         kfree(klist);
+}
 
-} /* end keyring_unlink_rcu_disposal() */
-
-/*****************************************************************************/
 /*
- * link a key into to a keyring
- * - must be called with the keyring's semaphore write-locked
- * - discard already extant link to matching key if there is one
+ * preallocate memory so that a key can be linked into to a keyring
  */
-int __key_link(struct key *keyring, struct key *key)
+int __key_link_begin(struct key *keyring, const struct key_type *type,
+                     const char *description,
+                     struct keyring_list **_prealloc)
+        __acquires(&keyring->sem)
 {
         struct keyring_list *klist, *nklist;
         unsigned max;
         size_t size;
         int loop, ret;
 
-        ret = -EKEYREVOKED;
-        if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
-                goto error;
+        kenter("%d,%s,%s,", key_serial(keyring), type->name, description);
 
-        ret = -ENOTDIR;
         if (keyring->type != &key_type_keyring)
-                goto error;
+                return -ENOTDIR;
 
-        /* serialise link/link calls to prevent parallel calls causing a
-         * cycle when applied to two keyring in opposite orders */
-        down_write(&keyring_serialise_link_sem);
+        down_write(&keyring->sem);
 
-        /* check that we aren't going to create a cycle adding one keyring to
-         * another */
-        if (key->type == &key_type_keyring) {
-                ret = keyring_detect_cycle(keyring, key);
-                if (ret < 0)
-                        goto error2;
-        }
+        ret = -EKEYREVOKED;
+        if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+                goto error_krsem;
 
-        /* see if there's a matching key we can displace */
-        klist = keyring->payload.subscriptions;
+        /* serialise link/link calls to prevent parallel calls causing a cycle
+         * when linking two keyring in opposite orders */
+        if (type == &key_type_keyring)
+                down_write(&keyring_serialise_link_sem);
 
-        if (klist && klist->nkeys > 0) {
-                struct key_type *type = key->type;
+        klist = rcu_dereference_locked_keyring(keyring);
 
+        /* see if there's a matching key we can displace */
+        if (klist && klist->nkeys > 0) {
                 for (loop = klist->nkeys - 1; loop >= 0; loop--) {
                         if (klist->keys[loop]->type == type &&
                             strcmp(klist->keys[loop]->description,
-                                   key->description) == 0
+                                   description) == 0
                             ) {
-                                /* found a match - replace with new key */
+                                /* found a match - we'll replace this one with
+                                 * the new key */
                                 size = sizeof(struct key *) * klist->maxkeys;
                                 size += sizeof(*klist);
                                 BUG_ON(size > PAGE_SIZE);
@@ -738,22 +721,10 @@ int __key_link(struct key *keyring, struct key *key)
                                 ret = -ENOMEM;
                                 nklist = kmemdup(klist, size, GFP_KERNEL);
                                 if (!nklist)
-                                        goto error2;
-
-                                /* replace matched key */
-                                atomic_inc(&key->usage);
-                                nklist->keys[loop] = key;
-
-                                rcu_assign_pointer(
-                                        keyring->payload.subscriptions,
-                                        nklist);
-
-                                /* dispose of the old keyring list and the
-                                 * displaced key */
-                                klist->delkey = loop;
-                                call_rcu(&klist->rcu,
-                                         keyring_unlink_rcu_disposal);
+                                        goto error_sem;
 
+                                /* note replacement slot */
+                                klist->delkey = nklist->delkey = loop;
                                 goto done;
                         }
                 }
@@ -763,90 +734,167 @@ int __key_link(struct key *keyring, struct key *key)
         ret = key_payload_reserve(keyring,
                                   keyring->datalen + KEYQUOTA_LINK_BYTES);
         if (ret < 0)
-                goto error2;
-
-        klist = keyring->payload.subscriptions;
+                goto error_sem;
 
         if (klist && klist->nkeys < klist->maxkeys) {
-                /* there's sufficient slack space to add directly */
-                atomic_inc(&key->usage);
-
-                klist->keys[klist->nkeys] = key;
-                smp_wmb();
-                klist->nkeys++;
-                smp_wmb();
-        }
-        else {
+                /* there's sufficient slack space to append directly */
+                nklist = NULL;
+        } else {
                 /* grow the key list */
                 max = 4;
                 if (klist)
                         max += klist->maxkeys;
 
                 ret = -ENFILE;
-                if (max > 65535)
-                        goto error3;
+                if (max > USHORT_MAX - 1)
+                        goto error_quota;
                 size = sizeof(*klist) + sizeof(struct key *) * max;
                 if (size > PAGE_SIZE)
-                        goto error3;
+                        goto error_quota;
 
                 ret = -ENOMEM;
                 nklist = kmalloc(size, GFP_KERNEL);
                 if (!nklist)
-                        goto error3;
-                nklist->maxkeys = max;
-                nklist->nkeys = 0;
+                        goto error_quota;
 
+                nklist->maxkeys = max;
                 if (klist) {
-                        nklist->nkeys = klist->nkeys;
-                        memcpy(nklist->keys,
-                               klist->keys,
+                        memcpy(nklist->keys, klist->keys,
                                sizeof(struct key *) * klist->nkeys);
+                        nklist->delkey = klist->nkeys;
+                        nklist->nkeys = klist->nkeys + 1;
+                        klist->delkey = USHORT_MAX;
+                } else {
+                        nklist->nkeys = 1;
+                        nklist->delkey = 0;
                 }
 
                 /* add the key into the new space */
-                atomic_inc(&key->usage);
-                nklist->keys[nklist->nkeys++] = key;
-
-                rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
-                /* dispose of the old keyring list */
-                if (klist)
-                        call_rcu(&klist->rcu, keyring_link_rcu_disposal);
+                nklist->keys[nklist->delkey] = NULL;
         }
 
 done:
-        ret = 0;
-error2:
-        up_write(&keyring_serialise_link_sem);
-error:
-        return ret;
+        *_prealloc = nklist;
+        kleave(" = 0");
+        return 0;
 
-error3:
+error_quota:
         /* undo the quota changes */
         key_payload_reserve(keyring,
                             keyring->datalen - KEYQUOTA_LINK_BYTES);
-        goto error2;
+error_sem:
+        if (type == &key_type_keyring)
+                up_write(&keyring_serialise_link_sem);
+error_krsem:
+        up_write(&keyring->sem);
+        kleave(" = %d", ret);
+        return ret;
+}
 
-} /* end __key_link() */
+/*
+ * check already instantiated keys aren't going to be a problem
+ * - the caller must have called __key_link_begin()
+ * - don't need to call this for keys that were created since __key_link_begin()
+ *   was called
+ */
+int __key_link_check_live_key(struct key *keyring, struct key *key)
+{
+        if (key->type == &key_type_keyring)
+                /* check that we aren't going to create a cycle by linking one
+                 * keyring to another */
+                return keyring_detect_cycle(keyring, key);
+        return 0;
+}
+
+/*
+ * link a key into to a keyring
+ * - must be called with __key_link_begin() having being called
+ * - discard already extant link to matching key if there is one
+ */
+void __key_link(struct key *keyring, struct key *key,
+                struct keyring_list **_prealloc)
+{
+        struct keyring_list *klist, *nklist;
+
+        nklist = *_prealloc;
+        *_prealloc = NULL;
+
+        kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
+
+        klist = rcu_dereference_protected(keyring->payload.subscriptions,
+                                          rwsem_is_locked(&keyring->sem));
+
+        atomic_inc(&key->usage);
+
+        /* there's a matching key we can displace or an empty slot in a newly
+         * allocated list we can fill */
+        if (nklist) {
+                kdebug("replace %hu/%hu/%hu",
+                       nklist->delkey, nklist->nkeys, nklist->maxkeys);
+
+                nklist->keys[nklist->delkey] = key;
+
+                rcu_assign_pointer(keyring->payload.subscriptions, nklist);
+
+                /* dispose of the old keyring list and, if there was one, the
+                 * displaced key */
+                if (klist) {
+                        kdebug("dispose %hu/%hu/%hu",
+                               klist->delkey, klist->nkeys, klist->maxkeys);
+                        call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
+                }
+        } else {
+                /* there's sufficient slack space to append directly */
+                klist->keys[klist->nkeys] = key;
+                smp_wmb();
+                klist->nkeys++;
+        }
+}
+
+/*
+ * finish linking a key into to a keyring
+ * - must be called with __key_link_begin() having being called
+ */
+void __key_link_end(struct key *keyring, struct key_type *type,
+                    struct keyring_list *prealloc)
+        __releases(&keyring->sem)
+{
+        BUG_ON(type == NULL);
+        BUG_ON(type->name == NULL);
+        kenter("%d,%s,%p", keyring->serial, type->name, prealloc);
+
+        if (type == &key_type_keyring)
+                up_write(&keyring_serialise_link_sem);
+
+        if (prealloc) {
+                kfree(prealloc);
+                key_payload_reserve(keyring,
+                                    keyring->datalen - KEYQUOTA_LINK_BYTES);
+        }
+        up_write(&keyring->sem);
+}
 
-/*****************************************************************************/
 /*
  * link a key to a keyring
  */
 int key_link(struct key *keyring, struct key *key)
 {
+        struct keyring_list *prealloc;
         int ret;
 
         key_check(keyring);
         key_check(key);
 
-        down_write(&keyring->sem);
-        ret = __key_link(keyring, key);
-        up_write(&keyring->sem);
+        ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
+        if (ret == 0) {
+                ret = __key_link_check_live_key(keyring, key);
+                if (ret == 0)
+                        __key_link(keyring, key, &prealloc);
+                __key_link_end(keyring, key->type, prealloc);
+        }
 
         return ret;
-
-} /* end key_link() */
+}
 
 EXPORT_SYMBOL(key_link);
@@ -868,7 +916,7 @@ int key_unlink(struct key *keyring, struct key *key)
 
         down_write(&keyring->sem);
 
-        klist = keyring->payload.subscriptions;
+        klist = rcu_dereference_locked_keyring(keyring);
         if (klist) {
                 /* search the keyring for the key */
                 for (loop = 0; loop < klist->nkeys; loop++)
@@ -959,7 +1007,7 @@ int keyring_clear(struct key *keyring)
                 /* detach the pointer block with the locks held */
                 down_write(&keyring->sem);
 
-                klist = keyring->payload.subscriptions;
+                klist = rcu_dereference_locked_keyring(keyring);
                 if (klist) {
                         /* adjust the quota */
                         key_payload_reserve(keyring,
@@ -991,7 +1039,9 @@ EXPORT_SYMBOL(keyring_clear);
  */
 static void keyring_revoke(struct key *keyring)
 {
-        struct keyring_list *klist = keyring->payload.subscriptions;
+        struct keyring_list *klist;
+
+        klist = rcu_dereference_locked_keyring(keyring);
 
         /* adjust the quota */
         key_payload_reserve(keyring, 0);
@@ -1025,7 +1075,7 @@ void keyring_gc(struct key *keyring, time_t limit)
 
         down_write(&keyring->sem);
 
-        klist = keyring->payload.subscriptions;
+        klist = rcu_dereference_locked_keyring(keyring);
         if (!klist)
                 goto no_klist;
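
Editorial note on the change above: the single-step __key_link() is split into a three-phase API. __key_link_begin() takes the keyring semaphore, reserves quota and preallocates the replacement keyring_list while sleeping is still allowed; __key_link() then commits the link without being able to fail; __key_link_end() drops the locks and discards an unused preallocation. A minimal sketch of how a caller drives it is shown below. It is modelled on the new key_link() in this diff; my_link_key() is a hypothetical wrapper used purely for illustration, while the __key_link_*() calls and their signatures are taken from the patch (in the kernel tree they are declared in security/keys/internal.h).

/* Sketch only: driving the three-phase link API introduced above. */
static int my_link_key(struct key *keyring, struct key *key)
{
        struct keyring_list *prealloc;
        int ret;

        /* take keyring->sem, reserve quota and preallocate the expanded
         * (or duplicated) keyring_list while we may still sleep */
        ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
        if (ret < 0)
                return ret;

        /* checks that may still fail go here, e.g. cycle detection when
         * linking one keyring into another */
        ret = __key_link_check_live_key(keyring, key);
        if (ret == 0)
                /* commit the link; this step cannot fail because all the
                 * allocation was done in __key_link_begin() */
                __key_link(keyring, key, &prealloc);

        /* drop the locks; if __key_link() did not consume the preallocated
         * list, it is freed and the reserved quota handed back */
        __key_link_end(keyring, key->type, prealloc);
        return ret;
}

The same begin/check/link/end pattern is what lets callers such as key instantiation paths prepare a link in advance and then attach the key at a point where failure is no longer acceptable.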