Diffstat (limited to 'ipc/util.c')
-rw-r--r--	ipc/util.c	150
1 file changed, 108 insertions, 42 deletions
diff --git a/ipc/util.c b/ipc/util.c
index 03eadd8fb0f..579201e4bc0 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
  *	NULL is returned if the allocation fails
  */
 
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
 {
-	void* out;
+	void *out;
 	if(size > PAGE_SIZE)
 		out = vmalloc(size);
 	else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
  */
 struct ipc_rcu_hdr
 {
-	int refcount;
+	atomic_t refcount;
 	int is_vmalloc;
 	void *data[0];
 };
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
  *	@size: size desired
  *
  *	Allocate memory for the rcu header structure + the object.
- *	Returns the pointer to the object.
- *	NULL is returned if the allocation fails.
+ *	Returns the pointer to the object or NULL upon failure.
  */
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
 {
-	void* out;
-	/*
+	void *out;
+
+	/*
 	 * We prepend the allocation with the rcu struct, and
-	 * workqueue if necessary (for vmalloc).
+	 * workqueue if necessary (for vmalloc).
 	 */
 	if (rcu_use_vmalloc(size)) {
 		out = vmalloc(HDRLEN_VMALLOC + size);
-		if (out) {
-			out += HDRLEN_VMALLOC;
-			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
-			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
-		}
+		if (!out)
+			goto done;
+
+		out += HDRLEN_VMALLOC;
+		container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
 	} else {
 		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
-		if (out) {
-			out += HDRLEN_KMALLOC;
-			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
-			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
-		}
+		if (!out)
+			goto done;
+
+		out += HDRLEN_KMALLOC;
+		container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
 	}
 
+	/* set reference counter no matter what kind of allocation was done */
+	atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
 	return out;
 }
 
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
 {
-	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+	return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
 }
 
 static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
 
 void ipc_rcu_putref(void *ptr)
 {
-	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+	if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
 		return;
 
 	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
@@ -669,38 +671,81 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
 }
 
 /**
+ * ipc_obtain_object
+ * @ids: ipc identifier set
+ * @id: ipc id to look for
+ *
+ * Look for an id in the ipc ids idr and return associated ipc object.
+ *
+ * Call inside the RCU critical section.
+ * The ipc object is *not* locked on exit.
+ */
+struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
+{
+	struct kern_ipc_perm *out;
+	int lid = ipcid_to_idx(id);
+
+	out = idr_find(&ids->ipcs_idr, lid);
+	if (!out)
+		return ERR_PTR(-EINVAL);
+
+	return out;
+}
+
+/**
  * ipc_lock - Lock an ipc structure without rw_mutex held
  * @ids: IPC identifier set
  * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
- * The ipc object is locked on exit.
+ * The ipc object is locked on successful exit.
 */
-
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 {
 	struct kern_ipc_perm *out;
-	int lid = ipcid_to_idx(id);
 
 	rcu_read_lock();
-	out = idr_find(&ids->ipcs_idr, lid);
-	if (out == NULL) {
-		rcu_read_unlock();
-		return ERR_PTR(-EINVAL);
-	}
+	out = ipc_obtain_object(ids, id);
+	if (IS_ERR(out))
+		goto err1;
 
 	spin_lock(&out->lock);
-	
+
 	/* ipc_rmid() may have already freed the ID while ipc_lock
 	 * was spinning: here verify that the structure is still valid
 	 */
-	if (out->deleted) {
-		spin_unlock(&out->lock);
-		rcu_read_unlock();
-		return ERR_PTR(-EINVAL);
-	}
+	if (!out->deleted)
+		return out;
 
+	spin_unlock(&out->lock);
+	out = ERR_PTR(-EINVAL);
+err1:
+	rcu_read_unlock();
+	return out;
+}
+
+/**
+ * ipc_obtain_object_check
+ * @ids: ipc identifier set
+ * @id: ipc id to look for
+ *
+ * Similar to ipc_obtain_object() but also checks
+ * the ipc object reference counter.
+ *
+ * Call inside the RCU critical section.
+ * The ipc object is *not* locked on exit.
+ */
+struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
+{
+	struct kern_ipc_perm *out = ipc_obtain_object(ids, id);
+
+	if (IS_ERR(out))
+		goto out;
+
+	if (ipc_checkid(out, id))
+		return ERR_PTR(-EIDRM);
+out:
 	return out;
 }
 
@@ -781,11 +826,28 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
 				      struct ipc64_perm *perm, int extra_perm)
 {
 	struct kern_ipc_perm *ipcp;
+
+	ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
+	if (IS_ERR(ipcp))
+		goto out;
+
+	spin_lock(&ipcp->lock);
+out:
+	return ipcp;
+}
+
+struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
+					     struct ipc_ids *ids, int id, int cmd,
+					     struct ipc64_perm *perm, int extra_perm)
+{
 	kuid_t euid;
-	int err;
+	int err = -EPERM;
+	struct kern_ipc_perm *ipcp;
 
 	down_write(&ids->rw_mutex);
-	ipcp = ipc_lock_check(ids, id);
+	rcu_read_lock();
+
+	ipcp = ipc_obtain_object_check(ids, id);
 	if (IS_ERR(ipcp)) {
 		err = PTR_ERR(ipcp);
 		goto out_up;
@@ -794,17 +856,21 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
 	audit_ipc_obj(ipcp);
 	if (cmd == IPC_SET)
 		audit_ipc_set_perm(extra_perm, perm->uid,
-				   perm->gid, perm->mode);
+				perm->gid, perm->mode);
 
 	euid = current_euid();
 	if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) ||
 	    ns_capable(ns->user_ns, CAP_SYS_ADMIN))
 		return ipcp;
 
-	err = -EPERM;
-	ipc_unlock(ipcp);
 out_up:
+	/*
+	 * Unsuccessful lookup, unlock and return
+	 * the corresponding error.
+	 */
+	rcu_read_unlock();
 	up_write(&ids->rw_mutex);
+
 	return ERR_PTR(err);
 }
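Both new lookup helpers return the ipc object unlocked and, per their kerneldoc above, must be called inside an RCU read-side critical section. A minimal sketch of that calling pattern follows; the caller name example_ipc_read_key() and the field it reads are illustrative assumptions, not part of this patch, and the surrounding ipc/util.h environment is assumed.

/*
 * Sketch only: a hypothetical lockless read-side caller of the helper
 * introduced above.  ipc_obtain_object_check() validates the id and
 * returns the object without taking its spinlock, so the whole lookup
 * stays under rcu_read_lock().
 */
static int example_ipc_read_key(struct ipc_ids *ids, int id, key_t *key)
{
	struct kern_ipc_perm *ipcp;
	int err = 0;

	rcu_read_lock();
	ipcp = ipc_obtain_object_check(ids, id);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out;
	}

	/* No spin_lock(&ipcp->lock): only a single word is read under RCU. */
	*key = ipcp->key;
out:
	rcu_read_unlock();
	return err;
}

Paths that modify the object still take spin_lock(&ipcp->lock) after the lookup, which is exactly what the rewritten ipc_lock() and ipcctl_pre_down() above do.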