author     Linus Torvalds <torvalds@linux-foundation.org>   2014-06-08 11:31:16 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-06-08 11:31:16 -0700
commit     3f17ea6dea8ba5668873afa54628a91aaa3fb1c0 (patch)
tree       afbeb2accd4c2199ddd705ae943995b143a0af02 /ipc
parent     1860e379875dfe7271c649058aeddffe5afd9d0d (diff)
parent     1a5700bc2d10cd379a795fd2bb377a190af5acd4 (diff)
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the
merge window.

* accumulated work in next: (6809 commits)
  ufs: sb mutex merge + mutex_destroy
  powerpc: update comments for generic idle conversion
  cris: update comments for generic idle conversion
  idle: remove cpu_idle() forward declarations
  nbd: zero from and len fields in NBD_CMD_DISCONNECT.
  mm: convert some level-less printks to pr_*
  MAINTAINERS: adi-buildroot-devel is moderated
  MAINTAINERS: add linux-api for review of API/ABI changes
  mm/kmemleak-test.c: use pr_fmt for logging
  fs/dlm/debug_fs.c: replace seq_printf by seq_puts
  fs/dlm/lockspace.c: convert simple_str to kstr
  fs/dlm/config.c: convert simple_str to kstr
  mm: mark remap_file_pages() syscall as deprecated
  mm: memcontrol: remove unnecessary memcg argument from soft limit functions
  mm: memcontrol: clean up memcg zoneinfo lookup
  mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
  mm/mempool.c: update the kmemleak stack trace for mempool allocations
  lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
  mm: introduce kmemleak_update_trace()
  mm/kmemleak.c: use %u to print ->checksum
  ...
Diffstat (limited to 'ipc')
-rw-r--r--   ipc/compat.c      |   2
-rw-r--r--   ipc/compat_mq.c   |   2
-rw-r--r--   ipc/ipc_sysctl.c  |  14
-rw-r--r--   ipc/mq_sysctl.c   |  12
-rw-r--r--   ipc/msg.c         | 188
-rw-r--r--   ipc/sem.c         | 171
-rw-r--r--   ipc/shm.c         |  23
-rw-r--r--   ipc/util.c        |  12
-rw-r--r--   ipc/util.h        |  10
9 files changed, 224 insertions(+), 210 deletions(-)
diff --git a/ipc/compat.c b/ipc/compat.c
index 45d035d4ced..b5ef4f7946d 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -30,7 +30,7 @@
#include <linux/ptrace.h>
#include <linux/mutex.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "util.h"
diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
index 90d29f59cac..ef6f91cc449 100644
--- a/ipc/compat_mq.c
+++ b/ipc/compat_mq.c
@@ -12,7 +12,7 @@
#include <linux/mqueue.h>
#include <linux/syscalls.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
struct compat_mq_attr {
compat_long_t mq_flags; /* message queue flags */
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index 998d31b230f..c3f0326e98d 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -18,7 +18,7 @@
#include <linux/msg.h>
#include "util.h"
-static void *get_ipc(ctl_table *table)
+static void *get_ipc(struct ctl_table *table)
{
char *which = table->data;
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
@@ -27,7 +27,7 @@ static void *get_ipc(ctl_table *table)
}
#ifdef CONFIG_PROC_SYSCTL
-static int proc_ipc_dointvec(ctl_table *table, int write,
+static int proc_ipc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
@@ -38,7 +38,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
return proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
}
-static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
+static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
@@ -49,7 +49,7 @@ static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
}
-static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
+static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ipc_namespace *ns = current->nsproxy->ipc_ns;
@@ -62,7 +62,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
return err;
}
-static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
+static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
@@ -85,7 +85,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
return rc;
}
-static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
+static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
@@ -119,7 +119,7 @@ static void ipc_auto_callback(int val)
}
}
-static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
+static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
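
Note: every ctl_table hunk in this file (and in mq_sysctl.c below) drops the
old ctl_table typedef in favor of the explicit struct tag, which kernel style
now prefers. A sketch of the resulting handler shape, assuming a get_ipc()-style
lookup as above (the handler name is hypothetical):

static int example_proc_dointvec(struct ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table tmp = *table;	/* local copy of the entry */

	/* redirect .data at the current ipc namespace's field */
	tmp.data = get_ipc(table);
	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}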
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 5bb8bfe6714..68d4e953762 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -14,7 +14,7 @@
#include <linux/sysctl.h>
#ifdef CONFIG_PROC_SYSCTL
-static void *get_mq(ctl_table *table)
+static void *get_mq(struct ctl_table *table)
{
char *which = table->data;
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
@@ -22,7 +22,7 @@ static void *get_mq(ctl_table *table)
return which;
}
-static int proc_mq_dointvec(ctl_table *table, int write,
+static int proc_mq_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table mq_table;
@@ -32,7 +32,7 @@ static int proc_mq_dointvec(ctl_table *table, int write,
return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
}
-static int proc_mq_dointvec_minmax(ctl_table *table, int write,
+static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table mq_table;
@@ -53,7 +53,7 @@ static int msg_max_limit_max = HARD_MSGMAX;
static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
-static ctl_table mq_sysctls[] = {
+static struct ctl_table mq_sysctls[] = {
{
.procname = "queues_max",
.data = &init_ipc_ns.mq_queues_max,
@@ -100,7 +100,7 @@ static ctl_table mq_sysctls[] = {
{}
};
-static ctl_table mq_sysctl_dir[] = {
+static struct ctl_table mq_sysctl_dir[] = {
{
.procname = "mqueue",
.mode = 0555,
@@ -109,7 +109,7 @@ static ctl_table mq_sysctl_dir[] = {
{}
};
-static ctl_table mq_sysctl_root[] = {
+static struct ctl_table mq_sysctl_root[] = {
{
.procname = "fs",
.mode = 0555,
diff --git a/ipc/msg.c b/ipc/msg.c
index 649853105a5..c5d8e374998 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -39,12 +39,10 @@
#include <linux/ipc_namespace.h>
#include <asm/current.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "util.h"
-/*
- * one msg_receiver structure for each sleeping receiver:
- */
+/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
struct list_head r_list;
struct task_struct *r_tsk;
@@ -53,6 +51,12 @@ struct msg_receiver {
long r_msgtype;
long r_maxsize;
+ /*
+ * Mark r_msg volatile so that the compiler
+ * does not try to get smart and optimize
+ * it. We rely on this for the lockless
+ * receive algorithm.
+ */
struct msg_msg *volatile r_msg;
};
@@ -70,75 +74,6 @@ struct msg_sender {
#define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS])
-static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
-static int newque(struct ipc_namespace *, struct ipc_params *);
-#ifdef CONFIG_PROC_FS
-static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
-#endif
-
-/*
- * Scale msgmni with the available lowmem size: the memory dedicated to msg
- * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
- * Also take into account the number of nsproxies created so far.
- * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
- */
-void recompute_msgmni(struct ipc_namespace *ns)
-{
- struct sysinfo i;
- unsigned long allowed;
- int nb_ns;
-
- si_meminfo(&i);
- allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
- / MSGMNB;
- nb_ns = atomic_read(&nr_ipc_ns);
- allowed /= nb_ns;
-
- if (allowed < MSGMNI) {
- ns->msg_ctlmni = MSGMNI;
- return;
- }
-
- if (allowed > IPCMNI / nb_ns) {
- ns->msg_ctlmni = IPCMNI / nb_ns;
- return;
- }
-
- ns->msg_ctlmni = allowed;
-}
-
-void msg_init_ns(struct ipc_namespace *ns)
-{
- ns->msg_ctlmax = MSGMAX;
- ns->msg_ctlmnb = MSGMNB;
-
- recompute_msgmni(ns);
-
- atomic_set(&ns->msg_bytes, 0);
- atomic_set(&ns->msg_hdrs, 0);
- ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
-}
-
-#ifdef CONFIG_IPC_NS
-void msg_exit_ns(struct ipc_namespace *ns)
-{
- free_ipcs(ns, &msg_ids(ns), freeque);
- idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
-}
-#endif
-
-void __init msg_init(void)
-{
- msg_init_ns(&init_ipc_ns);
-
- printk(KERN_INFO "msgmni has been set to %d\n",
- init_ipc_ns.msg_ctlmni);
-
- ipc_init_proc_interface("sysvipc/msg",
- " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
- IPC_MSG_IDS, sysvipc_msg_proc_show);
-}
-
static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);
@@ -227,7 +162,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
mss->tsk = current;
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(TASK_INTERRUPTIBLE);
list_add_tail(&mss->list, &msq->q_senders);
}
@@ -306,15 +241,14 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
struct ipc_namespace *ns;
- struct ipc_ops msg_ops;
+ static const struct ipc_ops msg_ops = {
+ .getnew = newque,
+ .associate = msg_security,
+ };
struct ipc_params msg_params;
ns = current->nsproxy->ipc_ns;
- msg_ops.getnew = newque;
- msg_ops.associate = msg_security;
- msg_ops.more_checks = NULL;
-
msg_params.key = key;
msg_params.flg = msgflg;
@@ -612,23 +546,22 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
static int testmsg(struct msg_msg *msg, long type, int mode)
{
- switch (mode)
- {
- case SEARCH_ANY:
- case SEARCH_NUMBER:
+ switch (mode) {
+ case SEARCH_ANY:
+ case SEARCH_NUMBER:
+ return 1;
+ case SEARCH_LESSEQUAL:
+ if (msg->m_type <= type)
return 1;
- case SEARCH_LESSEQUAL:
- if (msg->m_type <= type)
- return 1;
- break;
- case SEARCH_EQUAL:
- if (msg->m_type == type)
- return 1;
- break;
- case SEARCH_NOTEQUAL:
- if (msg->m_type != type)
- return 1;
- break;
+ break;
+ case SEARCH_EQUAL:
+ if (msg->m_type == type)
+ return 1;
+ break;
+ case SEARCH_NOTEQUAL:
+ if (msg->m_type != type)
+ return 1;
+ break;
}
return 0;
}
@@ -978,7 +911,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
else
msr_d.r_maxsize = bufsz;
msr_d.r_msg = ERR_PTR(-EAGAIN);
- current->state = TASK_INTERRUPTIBLE;
+ __set_current_state(TASK_INTERRUPTIBLE);
ipc_unlock_object(&msq->q_perm);
rcu_read_unlock();
@@ -1056,6 +989,57 @@ SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
+/*
+ * Scale msgmni with the available lowmem size: the memory dedicated to msg
+ * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
+ * Also take into account the number of nsproxies created so far.
+ * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range.
+ */
+void recompute_msgmni(struct ipc_namespace *ns)
+{
+ struct sysinfo i;
+ unsigned long allowed;
+ int nb_ns;
+
+ si_meminfo(&i);
+ allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
+ / MSGMNB;
+ nb_ns = atomic_read(&nr_ipc_ns);
+ allowed /= nb_ns;
+
+ if (allowed < MSGMNI) {
+ ns->msg_ctlmni = MSGMNI;
+ return;
+ }
+
+ if (allowed > IPCMNI / nb_ns) {
+ ns->msg_ctlmni = IPCMNI / nb_ns;
+ return;
+ }
+
+ ns->msg_ctlmni = allowed;
+}
+
+void msg_init_ns(struct ipc_namespace *ns)
+{
+ ns->msg_ctlmax = MSGMAX;
+ ns->msg_ctlmnb = MSGMNB;
+
+ recompute_msgmni(ns);
+
+ atomic_set(&ns->msg_bytes, 0);
+ atomic_set(&ns->msg_hdrs, 0);
+ ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
+}
+
+#ifdef CONFIG_IPC_NS
+void msg_exit_ns(struct ipc_namespace *ns)
+{
+ free_ipcs(ns, &msg_ids(ns), freeque);
+ idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
+}
+#endif
+
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
@@ -1080,3 +1064,15 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
msq->q_ctime);
}
#endif
+
+void __init msg_init(void)
+{
+ msg_init_ns(&init_ipc_ns);
+
+ printk(KERN_INFO "msgmni has been set to %d\n",
+ init_ipc_ns.msg_ctlmni);
+
+ ipc_init_proc_interface("sysvipc/msg",
+ " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
+ IPC_MSG_IDS, sysvipc_msg_proc_show);
+}
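
Note: two things stand out in the msg.c hunks. The open-coded
current->state = TASK_INTERRUPTIBLE; writes become __set_current_state(),
the helper every sleeper is supposed to use, and recompute_msgmni(),
msg_init_ns(), and msg_init() are moved, unchanged, below their callers so
the forward declarations at the top of the file can go away. A minimal sketch
of the standard sleep loop __set_current_state() belongs to (the event_ready()
predicate is hypothetical):

static void wait_for_event(void)
{
	for (;;) {
		/* mark ourselves sleeping *before* testing the
		 * condition, so a concurrent wakeup is not lost */
		__set_current_state(TASK_INTERRUPTIBLE);
		if (event_ready())	/* hypothetical wakeup condition */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}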
diff --git a/ipc/sem.c b/ipc/sem.c
index bee55541731..454f6c6020a 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -47,8 +47,7 @@
* Thus: Perfect SMP scaling between independent semaphore arrays.
* If multiple semaphores in one array are used, then cache line
* trashing on the semaphore array spinlock will limit the scaling.
- * - semncnt and semzcnt are calculated on demand in count_semncnt() and
- * count_semzcnt()
+ * - semncnt and semzcnt are calculated on demand in count_semcnt()
* - the task that performs a successful semop() scans the list of all
* sleeping tasks and completes any pending operations that can be fulfilled.
* Semaphores are actively given to waiting tasks (necessary for FIFO).
@@ -87,7 +86,7 @@
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "util.h"
/* One semaphore structure for each semaphore in the system. */
@@ -110,6 +109,7 @@ struct sem_queue {
int pid; /* process id of requesting process */
int status; /* completion status of operation */
struct sembuf *sops; /* array of pending operations */
+ struct sembuf *blocking; /* the operation that blocked */
int nsops; /* number of operations */
int alter; /* does *sops alter the array? */
};
@@ -160,7 +160,7 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
* sem_array.pending{_alter,_cont},
* sem_array.sem_undo: global sem_lock() for read/write
* sem_undo.proc_next: only "current" is allowed to read/write that field.
- *
+ *
* sem_array.sem_base[i].pending_{const,alter}:
* global or semaphore sem_lock() for read/write
*/
@@ -564,7 +564,11 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
struct ipc_namespace *ns;
- struct ipc_ops sem_ops;
+ static const struct ipc_ops sem_ops = {
+ .getnew = newary,
+ .associate = sem_security,
+ .more_checks = sem_more_checks,
+ };
struct ipc_params sem_params;
ns = current->nsproxy->ipc_ns;
@@ -572,10 +576,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
if (nsems < 0 || nsems > ns->sc_semmsl)
return -EINVAL;
- sem_ops.getnew = newary;
- sem_ops.associate = sem_security;
- sem_ops.more_checks = sem_more_checks;
-
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
@@ -586,21 +586,23 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
/**
* perform_atomic_semop - Perform (if possible) a semaphore operation
* @sma: semaphore array
- * @sops: array with operations that should be checked
- * @nsops: number of operations
- * @un: undo array
- * @pid: pid that did the change
+ * @q: struct sem_queue that describes the operation
*
* Returns 0 if the operation was possible.
* Returns 1 if the operation is impossible, the caller must sleep.
* Negative values are error codes.
*/
-static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
- int nsops, struct sem_undo *un, int pid)
+static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
{
- int result, sem_op;
+ int result, sem_op, nsops, pid;
struct sembuf *sop;
struct sem *curr;
+ struct sembuf *sops;
+ struct sem_undo *un;
+
+ sops = q->sops;
+ nsops = q->nsops;
+ un = q->undo;
for (sop = sops; sop < sops + nsops; sop++) {
curr = sma->sem_base + sop->sem_num;
@@ -628,6 +630,7 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
}
sop--;
+ pid = q->pid;
while (sop >= sops) {
sma->sem_base[sop->sem_num].sempid = pid;
sop--;
@@ -640,6 +643,8 @@ out_of_range:
goto undo;
would_block:
+ q->blocking = sop;
+
if (sop->sem_flg & IPC_NOWAIT)
result = -EAGAIN;
else
@@ -780,8 +785,7 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
q = container_of(walk, struct sem_queue, list);
walk = walk->next;
- error = perform_atomic_semop(sma, q->sops, q->nsops,
- q->undo, q->pid);
+ error = perform_atomic_semop(sma, q);
if (error <= 0) {
/* operation completed, remove from queue & wakeup */
@@ -893,8 +897,7 @@ again:
if (semnum != -1 && sma->sem_base[semnum].semval == 0)
break;
- error = perform_atomic_semop(sma, q->sops, q->nsops,
- q->undo, q->pid);
+ error = perform_atomic_semop(sma, q);
/* Does q->sleeper still need to sleep? */
if (error > 0)
@@ -989,65 +992,74 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
set_semotime(sma, sops);
}
-/* The following counts are associated to each semaphore:
- * semncnt number of tasks waiting on semval being nonzero
- * semzcnt number of tasks waiting on semval being zero
- * This model assumes that a task waits on exactly one semaphore.
- * Since semaphore operations are to be performed atomically, tasks actually
- * wait on a whole sequence of semaphores simultaneously.
- * The counts we return here are a rough approximation, but still
- * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
+/*
+ * check_qop: Test if a queued operation sleeps on the semaphore semnum
*/
-static int count_semncnt(struct sem_array *sma, ushort semnum)
+static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
+ bool count_zero)
{
- int semncnt;
- struct sem_queue *q;
+ struct sembuf *sop = q->blocking;
- semncnt = 0;
- list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
- struct sembuf *sops = q->sops;
- BUG_ON(sops->sem_num != semnum);
- if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
- semncnt++;
- }
+ /*
+ * Linux always (since 0.99.10) reported a task as sleeping on all
+ * semaphores. This violates SUS, therefore it was changed to the
+ * standard compliant behavior.
+ * Give the administrators a chance to notice that an application
+ * might misbehave because it relies on the Linux behavior.
+ */
+ pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
+ "The task %s (%d) triggered the difference, watch for misbehavior.\n",
+ current->comm, task_pid_nr(current));
- list_for_each_entry(q, &sma->pending_alter, list) {
- struct sembuf *sops = q->sops;
- int nsops = q->nsops;
- int i;
- for (i = 0; i < nsops; i++)
- if (sops[i].sem_num == semnum
- && (sops[i].sem_op < 0)
- && !(sops[i].sem_flg & IPC_NOWAIT))
- semncnt++;
- }
- return semncnt;
+ if (sop->sem_num != semnum)
+ return 0;
+
+ if (count_zero && sop->sem_op == 0)
+ return 1;
+ if (!count_zero && sop->sem_op < 0)
+ return 1;
+
+ return 0;
}
-static int count_semzcnt(struct sem_array *sma, ushort semnum)
+/* The following counts are associated to each semaphore:
+ * semncnt number of tasks waiting on semval being nonzero
+ * semzcnt number of tasks waiting on semval being zero
+ *
+ * Per definition, a task waits only on the semaphore of the first semop
+ * that cannot proceed, even if additional operation would block, too.
+ */
+static int count_semcnt(struct sem_array *sma, ushort semnum,
+ bool count_zero)
{
- int semzcnt;
+ struct list_head *l;
struct sem_queue *q;
+ int semcnt;
+
+ semcnt = 0;
+ /* First: check the simple operations. They are easy to evaluate */
+ if (count_zero)
+ l = &sma->sem_base[semnum].pending_const;
+ else
+ l = &sma->sem_base[semnum].pending_alter;
- semzcnt = 0;
- list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
- struct sembuf *sops = q->sops;
- BUG_ON(sops->sem_num != semnum);
- if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
- semzcnt++;
+ list_for_each_entry(q, l, list) {
+ /* all task on a per-semaphore list sleep on exactly
+ * that semaphore
+ */
+ semcnt++;
}
- list_for_each_entry(q, &sma->pending_const, list) {
- struct sembuf *sops = q->sops;
- int nsops = q->nsops;
- int i;
- for (i = 0; i < nsops; i++)
- if (sops[i].sem_num == semnum
- && (sops[i].sem_op == 0)
- && !(sops[i].sem_flg & IPC_NOWAIT))
- semzcnt++;
+ /* Then: check the complex operations. */
+ list_for_each_entry(q, &sma->pending_alter, list) {
+ semcnt += check_qop(sma, semnum, q, count_zero);
}
- return semzcnt;
+ if (count_zero) {
+ list_for_each_entry(q, &sma->pending_const, list) {
+ semcnt += check_qop(sma, semnum, q, count_zero);
+ }
+ }
+ return semcnt;
}
/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
@@ -1161,7 +1173,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
err = security_sem_semctl(NULL, cmd);
if (err)
return err;
-
+
memset(&seminfo, 0, sizeof(seminfo));
seminfo.semmni = ns->sc_semmni;
seminfo.semmns = ns->sc_semmns;
@@ -1181,7 +1193,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
}
max_id = ipc_get_maxid(&sem_ids(ns));
up_read(&sem_ids(ns).rwsem);
- if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
+ if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
return -EFAULT;
return (max_id < 0) ? 0 : max_id;
}
@@ -1449,10 +1461,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = curr->sempid;
goto out_unlock;
case GETNCNT:
- err = count_semncnt(sma, semnum);
+ err = count_semcnt(sma, semnum, 0);
goto out_unlock;
case GETZCNT:
- err = count_semzcnt(sma, semnum);
+ err = count_semcnt(sma, semnum, 1);
goto out_unlock;
}
@@ -1866,8 +1878,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
if (un && un->semid == -1)
goto out_unlock_free;
- error = perform_atomic_semop(sma, sops, nsops, un,
- task_tgid_vnr(current));
+ queue.sops = sops;
+ queue.nsops = nsops;
+ queue.undo = un;
+ queue.pid = task_tgid_vnr(current);
+ queue.alter = alter;
+
+ error = perform_atomic_semop(sma, &queue);
if (error == 0) {
/* If the operation was successful, then do
* the required updates.
@@ -1883,12 +1900,6 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
/* We need to sleep on this operation, so we put the current
* task into the pending queue and go to sleep.
*/
-
- queue.sops = sops;
- queue.nsops = nsops;
- queue.undo = un;
- queue.pid = task_tgid_vnr(current);
- queue.alter = alter;
if (nsops == 1) {
struct sem *curr;
@@ -2016,7 +2027,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
return error;
atomic_inc(&undo_list->refcnt);
tsk->sysvsem.undo_list = undo_list;
- } else
+ } else
tsk->sysvsem.undo_list = NULL;
return 0;
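
Note: the sem.c rework threads a struct sem_queue through
perform_atomic_semop() and records the blocking sembuf in q->blocking, which
is what lets count_semcnt() replace the two near-duplicate count_semncnt()/
count_semzcnt() walkers with a single comparison per complex operation. The
visible semantic change is in semctl(GETNCNT/GETZCNT): a task now counts only
toward the first operation that blocked it, per SUS. A userspace sketch of
the affected query (helper name hypothetical):

#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* How many tasks sleep waiting for semaphore `semnum` of set `semid`
 * to become nonzero?  Under the new behavior, a task blocked on a
 * multi-sembuf semop() is counted only on the first operation that
 * could not proceed. */
static int waiters_on_nonzero(int semid, int semnum)
{
	return semctl(semid, semnum, GETNCNT);
}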
diff --git a/ipc/shm.c b/ipc/shm.c
index 76459616a7f..89fc354156c 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -43,7 +43,7 @@
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "util.h"
@@ -493,7 +493,11 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if (size < SHMMIN || size > ns->shm_ctlmax)
return -EINVAL;
- if (ns->shm_tot + numpages > ns->shm_ctlall)
+ if (numpages << PAGE_SHIFT < size)
+ return -ENOSPC;
+
+ if (ns->shm_tot + numpages < ns->shm_tot ||
+ ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
shp = ipc_rcu_alloc(sizeof(*shp));
@@ -609,15 +613,15 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
struct ipc_namespace *ns;
- struct ipc_ops shm_ops;
+ static const struct ipc_ops shm_ops = {
+ .getnew = newseg,
+ .associate = shm_security,
+ .more_checks = shm_more_checks,
+ };
struct ipc_params shm_params;
ns = current->nsproxy->ipc_ns;
- shm_ops.getnew = newseg;
- shm_ops.associate = shm_security;
- shm_ops.more_checks = shm_more_checks;
-
shm_params.key = key;
shm_params.flg = shmflg;
shm_params.u.size = size;
@@ -694,7 +698,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf
out.shmmin = in->shmmin;
out.shmmni = in->shmmni;
out.shmseg = in->shmseg;
- out.shmall = in->shmall;
+ out.shmall = in->shmall;
return copy_to_user(buf, &out, sizeof(out));
}
@@ -1160,6 +1164,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
down_write(&current->mm->mmap_sem);
if (addr && !(shmflg & SHM_REMAP)) {
err = -EINVAL;
+ if (addr + size < addr)
+ goto invalid;
+
if (find_vma_intersection(current->mm, addr, addr + size))
goto invalid;
/*
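
Note: all three new shm.c checks guard against unsigned wraparound:
numpages << PAGE_SHIFT < size catches a page count that overflowed while
rounding the segment size up to pages, shm_tot + numpages < shm_tot stops
the global page accounting from wrapping, and addr + size < addr rejects an
attach range that wraps the end of the address space. A condensed sketch of
the attach-side guards (the helper name is hypothetical):

static bool shm_range_overflows(unsigned long addr, size_t size,
				size_t numpages)
{
	/* rounding size up to pages wrapped the page count */
	if ((numpages << PAGE_SHIFT) < size)
		return true;
	/* the requested mapping wraps the end of the address space */
	if (addr + size < addr)
		return true;
	return false;
}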
diff --git a/ipc/util.c b/ipc/util.c
index 2eb0d1eaa31..27d74e69fd5 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -183,7 +183,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
* ipc_findkey - find a key in an ipc identifier set
* @ids: ipc identifier set
* @key: key to find
- *
+ *
* Returns the locked pointer to the ipc structure if found or NULL
* otherwise. If key is found ipc points to the owning ipc structure
*
@@ -317,7 +317,7 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
* when the key is IPC_PRIVATE.
*/
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
- struct ipc_ops *ops, struct ipc_params *params)
+ const struct ipc_ops *ops, struct ipc_params *params)
{
int err;
@@ -344,7 +344,7 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
*/
static int ipc_check_perms(struct ipc_namespace *ns,
struct kern_ipc_perm *ipcp,
- struct ipc_ops *ops,
+ const struct ipc_ops *ops,
struct ipc_params *params)
{
int err;
@@ -375,7 +375,7 @@ static int ipc_check_perms(struct ipc_namespace *ns,
* On success, the ipc id is returned.
*/
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
- struct ipc_ops *ops, struct ipc_params *params)
+ const struct ipc_ops *ops, struct ipc_params *params)
{
struct kern_ipc_perm *ipcp;
int flg = params->flg;
@@ -538,7 +538,7 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
- if ((requested_mode & ~granted_mode & 0007) &&
+ if ((requested_mode & ~granted_mode & 0007) &&
!ns_capable(ns->user_ns, CAP_IPC_OWNER))
return -1;
@@ -678,7 +678,7 @@ out:
* Common routine called by sys_msgget(), sys_semget() and sys_shmget().
*/
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
- struct ipc_ops *ops, struct ipc_params *params)
+ const struct ipc_ops *ops, struct ipc_params *params)
{
if (params->key == IPC_PRIVATE)
return ipcget_new(ns, ids, ops, params);
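
Note: constifying struct ipc_ops through ipcget() and its helpers here is
what allows msgget(), semget(), and shmget() above to declare their ops
tables static const: the function-pointer block moves from per-call stack
initialization into .rodata and can no longer be modified at runtime. In
sketch form, using msg.c's hooks:

static const struct ipc_ops example_ops = {
	.getnew		= newque,	/* create a new queue */
	.associate	= msg_security,	/* permission check on attach */
	/* .more_checks left out: unset members of a static initializer
	 * are zero, so the explicit NULL assignment the old stack-local
	 * version needed simply disappears. */
};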
diff --git a/ipc/util.h b/ipc/util.h
index 9c47d6f6c7b..1a5a0fcd099 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -78,9 +78,9 @@ struct ipc_params {
* . routine to call for an extra check if needed
*/
struct ipc_ops {
- int (*getnew) (struct ipc_namespace *, struct ipc_params *);
- int (*associate) (struct kern_ipc_perm *, int);
- int (*more_checks) (struct kern_ipc_perm *, struct ipc_params *);
+ int (*getnew)(struct ipc_namespace *, struct ipc_params *);
+ int (*associate)(struct kern_ipc_perm *, int);
+ int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *);
};
struct seq_file;
@@ -142,7 +142,7 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
struct ipc64_perm *perm, int extra_perm);
#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION
- /* On IA-64, we always use the "64-bit version" of the IPC structures. */
+/* On IA-64, we always use the "64-bit version" of the IPC structures. */
# define ipc_parse_version(cmd) IPC_64
#else
int ipc_parse_version(int *cmd);
@@ -201,7 +201,7 @@ static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
- struct ipc_ops *ops, struct ipc_params *params);
+ const struct ipc_ops *ops, struct ipc_params *params);
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
void (*free)(struct ipc_namespace *, struct kern_ipc_perm *));
#endif