From 73c101011926c5832e6e141682180c4debe2cf45 Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Tue, 8 Mar 2011 13:19:51 +0100
Subject: block: initial patch for on-stack per-task plugging

This patch adds support for creating a queuing context outside of the
queue itself. This enables us to batch up pieces of IO before grabbing
the block device queue lock and submitting them to the IO scheduler.

The context is created on the stack of the process and assigned in the
task structure, so that we can auto-unplug it if we hit a schedule event.

The current queue plugging happens implicitly if IO is submitted to an
empty device, yet callers have to remember to unplug that IO when they
are going to wait for it. This is an ugly API and has caused bugs in the
past. Additionally, it requires hacks in the vm (->sync_page() callback)
to handle that logic. By switching to an explicit plugging scheme we make
the API a lot nicer and can get rid of the ->sync_page() hack in the vm.

Signed-off-by: Jens Axboe
---
 kernel/sched.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7b..ca098bf4cc6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3978,6 +3978,16 @@ need_resched_nonpreemptible:
 		switch_count = &prev->nvcsw;
 	}
 
+	/*
+	 * If we are going to sleep and we have plugged IO queued, make
+	 * sure to submit it to avoid deadlocks.
+	 */
+	if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
+		raw_spin_unlock(&rq->lock);
+		blk_flush_plug(prev);
+		raw_spin_lock(&rq->lock);
+	}
+
 	pre_schedule(rq, prev);
 
 	if (unlikely(!rq->nr_running))
@@ -5333,6 +5343,7 @@ void __sched io_schedule(void)
 
 	delayacct_blkio_start();
 	atomic_inc(&rq->nr_iowait);
+	blk_flush_plug(current);
 	current->in_iowait = 1;
 	schedule();
 	current->in_iowait = 0;
@@ -5348,6 +5359,7 @@ long __sched io_schedule_timeout(long timeout)
 
 	delayacct_blkio_start();
 	atomic_inc(&rq->nr_iowait);
+	blk_flush_plug(current);
 	current->in_iowait = 1;
 	ret = schedule_timeout(timeout);
 	current->in_iowait = 0;
-- cgit v1.2.3-70-g09d2
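The explicit plugging scheme this patch hooks into the scheduler lives on the
submitter's stack: the caller opens a plug, queues its IO, and flushes the
plug when it is done, with the schedule() hunk above acting as a safety net if
the task sleeps while the plug is still armed. A minimal usage sketch,
assuming the blk_start_plug()/blk_finish_plug() helpers this series adds in
block/blk-core.c (not visible in this kernel/sched.c-only view); has_more_io()
and next_bio() are placeholders for the caller's own IO generation:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* plug is remembered in current->plug */

	/* bios submitted here are held back on the on-stack plug */
	while (has_more_io())
		submit_bio(WRITE, next_bio());

	/*
	 * Unplug: push the batched IO to the IO scheduler in one go. If the
	 * task blocks before this point, the blk_needs_flush_plug() check in
	 * schedule() above flushes the plug on its behalf.
	 */
	blk_finish_plug(&plug);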
From 16addf954d3954a72fd56abc02ffcba3c18529a1 Mon Sep 17 00:00:00 2001
From: Randy Dunlap
Date: Fri, 18 Mar 2011 09:34:53 -0700
Subject: sched: Fix yield_to kernel-doc

Add missing function parameters for yield_to():

 Warning(kernel/sched.c:5470): No description found for parameter 'p'
 Warning(kernel/sched.c:5470): No description found for parameter 'preempt'

Signed-off-by: Randy Dunlap
Cc: Peter Zijlstra
LKML-Reference: <20110318093453.8f7489a4.randy.dunlap@oracle.com>
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 58d66ea7d20..052120d6770 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5467,6 +5467,8 @@ EXPORT_SYMBOL(yield);
  * yield_to - yield the current processor to another thread in
  * your thread group, or accelerate that thread toward the
  * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
  *
  * It's the caller's job to ensure that the target task struct
  * can't go away on us before we can do any checks.
-- cgit v1.2.3-70-g09d2
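As the kernel-doc above describes, yield_to() lets a task that knows exactly
which sibling it is waiting on hand its CPU to that task (or nudge it along on
its own CPU) rather than spinning blindly. A rough caller-side sketch, assuming
the signature of this era, bool yield_to(struct task_struct *p, bool preempt);
lock_holder_task() is a placeholder for however the caller finds its target:

	struct task_struct *target = lock_holder_task();	/* placeholder */
	bool yielded = false;

	/* Boost the task we are waiting on; allow it to preempt us. */
	if (target)
		yielded = yield_to(target, true);

	/* If nothing was boosted, fall back to plain spinning. */
	if (!yielded)
		cpu_relax();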
From 20dd67407160eac577656cd2f8ee9a1fead960b8 Mon Sep 17 00:00:00 2001
From: Sergey Senozhatsky
Date: Wed, 23 Mar 2011 13:17:23 +0200
Subject: sched: Remove unused 'rq' variable and cpu_rq() call from alloc_fair_sched_group()

Signed-off-by: Sergey Senozhatsky
Cc: Steven Rostedt
Cc: Peter Zijlstra
LKML-Reference: <20110323111722.GA4244@swordfish.minsk.epam.com>
Signed-off-by: Ingo Molnar
---
 kernel/sched.c | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/kernel/sched.c b/kernel/sched.c
index 052120d6770..a361e20ec2c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8443,7 +8443,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se;
-	struct rq *rq;
 	int i;
 
 	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
@@ -8456,8 +8455,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 	tg->shares = NICE_0_LOAD;
 
 	for_each_possible_cpu(i) {
-		rq = cpu_rq(i);
-
 		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
 				      GFP_KERNEL, cpu_to_node(i));
 		if (!cfs_rq)
-- cgit v1.2.3-70-g09d2

From b0e77598f87107001a00b8a4ece9c95e4254ccc4 Mon Sep 17 00:00:00 2001
From: "Serge E. Hallyn"
Date: Wed, 23 Mar 2011 16:43:24 -0700
Subject: userns: user namespaces: convert several capable() calls

CAP_IPC_OWNER and CAP_IPC_LOCK can be checked against current_user_ns(),
because the resource comes from current's own ipc namespace.

setuid/setgid are to uids in own namespace, so again checks can be
against current_user_ns().

Changelog:
	Jan 11: Use task_ns_capable() in place of sched_capable().
	Jan 11: Use nsown_capable() as suggested by Bastian Blank.
	Jan 11: Clarify (hopefully) some logic in futex and sched.c
	Feb 15: use ns_capable for ipc, not nsown_capable
	Feb 23: let copy_ipcs handle setting ipc_ns->user_ns
	Feb 23: pass ns down rather than taking it from current

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Serge E. Hallyn
Acked-by: "Eric W. Biederman"
Acked-by: Daniel Lezcano
Acked-by: David Howells
Cc: James Morris
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 include/linux/ipc_namespace.h |  7 ++++---
 ipc/msg.c                     |  8 ++++----
 ipc/namespace.c               | 13 ++++++++-----
 ipc/sem.c                     | 10 ++++++----
 ipc/shm.c                     |  9 +++++----
 ipc/util.c                    | 26 ++++++++++++++++----------
 ipc/util.h                    |  5 +++--
 kernel/futex.c                | 11 ++++++++++-
 kernel/futex_compat.c         | 11 ++++++++++-
 kernel/groups.c               |  2 +-
 kernel/nsproxy.c              |  7 +------
 kernel/sched.c                |  9 ++++++---
 kernel/uid16.c                |  2 +-
 13 files changed, 75 insertions(+), 45 deletions(-)

(limited to 'kernel/sched.c')

diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index d3c32dcec62..a6d1655f960 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -5,6 +5,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /*
  * ipc namespace events
@@ -93,7 +94,7 @@ static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
 
 #if defined(CONFIG_IPC_NS)
 extern struct ipc_namespace *copy_ipcs(unsigned long flags,
-				       struct ipc_namespace *ns);
+				       struct task_struct *tsk);
 static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
 {
 	if (ns)
@@ -104,12 +105,12 @@ static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
 extern void put_ipc_ns(struct ipc_namespace *ns);
 #else
 static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
-		struct ipc_namespace *ns)
+		struct task_struct *tsk)
 {
 	if (flags & CLONE_NEWIPC)
 		return ERR_PTR(-EINVAL);
 
-	return ns;
+	return tsk->nsproxy->ipc_ns;
 }
 
 static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
diff --git a/ipc/msg.c b/ipc/msg.c
index 747b65507a9..0e732e92e22 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -421,7 +421,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 			return -EFAULT;
 	}
 
-	ipcp = ipcctl_pre_down(&msg_ids(ns), msqid, cmd,
+	ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
 			       &msqid64.msg_perm, msqid64.msg_qbytes);
 	if (IS_ERR(ipcp))
 		return PTR_ERR(ipcp);
@@ -539,7 +539,7 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
 			success_return = 0;
 		}
 		err = -EACCES;
-		if (ipcperms(&msq->q_perm, S_IRUGO))
+		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
 			goto out_unlock;
 
 		err = security_msg_queue_msgctl(msq, cmd);
@@ -664,7 +664,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 	struct msg_sender s;
 
 	err = -EACCES;
-	if (ipcperms(&msq->q_perm, S_IWUGO))
+	if (ipcperms(ns, &msq->q_perm, S_IWUGO))
 		goto out_unlock_free;
 
 	err = security_msg_queue_msgsnd(msq, msg, msgflg);
@@ -774,7 +774,7 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
 		struct list_head *tmp;
 
 		msg = ERR_PTR(-EACCES);
-		if (ipcperms(&msq->q_perm, S_IRUGO))
+		if (ipcperms(ns, &msq->q_perm, S_IRUGO))
 			goto out_unlock;
 
 		msg = ERR_PTR(-EAGAIN);
diff --git a/ipc/namespace.c b/ipc/namespace.c
index aa188996269..3c3e5223e7e 100644
--- a/ipc/namespace.c
+++ b/ipc/namespace.c
@@ -15,7 +15,8 @@
 
 #include "util.h"
 
-static struct ipc_namespace *create_ipc_ns(struct ipc_namespace *old_ns)
+static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk,
+					   struct ipc_namespace *old_ns)
 {
 	struct ipc_namespace *ns;
 	int err;
@@ -44,17 +45,19 @@ static struct ipc_namespace *create_ipc_ns(struct ipc_namespace *old_ns)
 	ipcns_notify(IPCNS_CREATED);
 	register_ipcns_notifier(ns);
 
-	ns->user_ns = old_ns->user_ns;
-	get_user_ns(ns->user_ns);
+	ns->user_ns = get_user_ns(task_cred_xxx(tsk, user)->user_ns);
+
 	return ns;
 }
 
-struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns)
+struct ipc_namespace *copy_ipcs(unsigned long flags,
+				struct task_struct *tsk)
 {
+	struct ipc_namespace *ns = tsk->nsproxy->ipc_ns;
+
 	if (!(flags & CLONE_NEWIPC))
 		return get_ipc_ns(ns);
 
-	return create_ipc_ns(ns);
+	return create_ipc_ns(tsk, ns);
 }
 
 /*
diff --git a/ipc/sem.c b/ipc/sem.c
index 0e0d49bbb86..ae040a0727c 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -817,7 +817,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 	}
 
 	err = -EACCES;
-	if (ipcperms (&sma->sem_perm, S_IRUGO))
+	if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
 		goto out_unlock;
 
 	err = security_sem_semctl(sma, cmd);
@@ -862,7 +862,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 	nsems = sma->sem_nsems;
 
 	err = -EACCES;
-	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
+	if (ipcperms(ns, &sma->sem_perm,
+			(cmd == SETVAL || cmd == SETALL) ? S_IWUGO : S_IRUGO))
 		goto out_unlock;
 
 	err = security_sem_semctl(sma, cmd);
@@ -1047,7 +1048,8 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
 		return -EFAULT;
 	}
 
-	ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
+	ipcp = ipcctl_pre_down(ns, &sem_ids(ns), semid, cmd,
+			       &semid64.sem_perm, 0);
 	if (IS_ERR(ipcp))
 		return PTR_ERR(ipcp);
 
@@ -1386,7 +1388,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		goto out_unlock_free;
 
 	error = -EACCES;
-	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
+	if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
 		goto out_unlock_free;
 
 	error = security_sem_semop(sma, sops, nsops, alter);
diff --git a/ipc/shm.c b/ipc/shm.c
index 7d3bb22a930..8644452f5c4 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -623,7 +623,8 @@ static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
 		return -EFAULT;
 	}
 
-	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
+	ipcp = ipcctl_pre_down(ns, &shm_ids(ns), shmid, cmd,
+			       &shmid64.shm_perm, 0);
 	if (IS_ERR(ipcp))
 		return PTR_ERR(ipcp);
 
@@ -737,7 +738,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 			result = 0;
 		}
 		err = -EACCES;
-		if (ipcperms (&shp->shm_perm, S_IRUGO))
+		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
 			goto out_unlock;
 		err = security_shm_shmctl(shp, cmd);
 		if (err)
@@ -773,7 +774,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
 
 		audit_ipc_obj(&(shp->shm_perm));
 
-		if (!capable(CAP_IPC_LOCK)) {
+		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
 			uid_t euid = current_euid();
 			err = -EPERM;
 			if (euid != shp->shm_perm.uid &&
@@ -888,7 +889,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
 	}
 
 	err = -EACCES;
-	if (ipcperms(&shp->shm_perm, acc_mode))
+	if (ipcperms(ns, &shp->shm_perm, acc_mode))
 		goto out_unlock;
 
 	err = security_shm_shmat(shp, shmaddr, shmflg);
diff --git a/ipc/util.c b/ipc/util.c
index 69a0cc13d96..8fd1b891ec0 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -329,12 +329,14 @@ retry:
  *
  * It is called with ipc_ids.rw_mutex and ipcp->lock held.
  */
-static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
-			struct ipc_params *params)
+static int ipc_check_perms(struct ipc_namespace *ns,
+			   struct kern_ipc_perm *ipcp,
+			   struct ipc_ops *ops,
+			   struct ipc_params *params)
 {
 	int err;
 
-	if (ipcperms(ipcp, params->flg))
+	if (ipcperms(ns, ipcp, params->flg))
 		err = -EACCES;
 	else {
 		err = ops->associate(ipcp, params->flg);
@@ -396,7 +398,7 @@ retry:
 			 * ipc_check_perms returns the IPC id on
 			 * success
 			 */
-			err = ipc_check_perms(ipcp, ops, params);
+			err = ipc_check_perms(ns, ipcp, ops, params);
 		}
 		ipc_unlock(ipcp);
 	}
@@ -610,10 +612,12 @@ void ipc_rcu_putref(void *ptr)
  *
  * Check user, group, other permissions for access
  * to ipc resources. return 0 if allowed
+ *
+ * @flag will most probably be 0 or S_...UGO from
  */
-int ipcperms (struct kern_ipc_perm *ipcp, short flag)
-{	/* flag will most probably be 0 or S_...UGO from */
+int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
+{
 	uid_t euid = current_euid();
 	int requested_mode, granted_mode;
 
@@ -627,7 +631,7 @@ int ipcperms (struct kern_ipc_perm *ipcp, short flag)
 	granted_mode >>= 3;
 	/* is there some bit set in requested_mode but not in granted_mode? */
 	if ((requested_mode & ~granted_mode & 0007) &&
-	    !capable(CAP_IPC_OWNER))
+	    !ns_capable(ns->user_ns, CAP_IPC_OWNER))
 		return -1;
 
 	return security_ipc_permission(ipcp, flag);
@@ -765,6 +769,7 @@ void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
 
 /**
  * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * @ids: the ipc namespace
  * @ids: the table of ids where to look for the ipc
  * @id:  the id of the ipc to retrieve
  * @cmd: the cmd to check
@@ -779,7 +784,8 @@ void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
  *  - returns the ipc with both ipc and rw_mutex locks held in case of success
  *    or an err-code without any lock held otherwise.
  */
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
+				      struct ipc_ids *ids, int id, int cmd,
 				      struct ipc64_perm *perm, int extra_perm)
 {
 	struct kern_ipc_perm *ipcp;
@@ -799,8 +805,8 @@ struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
 			      perm->gid, perm->mode);
 
 	euid = current_euid();
-	if (euid == ipcp->cuid ||
-	    euid == ipcp->uid  || capable(CAP_SYS_ADMIN))
+	if (euid == ipcp->cuid || euid == ipcp->uid ||
+	    ns_capable(ns->user_ns, CAP_SYS_ADMIN))
 		return ipcp;
 
 	err = -EPERM;
diff --git a/ipc/util.h b/ipc/util.h
index 764b51a37a6..6f5c20bedaa 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -103,7 +103,7 @@ int ipc_get_maxid(struct ipc_ids *);
 void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *);
 
 /* must be called with ipcp locked */
-int ipcperms(struct kern_ipc_perm *ipcp, short flg);
+int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
 
 /* for rare, potentially huge allocations.
  * both function can sleep
@@ -126,7 +126,8 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
 void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
-struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
+				      struct ipc_ids *ids, int id, int cmd,
 				      struct ipc64_perm *perm, int extra_perm);
 
 #ifndef __ARCH_WANT_IPC_PARSE_VERSION
diff --git a/kernel/futex.c b/kernel/futex.c
index bda41571538..6570c459f31 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2418,10 +2418,19 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
 			goto err_unlock;
 		ret = -EPERM;
 		pcred = __task_cred(p);
+		/* If victim is in different user_ns, then uids are not
+		   comparable, so we must have CAP_SYS_PTRACE */
+		if (cred->user->user_ns != pcred->user->user_ns) {
+			if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+				goto err_unlock;
+			goto ok;
+		}
+		/* If victim is in same user_ns, then uids are comparable */
 		if (cred->euid != pcred->euid &&
 		    cred->euid != pcred->uid &&
-		    !capable(CAP_SYS_PTRACE))
+		    !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
 			goto err_unlock;
+ok:
 		head = p->robust_list;
 		rcu_read_unlock();
 	}
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index a7934ac75e5..5f9e689dc8f 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -153,10 +153,19 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
 			goto err_unlock;
 		ret = -EPERM;
 		pcred = __task_cred(p);
+		/* If victim is in different user_ns, then uids are not
+		   comparable, so we must have CAP_SYS_PTRACE */
+		if (cred->user->user_ns != pcred->user->user_ns) {
+			if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+				goto err_unlock;
+			goto ok;
+		}
+		/* If victim is in same user_ns, then uids are comparable */
 		if (cred->euid != pcred->euid &&
 		    cred->euid != pcred->uid &&
-		    !capable(CAP_SYS_PTRACE))
+		    !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
 			goto err_unlock;
+ok:
 		head = p->compat_robust_list;
 		rcu_read_unlock();
 	}
diff --git a/kernel/groups.c b/kernel/groups.c
index 253dc0f35cf..1cc476d52dd 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -233,7 +233,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
 	struct group_info *group_info;
 	int retval;
 
-	if (!capable(CAP_SETGID))
+	if (!nsown_capable(CAP_SETGID))
 		return -EPERM;
 	if ((unsigned)gidsetsize > NGROUPS_MAX)
 		return -EINVAL;
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index ac8a56e90bf..a05d191ffdd 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -75,16 +75,11 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 		goto out_uts;
 	}
 
-	new_nsp->ipc_ns = copy_ipcs(flags, tsk->nsproxy->ipc_ns);
+	new_nsp->ipc_ns = copy_ipcs(flags, tsk);
 	if (IS_ERR(new_nsp->ipc_ns)) {
 		err = PTR_ERR(new_nsp->ipc_ns);
 		goto out_ipc;
 	}
-	if (new_nsp->ipc_ns != tsk->nsproxy->ipc_ns) {
-		put_user_ns(new_nsp->ipc_ns->user_ns);
-		new_nsp->ipc_ns->user_ns = task_cred_xxx(tsk, user)->user_ns;
-		get_user_ns(new_nsp->ipc_ns->user_ns);
-	}
 
 	new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
 	if (IS_ERR(new_nsp->pid_ns)) {
diff --git a/kernel/sched.c b/kernel/sched.c
index a172494a9a6..480adeb63f8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4892,8 +4892,11 @@ static bool check_same_owner(struct task_struct *p)
 
 	rcu_read_lock();
 	pcred = __task_cred(p);
-	match = (cred->euid == pcred->euid ||
-		 cred->euid == pcred->uid);
+	if (cred->user->user_ns == pcred->user->user_ns)
+		match = (cred->euid == pcred->euid ||
+			 cred->euid == pcred->uid);
+	else
+		match = false;
 	rcu_read_unlock();
 	return match;
 }
@@ -5221,7 +5224,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 		goto out_free_cpus_allowed;
 	}
 	retval = -EPERM;
-	if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
+	if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
 		goto out_unlock;
 
 	retval = security_task_setscheduler(p);
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 419209893d8..51c6e89e861 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -189,7 +189,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
 	struct group_info *group_info;
 	int retval;
 
-	if (!capable(CAP_SETGID))
+	if (!nsown_capable(CAP_SETGID))
 		return -EPERM;
 	if ((unsigned)gidsetsize > NGROUPS_MAX)
 		return -EINVAL;
-- cgit v1.2.3-70-g09d2
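The conversions in this patch all follow the same pattern: instead of asking
whether the caller is privileged in the initial namespace (capable()), ask
whether it is privileged in the user namespace that owns the object being
acted on (ns_capable()), or in its own namespace for operations on itself
(nsown_capable()/task_ns_capable()), and only compare uids directly when both
credentials belong to the same user namespace. A condensed sketch of that
pattern using the same helpers as the patch; may_touch_ipc() and
may_inspect_task() are made-up wrappers for illustration, not kernel APIs:

	/* Hypothetical helper condensing the ipc-side checks above. */
	static bool may_touch_ipc(struct ipc_namespace *ns,
				  struct kern_ipc_perm *ipcp)
	{
		uid_t euid = current_euid();

		/* Owner/creator match is still a plain uid comparison ... */
		if (euid == ipcp->cuid || euid == ipcp->uid)
			return true;

		/*
		 * ... but the privilege override is checked against the user
		 * namespace owning the ipc namespace, not the initial one as
		 * capable() would do.
		 */
		return ns_capable(ns->user_ns, CAP_SYS_ADMIN);
	}

	/* The futex/sched hunks apply the same idea to task-vs-task access. */
	static bool may_inspect_task(const struct cred *cred,
				     const struct cred *pcred)
	{
		/* uids are only comparable within one user namespace ... */
		if (cred->user->user_ns == pcred->user->user_ns &&
		    (cred->euid == pcred->euid || cred->euid == pcred->uid))
			return true;

		/* ... otherwise require a capability in the target's namespace. */
		return ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE);
	}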