Diffstat (limited to 'fs/exec.c')
 -rw-r--r--  fs/exec.c  212
 1 file changed, 105 insertions(+), 107 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 0b88bf64614..a8efe35176b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -22,7 +22,6 @@
* formats.
*/
-#include <linux/config.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mman.h>
@@ -487,8 +486,6 @@ struct file *open_exec(const char *name)
if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
S_ISREG(inode->i_mode)) {
int err = vfs_permission(&nd, MAY_EXEC);
- if (!err && !(inode->i_mode & 0111))
- err = -EACCES;
file = ERR_PTR(err);
if (!err) {
file = nameidata_to_filp(&nd, O_RDONLY);
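This hunk drops the open-coded execute-bit test from open_exec(). The check is not lost: in this era the generic permission code refuses MAY_EXEC on a regular file with no execute bits set, even for CAP_DAC_OVERRIDE callers, so vfs_permission(&nd, MAY_EXEC) already covers it. A sketch of that check as it appears in permission() in fs/namei.c (the exact wording upstream may differ):

	/*
	 * MAY_EXEC on a regular file is refused when no execute
	 * bit is set, DAC override or not; S_IXUGO == 0111.
	 */
	if ((mask & MAY_EXEC) && S_ISREG(mode) && !(mode & S_IXUGO))
		return -EACCES;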
@@ -598,7 +595,7 @@ static int de_thread(struct task_struct *tsk)
if (!newsighand)
return -ENOMEM;
- if (thread_group_empty(current))
+ if (thread_group_empty(tsk))
goto no_thread_group;
/*
@@ -623,17 +620,17 @@ static int de_thread(struct task_struct *tsk)
* Reparenting needs write_lock on tasklist_lock,
* so it is safe to do it under read_lock.
*/
- if (unlikely(current->group_leader == child_reaper))
- child_reaper = current;
+ if (unlikely(tsk->group_leader == child_reaper))
+ child_reaper = tsk;
- zap_other_threads(current);
+ zap_other_threads(tsk);
read_unlock(&tasklist_lock);
/*
* Account for the thread group leader hanging around:
*/
count = 1;
- if (!thread_group_leader(current)) {
+ if (!thread_group_leader(tsk)) {
count = 2;
/*
* The SIGALRM timer survives the exec, but needs to point
@@ -642,14 +639,14 @@ static int de_thread(struct task_struct *tsk)
* synchronize with any firing (by calling del_timer_sync)
* before we can safely let the old group leader die.
*/
- sig->tsk = current;
+ sig->tsk = tsk;
spin_unlock_irq(lock);
if (hrtimer_cancel(&sig->real_timer))
hrtimer_restart(&sig->real_timer);
spin_lock_irq(lock);
}
while (atomic_read(&sig->count) > count) {
- sig->group_exit_task = current;
+ sig->group_exit_task = tsk;
sig->notify_count = count;
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(lock);
@@ -665,15 +662,13 @@ static int de_thread(struct task_struct *tsk)
* do is to wait for the thread group leader to become inactive,
* and to assume its PID:
*/
- if (!thread_group_leader(current)) {
- struct dentry *proc_dentry1, *proc_dentry2;
-
+ if (!thread_group_leader(tsk)) {
/*
* Wait for the thread group leader to be a zombie.
* It should already be zombie at this point, most
* of the time.
*/
- leader = current->group_leader;
+ leader = tsk->group_leader;
while (leader->exit_state != EXIT_ZOMBIE)
yield();
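The de_thread() hunks above and below are a mechanical conversion from current to the tsk argument. At its only call site the two name the same task, so behaviour is unchanged; the function merely stops reaching for the global. The call site, from flush_old_exec() in this same file:

	/*
	 * de_thread() is only invoked on the execing task itself,
	 * so tsk == current inside it.
	 */
	retval = de_thread(current);
	if (retval)
		goto out;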
@@ -687,16 +682,12 @@ static int de_thread(struct task_struct *tsk)
* When we take on its identity by switching to its PID, we
* also take its birthdate (always earlier than our own).
*/
- current->start_time = leader->start_time;
+ tsk->start_time = leader->start_time;
- spin_lock(&leader->proc_lock);
- spin_lock(&current->proc_lock);
- proc_dentry1 = proc_pid_unhash(current);
- proc_dentry2 = proc_pid_unhash(leader);
write_lock_irq(&tasklist_lock);
- BUG_ON(leader->tgid != current->tgid);
- BUG_ON(current->pid == current->tgid);
+ BUG_ON(leader->tgid != tsk->tgid);
+ BUG_ON(tsk->pid == tsk->tgid);
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
@@ -705,34 +696,26 @@ static int de_thread(struct task_struct *tsk)
*/
/* Become a process group leader with the old leader's pid.
- * Note: The old leader also uses thispid until release_task
+ * The old leader becomes a thread of this thread group.
+ * Note: The old leader also uses this pid until release_task
* is called. Odd but simple and correct.
*/
- detach_pid(current, PIDTYPE_PID);
- current->pid = leader->pid;
- attach_pid(current, PIDTYPE_PID, current->pid);
- attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
- attach_pid(current, PIDTYPE_SID, current->signal->session);
- list_add_tail_rcu(&current->tasks, &init_task.tasks);
-
- current->group_leader = current;
- leader->group_leader = current;
+ detach_pid(tsk, PIDTYPE_PID);
+ tsk->pid = leader->pid;
+ attach_pid(tsk, PIDTYPE_PID, tsk->pid);
+ transfer_pid(leader, tsk, PIDTYPE_PGID);
+ transfer_pid(leader, tsk, PIDTYPE_SID);
+ list_replace_rcu(&leader->tasks, &tsk->tasks);
- /* Reduce leader to a thread */
- detach_pid(leader, PIDTYPE_PGID);
- detach_pid(leader, PIDTYPE_SID);
- list_del_init(&leader->tasks);
+ tsk->group_leader = tsk;
+ leader->group_leader = tsk;
- current->exit_signal = SIGCHLD;
+ tsk->exit_signal = SIGCHLD;
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
write_unlock_irq(&tasklist_lock);
- spin_unlock(&leader->proc_lock);
- spin_unlock(&current->proc_lock);
- proc_pid_flush(proc_dentry1);
- proc_pid_flush(proc_dentry2);
}
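Two structural changes ride along with the conversion here. The PGID and SID links now move via transfer_pid() instead of detach_pid()/attach_pid() pairs, and the leader's slot on the global task list is taken over with list_replace_rcu(), so concurrent RCU readers never observe the pid or the task unhashed. The proc_pid_unhash()/proc_pid_flush() dance and the task proc_lock also go away: /proc dentry invalidation is handled through the VFS revalidate path by now, so exec no longer flushes those dentries by hand. transfer_pid() lives in kernel/pid.c; a sketch of what it does (details upstream may differ slightly):

	void transfer_pid(struct task_struct *old, struct task_struct *new,
			  enum pid_type type)
	{
		/* Move one pid link in place, so RCU readers always
		 * see either the old task or the new one attached. */
		new->pids[type].pid = old->pids[type].pid;
		hlist_replace_rcu(&old->pids[type].node,
				  &new->pids[type].node);
		old->pids[type].pid = NULL;
	}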
/*
@@ -765,9 +748,9 @@ no_thread_group:
write_lock_irq(&tasklist_lock);
spin_lock(&oldsighand->siglock);
- spin_lock(&newsighand->siglock);
+ spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
- rcu_assign_pointer(current->sighand, newsighand);
+ rcu_assign_pointer(tsk->sighand, newsighand);
recalc_sigpending();
spin_unlock(&newsighand->siglock);
@@ -778,7 +761,7 @@ no_thread_group:
kmem_cache_free(sighand_cachep, oldsighand);
}
- BUG_ON(!thread_group_leader(current));
+ BUG_ON(!thread_group_leader(tsk));
return 0;
}
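The second siglock is now taken with spin_lock_nested() and SINGLE_DEPTH_NESTING. Both locks belong to the same lock class, so without the annotation lockdep would report the sequence as possible recursive locking; the annotation documents a deliberate, one-level nesting. The idiom in isolation (a sketch, mirroring the patch):

	spin_lock(&oldsighand->siglock);
	spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
	/* ... publish the new sighand while both are held ... */
	spin_unlock(&newsighand->siglock);
	spin_unlock(&oldsighand->siglock);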
@@ -915,8 +898,7 @@ int flush_old_exec(struct linux_binprm * bprm)
return 0;
mmap_failed:
- put_files_struct(current->files);
- current->files = files;
+ reset_files_struct(current, files);
out:
return retval;
}
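The open-coded put-and-assign on the error path becomes reset_files_struct(), which additionally takes task_lock() around the pointer swap. Here files is the files_struct saved before unshare_files(), so the helper restores it and drops the unshared copy exec no longer needs. Assuming the helper of this era looks roughly like:

	void reset_files_struct(struct task_struct *tsk,
				struct files_struct *files)
	{
		struct files_struct *old = tsk->files;

		task_lock(tsk);
		tsk->files = files;
		task_unlock(tsk);
		put_files_struct(old);	/* drop the abandoned copy */
	}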
@@ -934,12 +916,6 @@ int prepare_binprm(struct linux_binprm *bprm)
int retval;
mode = inode->i_mode;
- /*
- * Check execute perms again - if the caller has CAP_DAC_OVERRIDE,
- * generic_permission lets a non-executable through
- */
- if (!(mode & 0111)) /* with at least _one_ execute bit set */
- return -EACCES;
if (bprm->file->f_op == NULL)
return -EACCES;
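The duplicated 0111 test goes away for the same reason as in open_exec() above: the generic MAY_EXEC check already rejects regular files with no execute bit, CAP_DAC_OVERRIDE notwithstanding. mode is still read because prepare_binprm() goes on to use it for the set-id credentials; a simplified sketch of the code that follows this hunk in this era:

	bprm->e_uid = current->euid;
	bprm->e_gid = current->egid;

	if (!(bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)) {
		if (mode & S_ISUID)		/* set-uid binary */
			bprm->e_uid = inode->i_uid;
		/* setgid without group-execute means mandatory
		 * locking, not a set-gid executable */
		if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
			bprm->e_gid = inode->i_gid;
	}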
@@ -1379,67 +1355,102 @@ static void format_corename(char *corename, const char *pattern, long signr)
*out_ptr = 0;
}
-static void zap_threads (struct mm_struct *mm)
+static void zap_process(struct task_struct *start)
{
- struct task_struct *g, *p;
- struct task_struct *tsk = current;
- struct completion *vfork_done = tsk->vfork_done;
- int traced = 0;
+ struct task_struct *t;
- /*
- * Make sure nobody is waiting for us to release the VM,
- * otherwise we can deadlock when we wait on each other
- */
- if (vfork_done) {
- tsk->vfork_done = NULL;
- complete(vfork_done);
- }
+ start->signal->flags = SIGNAL_GROUP_EXIT;
+ start->signal->group_stop_count = 0;
- read_lock(&tasklist_lock);
- do_each_thread(g,p)
- if (mm == p->mm && p != tsk) {
- force_sig_specific(SIGKILL, p);
- mm->core_waiters++;
- if (unlikely(p->ptrace) &&
- unlikely(p->parent->mm == mm))
- traced = 1;
+ t = start;
+ do {
+ if (t != current && t->mm) {
+ t->mm->core_waiters++;
+ sigaddset(&t->pending.signal, SIGKILL);
+ signal_wake_up(t, 1);
}
- while_each_thread(g,p);
+ } while ((t = next_thread(t)) != start);
+}
- read_unlock(&tasklist_lock);
+static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+ int exit_code)
+{
+ struct task_struct *g, *p;
+ unsigned long flags;
+ int err = -EAGAIN;
+
+ spin_lock_irq(&tsk->sighand->siglock);
+ if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
+ tsk->signal->group_exit_code = exit_code;
+ zap_process(tsk);
+ err = 0;
+ }
+ spin_unlock_irq(&tsk->sighand->siglock);
+ if (err)
+ return err;
- if (unlikely(traced)) {
- /*
- * We are zapping a thread and the thread it ptraces.
- * If the tracee went into a ptrace stop for exit tracing,
- * we could deadlock since the tracer is waiting for this
- * coredump to finish. Detach them so they can both die.
- */
- write_lock_irq(&tasklist_lock);
- do_each_thread(g,p) {
- if (mm == p->mm && p != tsk &&
- p->ptrace && p->parent->mm == mm) {
- __ptrace_detach(p, 0);
+ if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
+ goto done;
+
+ rcu_read_lock();
+ for_each_process(g) {
+ if (g == tsk->group_leader)
+ continue;
+
+ p = g;
+ do {
+ if (p->mm) {
+ if (p->mm == mm) {
+ /*
+ * p->sighand can't disappear, but
+ * may be changed by de_thread()
+ */
+ lock_task_sighand(p, &flags);
+ zap_process(p);
+ unlock_task_sighand(p, &flags);
+ }
+ break;
}
- } while_each_thread(g,p);
- write_unlock_irq(&tasklist_lock);
+ } while ((p = next_thread(p)) != g);
}
+ rcu_read_unlock();
+done:
+ return mm->core_waiters;
}
-static void coredump_wait(struct mm_struct *mm)
+static int coredump_wait(int exit_code)
{
- DECLARE_COMPLETION(startup_done);
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ struct completion startup_done;
+ struct completion *vfork_done;
int core_waiters;
+ init_completion(&mm->core_done);
+ init_completion(&startup_done);
mm->core_startup_done = &startup_done;
- zap_threads(mm);
- core_waiters = mm->core_waiters;
+ core_waiters = zap_threads(tsk, mm, exit_code);
up_write(&mm->mmap_sem);
+ if (unlikely(core_waiters < 0))
+ goto fail;
+
+ /*
+ * Make sure nobody is waiting for us to release the VM,
+ * otherwise we can deadlock when we wait on each other
+ */
+ vfork_done = tsk->vfork_done;
+ if (vfork_done) {
+ tsk->vfork_done = NULL;
+ complete(vfork_done);
+ }
+
if (core_waiters)
wait_for_completion(&startup_done);
+fail:
BUG_ON(mm->core_waiters);
+ return core_waiters;
}
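In the rewritten path, coredump_wait() takes over the completion setup from do_coredump(), and zap_threads() marks the group as exiting under siglock before fanning out. zap_process() queues SIGKILL by hand with sigaddset() plus signal_wake_up() rather than force_sig_specific(), which is safe because the whole group is being killed unconditionally; zap_threads() then walks the other processes sharing the mm (CLONE_VM without CLONE_THREAD) under rcu_read_lock(), using lock_task_sighand() because de_thread() can change a task's ->sighand underneath it. The vfork_done release also moves here from the old zap_threads(), after mmap_sem is dropped but before sleeping, so a vfork parent cannot deadlock against the dump. The other half of the core_waiters handshake lives in exit_mm() in kernel/exit.c; roughly, in this era (a sketch):

	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		/* Each exiting thread sharing the dumping mm checks
		 * in; the last one releases the dumper, then all wait
		 * for the dump to finish. */
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}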
int do_coredump(long signr, int exit_code, struct pt_regs * regs)
@@ -1473,22 +1484,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs)
}
mm->dumpable = 0;
- retval = -EAGAIN;
- spin_lock_irq(&current->sighand->siglock);
- if (!(current->signal->flags & SIGNAL_GROUP_EXIT)) {
- current->signal->flags = SIGNAL_GROUP_EXIT;
- current->signal->group_exit_code = exit_code;
- current->signal->group_stop_count = 0;
- retval = 0;
- }
- spin_unlock_irq(&current->sighand->siglock);
- if (retval) {
- up_write(&mm->mmap_sem);
+ retval = coredump_wait(exit_code);
+ if (retval < 0)
goto fail;
- }
-
- init_completion(&mm->core_done);
- coredump_wait(mm);
/*
* Clear any false indication of pending signals that might