author		Al Viro <viro@zeniv.linux.org.uk>	2012-08-21 09:56:33 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2012-09-26 21:09:56 -0400
commit		6a6d27de340c89c5323565b49f7851362619925d (patch)
tree		b9b4ab23b43bd330584521917c9b2855c4a4e0cd /fs/file.c
parent		723a1d77431b0c568730ffac4dd0bcbbd3400031 (diff)
take close-on-exec logics to fs/file.c, clean it up a bit
... and add cond_resched() there, while we are at it. We can get large latencies as is...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
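For context (not part of this commit), here is a minimal userspace sketch of the semantics do_close_on_exec() enforces at execve() time: descriptors marked FD_CLOEXEC are closed before the new program starts, unmarked ones survive. The "/dev/null" and "/bin/ls" paths are only illustrative.

/* Illustrative userspace demo (not part of the commit): the fd opened
 * with O_CLOEXEC is gone after execve(), the plain one survives. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int keep     = open("/dev/null", O_RDONLY);             /* survives exec */
	int close_me = open("/dev/null", O_RDONLY | O_CLOEXEC); /* closed at exec */

	/* fcntl(close_me, F_SETFD, FD_CLOEXEC) would have the same effect
	 * as O_CLOEXEC, just set after the open. */
	printf("before exec: keep=%d close_me=%d\n", keep, close_me);

	/* ls lists its own open fds: "keep" is present, "close_me" is not. */
	execl("/bin/ls", "ls", "-l", "/proc/self/fd", (char *)NULL);
	perror("execl");
	return 1;
}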
Diffstat (limited to 'fs/file.c')
-rw-r--r--	fs/file.c	37
1 file changed, 37 insertions, 0 deletions
diff --git a/fs/file.c b/fs/file.c
index fd4694e688a..92197dd9fdc 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -652,6 +652,43 @@ out_unlock:
return -EBADF;
}
+void do_close_on_exec(struct files_struct *files)
+{
+	unsigned i;
+	struct fdtable *fdt;
+
+	/* exec unshares first */
+	BUG_ON(atomic_read(&files->count) != 1);
+	spin_lock(&files->file_lock);
+	for (i = 0; ; i++) {
+		unsigned long set;
+		unsigned fd = i * BITS_PER_LONG;
+		fdt = files_fdtable(files);
+		if (fd >= fdt->max_fds)
+			break;
+		set = fdt->close_on_exec[i];
+		if (!set)
+			continue;
+		fdt->close_on_exec[i] = 0;
+		for ( ; set ; fd++, set >>= 1) {
+			struct file *file;
+			if (!(set & 1))
+				continue;
+			file = fdt->fd[fd];
+			if (!file)
+				continue;
+			rcu_assign_pointer(fdt->fd[fd], NULL);
+			__put_unused_fd(files, fd);
+			spin_unlock(&files->file_lock);
+			filp_close(file, files);
+			cond_resched();
+			spin_lock(&files->file_lock);
+		}
+
+	}
+	spin_unlock(&files->file_lock);
+}
+
struct file *fget(unsigned int fd)
{
struct file *file;
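The outer loop in do_close_on_exec() above scans the close_on_exec bitmap one long at a time and peels set bits off with "set >>= 1". A standalone, lock-free sketch of that bit-walk follows; the function and callback names and the sample bitmap are made up for illustration, not kernel API.

/* Standalone sketch of the bitmap walk used above (illustrative only,
 * no kernel locking): visit every set bit, word by word. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void for_each_set_fd(const unsigned long *bitmap, unsigned max_fds,
			    void (*cb)(unsigned fd))
{
	unsigned i;

	for (i = 0; ; i++) {
		unsigned long set;
		unsigned fd = i * BITS_PER_LONG;

		if (fd >= max_fds)
			break;
		set = bitmap[i];
		for ( ; set; fd++, set >>= 1) {
			if (set & 1)
				cb(fd);	/* the kernel code closes fdt->fd[fd] here */
		}
	}
}

static void print_fd(unsigned fd) { printf("close-on-exec fd %u\n", fd); }

int main(void)
{
	unsigned long map[2] = { 0x9UL, 0x1UL };	/* fds 0, 3 and BITS_PER_LONG */
	for_each_set_fd(map, 2 * BITS_PER_LONG, print_fd);
	return 0;
}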