From a9e61e25f9d2e7e43bf17625f5cb56c9e0a89b17 Mon Sep 17 00:00:00 2001 From: Felix Blyakher Date: Tue, 31 Mar 2009 15:12:56 -0500 Subject: lockd: call locks_release_private to cleanup per-filesystem state For every lock request lockd creates a new file_lock object in nlmsvc_setgrantargs() by copying the passed in file_lock with locks_copy_lock(). A filesystem can attach it's own lock_operations vector to the file_lock. It has to be cleaned up at the end of the file_lock's life. However, lockd doesn't do it today, yet it asserts in nlmclnt_release_lockargs() that the per-filesystem state is clean. This patch fixes it by exporting locks_release_private() and adding it to nlmsvc_freegrantargs(), to be symmetrical to creating a file_lock in nlmsvc_setgrantargs(). Signed-off-by: Felix Blyakher Signed-off-by: J. Bruce Fields --- include/linux/fs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux/fs.h') diff --git a/include/linux/fs.h b/include/linux/fs.h index 5bed436f435..5ba615e8f53 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1108,6 +1108,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *); extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); extern void locks_remove_posix(struct file *, fl_owner_t); extern void locks_remove_flock(struct file *); +extern void locks_release_private(struct file_lock *); extern void posix_test_lock(struct file *, struct file_lock *); extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); extern int posix_lock_file_wait(struct file *, struct file_lock *); -- cgit v1.2.3-70-g09d2 From 6818173bd658439b83896a2a7586f64ab51bf29c Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 7 May 2009 15:37:36 +0200 Subject: splice: implement default splice_read method If f_op->splice_read() is not implemented, fall back to a plain read. Use vfs_readv() to read into previously allocated pages. This will allow splice and functions using splice, such as the loop device, to work on all filesystems. This includes "direct_io" files in fuse which bypass the page cache. Signed-off-by: Miklos Szeredi Signed-off-by: Jens Axboe --- drivers/block/loop.c | 11 +---- fs/coda/file.c | 9 ++-- fs/pipe.c | 14 ++++++ fs/read_write.c | 7 +-- fs/splice.c | 120 ++++++++++++++++++++++++++++++++++++++++++++-- include/linux/fs.h | 2 + include/linux/pipe_fs_i.h | 1 + 7 files changed, 140 insertions(+), 24 deletions(-) (limited to 'include/linux/fs.h') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 9ca4bb01465..801f4ab8330 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -709,10 +709,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) goto out_putf; - /* new backing store needs to support loop (eg splice_read) */ - if (!inode->i_fop->splice_read) - goto out_putf; - /* size of the new backing store needs to be the same */ if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) goto out_putf; @@ -788,12 +784,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, error = -EINVAL; if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { const struct address_space_operations *aops = mapping->a_ops; - /* - * If we can't read - sorry. If we only can't write - well, - * it's going to be read-only. 
- */ - if (!file->f_op->splice_read) - goto out_putf; + if (aops->write_begin) lo_flags |= LO_FLAGS_USE_AOPS; if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) diff --git a/fs/coda/file.c b/fs/coda/file.c index 6a347fbc998..ffd42815fda 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -47,6 +47,8 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos, struct pipe_inode_info *pipe, size_t count, unsigned int flags) { + ssize_t (*splice_read)(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); struct coda_file_info *cfi; struct file *host_file; @@ -54,10 +56,11 @@ coda_file_splice_read(struct file *coda_file, loff_t *ppos, BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); host_file = cfi->cfi_container; - if (!host_file->f_op || !host_file->f_op->splice_read) - return -EINVAL; + splice_read = host_file->f_op->splice_read; + if (!splice_read) + splice_read = default_file_splice_read; - return host_file->f_op->splice_read(host_file, ppos, pipe, count,flags); + return splice_read(host_file, ppos, pipe, count, flags); } static ssize_t diff --git a/fs/pipe.c b/fs/pipe.c index 13414ec45b8..f7dd21ad85a 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -302,6 +302,20 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *info, return 0; } +/** + * generic_pipe_buf_release - put a reference to a &struct pipe_buffer + * @pipe: the pipe that the buffer belongs to + * @buf: the buffer to put a reference to + * + * Description: + * This function releases a reference to @buf. + */ +void generic_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +{ + page_cache_release(buf->page); +} + static const struct pipe_buf_operations anon_pipe_buf_ops = { .can_merge = 1, .map = generic_pipe_buf_map, diff --git a/fs/read_write.c b/fs/read_write.c index 9d1e76bb9ee..6c8c55dec2b 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -805,12 +805,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, goto out; if (!(in_file->f_mode & FMODE_READ)) goto fput_in; - retval = -EINVAL; - in_inode = in_file->f_path.dentry->d_inode; - if (!in_inode) - goto fput_in; - if (!in_file->f_op || !in_file->f_op->splice_read) - goto fput_in; retval = -ESPIPE; if (!ppos) ppos = &in_file->f_pos; @@ -834,6 +828,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, retval = -EINVAL; if (!out_file->f_op || !out_file->f_op->sendpage) goto fput_out; + in_inode = in_file->f_path.dentry->d_inode; out_inode = out_file->f_path.dentry->d_inode; retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count); if (retval < 0) diff --git a/fs/splice.c b/fs/splice.c index e405cf552f5..3bd9cb21b38 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -507,9 +507,116 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, return ret; } - EXPORT_SYMBOL(generic_file_splice_read); +static const struct pipe_buf_operations default_pipe_buf_ops = { + .can_merge = 0, + .map = generic_pipe_buf_map, + .unmap = generic_pipe_buf_unmap, + .confirm = generic_pipe_buf_confirm, + .release = generic_pipe_buf_release, + .steal = generic_pipe_buf_steal, + .get = generic_pipe_buf_get, +}; + +static ssize_t kernel_readv(struct file *file, const struct iovec *vec, + unsigned long vlen, loff_t offset) +{ + mm_segment_t old_fs; + loff_t pos = offset; + ssize_t res; + + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ + res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); + set_fs(old_fs); + + return res; +} + +ssize_t 
default_file_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags) +{ + unsigned int nr_pages; + unsigned int nr_freed; + size_t offset; + struct page *pages[PIPE_BUFFERS]; + struct partial_page partial[PIPE_BUFFERS]; + struct iovec vec[PIPE_BUFFERS]; + pgoff_t index; + ssize_t res; + size_t this_len; + int error; + int i; + struct splice_pipe_desc spd = { + .pages = pages, + .partial = partial, + .flags = flags, + .ops = &default_pipe_buf_ops, + .spd_release = spd_release_page, + }; + + index = *ppos >> PAGE_CACHE_SHIFT; + offset = *ppos & ~PAGE_CACHE_MASK; + nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + + for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) { + struct page *page; + + page = alloc_page(GFP_HIGHUSER); + error = -ENOMEM; + if (!page) + goto err; + + this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); + vec[i].iov_base = (void __user *) kmap(page); + vec[i].iov_len = this_len; + pages[i] = page; + spd.nr_pages++; + len -= this_len; + offset = 0; + } + + res = kernel_readv(in, vec, spd.nr_pages, *ppos); + if (res < 0) + goto err; + + error = 0; + if (!res) + goto err; + + nr_freed = 0; + for (i = 0; i < spd.nr_pages; i++) { + kunmap(pages[i]); + this_len = min_t(size_t, vec[i].iov_len, res); + partial[i].offset = 0; + partial[i].len = this_len; + if (!this_len) { + __free_page(pages[i]); + pages[i] = NULL; + nr_freed++; + } + res -= this_len; + } + spd.nr_pages -= nr_freed; + + res = splice_to_pipe(pipe, &spd); + if (res > 0) + *ppos += res; + + return res; + +err: + for (i = 0; i < spd.nr_pages; i++) { + kunmap(pages[i]); + __free_page(pages[i]); + } + return error; +} +EXPORT_SYMBOL(default_file_splice_read); + /* * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' * using sendpage(). Return the number of bytes sent. 
@@ -933,11 +1040,10 @@ static long do_splice_to(struct file *in, loff_t *ppos, struct pipe_inode_info *pipe, size_t len, unsigned int flags) { + ssize_t (*splice_read)(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); int ret; - if (unlikely(!in->f_op || !in->f_op->splice_read)) - return -EINVAL; - if (unlikely(!(in->f_mode & FMODE_READ))) return -EBADF; @@ -945,7 +1051,11 @@ static long do_splice_to(struct file *in, loff_t *ppos, if (unlikely(ret < 0)) return ret; - return in->f_op->splice_read(in, ppos, pipe, len, flags); + splice_read = in->f_op->splice_read; + if (!splice_read) + splice_read = default_file_splice_read; + + return splice_read(in, ppos, pipe, len, flags); } /** diff --git a/include/linux/fs.h b/include/linux/fs.h index 5bed436f435..d926c2bea16 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2204,6 +2204,8 @@ extern int generic_segment_checks(const struct iovec *iov, /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); +extern ssize_t default_file_splice_read(struct file *, loff_t *, + struct pipe_inode_info *, size_t, unsigned int); extern ssize_t generic_file_splice_write(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index c8f038554e8..b43a9e03905 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); +void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); #endif -- cgit v1.2.3-70-g09d2 From 3be25f49b9d6a97eae9bcb96d3292072b7658bd8 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 21 May 2009 17:01:26 -0400 Subject: fsnotify: add marks to inodes so groups can interpret how to handle those inodes This patch creates a way for fsnotify groups to attach marks to inodes. These marks have little meaning to the generic fsnotify infrastructure and thus their meaning should be interpreted by the group that attached them to the inode's list. dnotify and inotify will make use of these markings to indicate which inodes are of interest to their respective groups. But this implementation has the useful property that in the future other listeners could actually use the marks for the exact opposite reason, aka to indicate which inodes it had NO interest in. 
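[Editor's note: the sketch below is not part of the commit. It is an illustrative use of the mark API this patch introduces (fsnotify_find_mark_entry() and fsnotify_put_mark(), both shown in the diff), showing how a hypothetical listener could use a mark in the "opposite" sense described above, i.e. as an explicit ignore flag. The callback name is made up for the example.]

#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/fsnotify_backend.h>

/*
 * Example ->should_send_event() callback: here a marked inode means
 * "not interested", so the presence of a mark suppresses the event.
 */
static bool example_should_send_event(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask)
{
	struct fsnotify_mark_entry *entry;

	/* fsnotify_find_mark_entry() must be called under i_lock */
	spin_lock(&inode->i_lock);
	entry = fsnotify_find_mark_entry(group, inode);
	spin_unlock(&inode->i_lock);

	if (!entry)
		return true;		/* unmarked inode: deliver the event */

	fsnotify_put_mark(entry);	/* drop the reference taken by find */
	return false;			/* marked inode: explicitly ignored */
}

[dnotify, reimplemented later in this series, does the inverse: it only reports an event when a mark for its group exists on the inode and that mark's mask includes the event type.]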
Signed-off-by: Eric Paris Acked-by: Al Viro Cc: Christoph Hellwig --- fs/inode.c | 9 ++ fs/notify/Makefile | 2 +- fs/notify/fsnotify.c | 13 ++ fs/notify/fsnotify.h | 5 + fs/notify/group.c | 49 +++++- fs/notify/inode_mark.c | 329 +++++++++++++++++++++++++++++++++++++++ include/linux/fs.h | 5 + include/linux/fsnotify.h | 9 ++ include/linux/fsnotify_backend.h | 65 +++++++- 9 files changed, 483 insertions(+), 3 deletions(-) create mode 100644 fs/notify/inode_mark.c (limited to 'include/linux/fs.h') diff --git a/fs/inode.c b/fs/inode.c index bca0c618fdb..54c63ce3de2 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -189,6 +190,10 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) inode->i_private = NULL; inode->i_mapping = mapping; +#ifdef CONFIG_FSNOTIFY + inode->i_fsnotify_mask = 0; +#endif + return inode; out_free_security: @@ -221,6 +226,7 @@ void destroy_inode(struct inode *inode) BUG_ON(inode_has_buffers(inode)); ima_inode_free(inode); security_inode_free(inode); + fsnotify_inode_delete(inode); if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else @@ -252,6 +258,9 @@ void inode_init_once(struct inode *inode) INIT_LIST_HEAD(&inode->inotify_watches); mutex_init(&inode->inotify_mutex); #endif +#ifdef CONFIG_FSNOTIFY + INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries); +#endif } EXPORT_SYMBOL(inode_init_once); diff --git a/fs/notify/Makefile b/fs/notify/Makefile index db5467b5b58..0922cc826c4 100644 --- a/fs/notify/Makefile +++ b/fs/notify/Makefile @@ -1,4 +1,4 @@ -obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o +obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o obj-y += dnotify/ obj-y += inotify/ diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 56bee0f10c3..d5654629c65 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -25,6 +25,15 @@ #include #include "fsnotify.h" +/* + * Clear all of the marks on an inode when it is being evicted from core + */ +void __fsnotify_inode_delete(struct inode *inode) +{ + fsnotify_clear_marks_by_inode(inode); +} +EXPORT_SYMBOL_GPL(__fsnotify_inode_delete); + /* * This is the main call to fsnotify. The VFS calls into hook specific functions * in linux/fsnotify.h. Those functions then in turn call here. Here will call @@ -43,6 +52,8 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is) if (!(mask & fsnotify_mask)) return; + if (!(mask & to_tell->i_fsnotify_mask)) + return; /* * SRCU!! the groups list is very very much read only and the path is * very hot. 
The VAST majority of events are not going to need to do @@ -51,6 +62,8 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is) idx = srcu_read_lock(&fsnotify_grp_srcu); list_for_each_entry_rcu(group, &fsnotify_groups, group_list) { if (mask & group->mask) { + if (!group->ops->should_send_event(group, to_tell, mask)) + continue; if (!event) { event = fsnotify_create_event(to_tell, mask, data, data_is); /* shit, we OOM'd and now we can't tell, maybe diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h index c6a8bd47657..8ebcbe893c9 100644 --- a/fs/notify/fsnotify.h +++ b/fs/notify/fsnotify.h @@ -12,4 +12,9 @@ extern struct srcu_struct fsnotify_grp_srcu; extern struct list_head fsnotify_groups; /* all bitwise OR of all event types (FS_*) for all fsnotify_groups */ extern __u32 fsnotify_mask; + +/* final kfree of a group */ +extern void fsnotify_final_destroy_group(struct fsnotify_group *group); +/* run the list of all marks associated with inode and flag them to be freed */ +extern void fsnotify_clear_marks_by_inode(struct inode *inode); #endif /* __FS_NOTIFY_FSNOTIFY_H_ */ diff --git a/fs/notify/group.c b/fs/notify/group.c index c6812953b96..a29d2fa6792 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -54,6 +54,29 @@ void fsnotify_recalc_global_mask(void) fsnotify_mask = mask; } +/* + * Update the group->mask by running all of the marks associated with this + * group and finding the bitwise | of all of the mark->mask. If we change + * the group->mask we need to update the global mask of events interesting + * to the system. + */ +void fsnotify_recalc_group_mask(struct fsnotify_group *group) +{ + __u32 mask = 0; + __u32 old_mask = group->mask; + struct fsnotify_mark_entry *entry; + + spin_lock(&group->mark_lock); + list_for_each_entry(entry, &group->mark_entries, g_list) + mask |= entry->mask; + spin_unlock(&group->mark_lock); + + group->mask = mask; + + if (old_mask != mask) + fsnotify_recalc_global_mask(); +} + /* * Take a reference to a group so things found under the fsnotify_grp_mutex * can't get freed under us @@ -66,7 +89,7 @@ static void fsnotify_get_group(struct fsnotify_group *group) /* * Final freeing of a group */ -static void fsnotify_destroy_group(struct fsnotify_group *group) +void fsnotify_final_destroy_group(struct fsnotify_group *group) { if (group->ops->free_group_priv) group->ops->free_group_priv(group); @@ -74,6 +97,24 @@ static void fsnotify_destroy_group(struct fsnotify_group *group) kfree(group); } +/* + * Trying to get rid of a group. We need to first get rid of any outstanding + * allocations and then free the group. Remember that fsnotify_clear_marks_by_group + * could miss marks that are being freed by inode and those marks could still + * hold a reference to this group (via group->num_marks) If we get into that + * situtation, the fsnotify_final_destroy_group will get called when that final + * mark is freed. 
+ */ +static void fsnotify_destroy_group(struct fsnotify_group *group) +{ + /* clear all inode mark entries for this group */ + fsnotify_clear_marks_by_group(group); + + /* past the point of no return, matches the initial value of 1 */ + if (atomic_dec_and_test(&group->num_marks)) + fsnotify_final_destroy_group(group); +} + /* * Remove this group from the global list of groups that will get events * this can be done even if there are still references and things still using @@ -173,6 +214,10 @@ struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask, group->group_num = group_num; group->mask = mask; + spin_lock_init(&group->mark_lock); + atomic_set(&group->num_marks, 0); + INIT_LIST_HEAD(&group->mark_entries); + group->ops = ops; mutex_lock(&fsnotify_grp_mutex); @@ -188,6 +233,8 @@ struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask, /* group not found, add a new one */ list_add_rcu(&group->group_list, &fsnotify_groups); group->on_group_list = 1; + /* being on the fsnotify_groups list holds one num_marks */ + atomic_inc(&group->num_marks); mutex_unlock(&fsnotify_grp_mutex); diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c new file mode 100644 index 00000000000..cdc15414697 --- /dev/null +++ b/fs/notify/inode_mark.c @@ -0,0 +1,329 @@ +/* + * Copyright (C) 2008 Red Hat, Inc., Eric Paris + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* + * fsnotify inode mark locking/lifetime/and refcnting + * + * REFCNT: + * The mark->refcnt tells how many "things" in the kernel currently are + * referencing this object. The object typically will live inside the kernel + * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task + * which can find this object holding the appropriete locks, can take a reference + * and the object itself is guarenteed to survive until the reference is dropped. + * + * LOCKING: + * There are 3 spinlocks involved with fsnotify inode marks and they MUST + * be taken in order as follows: + * + * entry->lock + * group->mark_lock + * inode->i_lock + * + * entry->lock protects 2 things, entry->group and entry->inode. You must hold + * that lock to dereference either of these things (they could be NULL even with + * the lock) + * + * group->mark_lock protects the mark_entries list anchored inside a given group + * and each entry is hooked via the g_list. It also sorta protects the + * free_g_list, which when used is anchored by a private list on the stack of the + * task which held the group->mark_lock. + * + * inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a + * given inode and each entry is hooked via the i_list. (and sorta the + * free_i_list) + * + * + * LIFETIME: + * Inode marks survive between when they are added to an inode and when their + * refcnt==0. 
+ * + * The inode mark can be cleared for a number of different reasons including: + * - The inode is unlinked for the last time. (fsnotify_inode_remove) + * - The inode is being evicted from cache. (fsnotify_inode_delete) + * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes) + * - Something explicitly requests that it be removed. (fsnotify_destroy_mark_by_entry) + * - The fsnotify_group associated with the mark is going away and all such marks + * need to be cleaned up. (fsnotify_clear_marks_by_group) + * + * Worst case we are given an inode and need to clean up all the marks on that + * inode. We take i_lock and walk the i_fsnotify_mark_entries safely. For each + * mark on the list we take a reference (so the mark can't disappear under us). + * We remove that mark form the inode's list of marks and we add this mark to a + * private list anchored on the stack using i_free_list; At this point we no + * longer fear anything finding the mark using the inode's list of marks. + * + * We can safely and locklessly run the private list on the stack of everything + * we just unattached from the original inode. For each mark on the private list + * we grab the mark-> and can thus dereference mark->group and mark->inode. If + * we see the group and inode are not NULL we take those locks. Now holding all + * 3 locks we can completely remove the mark from other tasks finding it in the + * future. Remember, 10 things might already be referencing this mark, but they + * better be holding a ref. We drop our reference we took before we unhooked it + * from the inode. When the ref hits 0 we can free the mark. + * + * Very similarly for freeing by group, except we use free_g_list. + * + * This has the very interesting property of being able to run concurrently with + * any (or all) other directions. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include "fsnotify.h" + +void fsnotify_get_mark(struct fsnotify_mark_entry *entry) +{ + atomic_inc(&entry->refcnt); +} + +void fsnotify_put_mark(struct fsnotify_mark_entry *entry) +{ + if (atomic_dec_and_test(&entry->refcnt)) + entry->free_mark(entry); +} + +/* + * Recalculate the mask of events relevant to a given inode locked. + */ +static void fsnotify_recalc_inode_mask_locked(struct inode *inode) +{ + struct fsnotify_mark_entry *entry; + struct hlist_node *pos; + __u32 new_mask = 0; + + assert_spin_locked(&inode->i_lock); + + hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) + new_mask |= entry->mask; + inode->i_fsnotify_mask = new_mask; +} + +/* + * Recalculate the inode->i_fsnotify_mask, or the mask of all FS_* event types + * any notifier is interested in hearing for this inode. + */ +void fsnotify_recalc_inode_mask(struct inode *inode) +{ + spin_lock(&inode->i_lock); + fsnotify_recalc_inode_mask_locked(inode); + spin_unlock(&inode->i_lock); +} + +/* + * Any time a mark is getting freed we end up here. 
+ * The caller had better be holding a reference to this mark so we don't actually + * do the final put under the entry->lock + */ +void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry) +{ + struct fsnotify_group *group; + struct inode *inode; + + spin_lock(&entry->lock); + + group = entry->group; + inode = entry->inode; + + BUG_ON(group && !inode); + BUG_ON(!group && inode); + + /* if !group something else already marked this to die */ + if (!group) { + spin_unlock(&entry->lock); + return; + } + + /* 1 from caller and 1 for being on i_list/g_list */ + BUG_ON(atomic_read(&entry->refcnt) < 2); + + spin_lock(&group->mark_lock); + spin_lock(&inode->i_lock); + + hlist_del_init(&entry->i_list); + entry->inode = NULL; + + list_del_init(&entry->g_list); + entry->group = NULL; + + fsnotify_put_mark(entry); /* for i_list and g_list */ + + /* + * this mark is now off the inode->i_fsnotify_mark_entries list and we + * hold the inode->i_lock, so this is the perfect time to update the + * inode->i_fsnotify_mask + */ + fsnotify_recalc_inode_mask_locked(inode); + + spin_unlock(&inode->i_lock); + spin_unlock(&group->mark_lock); + spin_unlock(&entry->lock); + + /* + * Some groups like to know that marks are being freed. This is a + * callback to the group function to let it know that this entry + * is being freed. + */ + group->ops->freeing_mark(entry, group); + + /* + * it's possible that this group tried to destroy itself, but this + * this mark was simultaneously being freed by inode. If that's the + * case, we finish freeing the group here. + */ + if (unlikely(atomic_dec_and_test(&group->num_marks))) + fsnotify_final_destroy_group(group); +} + +/* + * Given a group, destroy all of the marks associated with that group. + */ +void fsnotify_clear_marks_by_group(struct fsnotify_group *group) +{ + struct fsnotify_mark_entry *lentry, *entry; + LIST_HEAD(free_list); + + spin_lock(&group->mark_lock); + list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) { + list_add(&entry->free_g_list, &free_list); + list_del_init(&entry->g_list); + fsnotify_get_mark(entry); + } + spin_unlock(&group->mark_lock); + + list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) { + fsnotify_destroy_mark_by_entry(entry); + fsnotify_put_mark(entry); + } +} + +/* + * Given an inode, destroy all of the marks associated with that inode. + */ +void fsnotify_clear_marks_by_inode(struct inode *inode) +{ + struct fsnotify_mark_entry *entry, *lentry; + struct hlist_node *pos, *n; + LIST_HEAD(free_list); + + spin_lock(&inode->i_lock); + hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) { + list_add(&entry->free_i_list, &free_list); + hlist_del_init(&entry->i_list); + fsnotify_get_mark(entry); + } + spin_unlock(&inode->i_lock); + + list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) { + fsnotify_destroy_mark_by_entry(entry); + fsnotify_put_mark(entry); + } +} + +/* + * given a group and inode, find the mark associated with that combination. 
+ * if found take a reference to that mark and return it, else return NULL + */ +struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, + struct inode *inode) +{ + struct fsnotify_mark_entry *entry; + struct hlist_node *pos; + + assert_spin_locked(&inode->i_lock); + + hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) { + if (entry->group == group) { + fsnotify_get_mark(entry); + return entry; + } + } + return NULL; +} + +/* + * Nothing fancy, just initialize lists and locks and counters. + */ +void fsnotify_init_mark(struct fsnotify_mark_entry *entry, + void (*free_mark)(struct fsnotify_mark_entry *entry)) + +{ + spin_lock_init(&entry->lock); + atomic_set(&entry->refcnt, 1); + INIT_HLIST_NODE(&entry->i_list); + entry->group = NULL; + entry->mask = 0; + entry->inode = NULL; + entry->free_mark = free_mark; +} + +/* + * Attach an initialized mark entry to a given group and inode. + * These marks may be used for the fsnotify backend to determine which + * event types should be delivered to which group and for which inodes. + */ +int fsnotify_add_mark(struct fsnotify_mark_entry *entry, + struct fsnotify_group *group, struct inode *inode) +{ + struct fsnotify_mark_entry *lentry; + int ret = 0; + + /* + * LOCKING ORDER!!!! + * entry->lock + * group->mark_lock + * inode->i_lock + */ + spin_lock(&entry->lock); + spin_lock(&group->mark_lock); + spin_lock(&inode->i_lock); + + entry->group = group; + entry->inode = inode; + + lentry = fsnotify_find_mark_entry(group, inode); + if (!lentry) { + hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries); + list_add(&entry->g_list, &group->mark_entries); + + fsnotify_get_mark(entry); /* for i_list and g_list */ + + atomic_inc(&group->num_marks); + + fsnotify_recalc_inode_mask_locked(inode); + } + + spin_unlock(&inode->i_lock); + spin_unlock(&group->mark_lock); + spin_unlock(&entry->lock); + + if (lentry) { + ret = -EEXIST; + fsnotify_put_mark(lentry); + } + + return ret; +} diff --git a/include/linux/fs.h b/include/linux/fs.h index 83d6b439724..275b0860044 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -755,6 +755,11 @@ struct inode { __u32 i_generation; +#ifdef CONFIG_FSNOTIFY + __u32 i_fsnotify_mask; /* all events this inode cares about */ + struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */ +#endif + #ifdef CONFIG_DNOTIFY unsigned long i_dnotify_mask; /* Directory notify events */ struct dnotify_struct *i_dnotify; /* for directory notifications */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 6c9ebefdac8..3856eb6e597 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -99,6 +99,14 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, audit_inode_child(new_name, moved, new_dir); } +/* + * fsnotify_inode_delete - and inode is being evicted from cache, clean up is needed + */ +static inline void fsnotify_inode_delete(struct inode *inode) +{ + __fsnotify_inode_delete(inode); +} + /* * fsnotify_nameremove - a filename was removed from a directory */ @@ -121,6 +129,7 @@ static inline void fsnotify_inoderemove(struct inode *inode) inotify_inode_is_dead(inode); fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE); + __fsnotify_inode_delete(inode); } /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 1a55718b38a..cad5c4d75c1 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -48,17 +48,25 @@ struct fsnotify_group; 
struct fsnotify_event; +struct fsnotify_mark_entry; /* * Each group much define these ops. The fsnotify infrastructure will call * these operations for each relevant group. * + * should_send_event - given a group, inode, and mask this function determines + * if the group is interested in this event. * handle_event - main call for a group to handle an fs event * free_group_priv - called when a group refcnt hits 0 to clean up the private union + * freeing-mark - this means that a mark has been flagged to die when everything + * finishes using it. The function is supplied with what must be a + * valid group and inode to use to clean up. */ struct fsnotify_ops { + bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask); int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event); void (*free_group_priv)(struct fsnotify_group *group); + void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group); }; /* @@ -97,7 +105,14 @@ struct fsnotify_group { const struct fsnotify_ops *ops; /* how this group handles things */ - /* prevents double list_del of group_list. protected by global fsnotify_gr_mutex */ + /* stores all fastapth entries assoc with this group so they can be cleaned on unregister */ + spinlock_t mark_lock; /* protect mark_entries list */ + atomic_t num_marks; /* 1 for each mark entry and 1 for not being + * past the point of no return when freeing + * a group */ + struct list_head mark_entries; /* all inode mark entries for this group */ + + /* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */ bool on_group_list; /* groups can define private fields here or use the void *private */ @@ -137,12 +152,38 @@ struct fsnotify_event { __u32 mask; /* the type of access, bitwise OR for FS_* event types */ }; +/* + * a mark is simply an entry attached to an in core inode which allows an + * fsnotify listener to indicate they are either no longer interested in events + * of a type matching mask or only interested in those events. + * + * these are flushed when an inode is evicted from core and may be flushed + * when the inode is modified (as seen by fsnotify_access). Some fsnotify users + * (such as dnotify) will flush these when the open fd is closed and not at + * inode eviction or modification. + */ +struct fsnotify_mark_entry { + __u32 mask; /* mask this mark entry is for */ + /* we hold ref for each i_list and g_list. also one ref for each 'thing' + * in kernel that found and may be using this mark. 
*/ + atomic_t refcnt; /* active things looking at this mark */ + struct inode *inode; /* inode this entry is associated with */ + struct fsnotify_group *group; /* group this mark entry is for */ + struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */ + struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */ + spinlock_t lock; /* protect group, inode, and killme */ + struct list_head free_i_list; /* tmp list used when freeing this mark */ + struct list_head free_g_list; /* tmp list used when freeing this mark */ + void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */ +}; + #ifdef CONFIG_FSNOTIFY /* called from the vfs helpers */ /* main fsnotify call to send events */ extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is); +extern void __fsnotify_inode_delete(struct inode *inode); /* called from fsnotify listeners, such as fanotify or dnotify */ @@ -153,6 +194,8 @@ extern void fsnotify_recalc_global_mask(void); extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask, const struct fsnotify_ops *ops); +/* run all marks associated with this group and update group->mask */ +extern void fsnotify_recalc_group_mask(struct fsnotify_group *group); /* drop reference on a group from fsnotify_obtain_group */ extern void fsnotify_put_group(struct fsnotify_group *group); @@ -163,6 +206,22 @@ extern void fsnotify_put_event(struct fsnotify_event *event); extern struct fsnotify_event_private_data *fsnotify_get_priv_from_event(struct fsnotify_group *group, struct fsnotify_event *event); +/* functions used to manipulate the marks attached to inodes */ + +/* run all marks associated with an inode and update inode->i_fsnotify_mask */ +extern void fsnotify_recalc_inode_mask(struct inode *inode); +extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry)); +/* find (and take a reference) to a mark associated with group and inode */ +extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode); +/* attach the mark to both the group and the inode */ +extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode); +/* given a mark, flag it to be freed when all references are dropped */ +extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry); +/* run all the marks in a group, and flag them to be freed */ +extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); +extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry); +extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry); + /* put here because inotify does some weird stuff when destroying watches */ extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data, int data_is); @@ -170,6 +229,10 @@ extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is) {} + +static inline void __fsnotify_inode_delete(struct inode *inode) +{} + #endif /* CONFIG_FSNOTIFY */ #endif /* __KERNEL __ */ -- cgit v1.2.3-70-g09d2 From 3c5119c05d624f95f4967d16b38c9624b816bdb9 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Thu, 21 May 2009 17:01:33 -0400 Subject: dnotify: reimplement dnotify using fsnotify Reimplement dnotify using fsnotify. 
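[Editor's note: the following userspace snippet is not part of the patch; it is a minimal reminder of the fcntl()-based dnotify interface that this reimplementation preserves, in the style of the fcntl(2) man page example. A directory watch is requested with F_NOTIFY and DN_* flags and reported via a signal carrying the watched descriptor in si_fd.]

#define _GNU_SOURCE		/* for F_SETSIG, F_NOTIFY and the DN_* flags */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t changed_fd = -1;

static void dnotify_handler(int sig, siginfo_t *si, void *ucontext)
{
	changed_fd = si->si_fd;	/* descriptor of the directory that changed */
}

int main(void)
{
	struct sigaction sa;
	int dirfd = open(".", O_RDONLY);

	if (dirfd < 0)
		return 1;

	sa.sa_sigaction = dnotify_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN + 1, &sa, NULL);

	/* use a realtime signal so si_fd is filled in for the handler */
	fcntl(dirfd, F_SETSIG, SIGRTMIN + 1);
	/* watch for creates and modifications; DN_MULTISHOT keeps the watch armed */
	fcntl(dirfd, F_NOTIFY, DN_CREATE | DN_MODIFY | DN_MULTISHOT);

	for (;;) {
		pause();
		if (changed_fd != -1) {
			printf("activity in directory watched via fd %d\n",
			       (int)changed_fd);
			changed_fd = -1;
		}
	}
}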
Signed-off-by: Eric Paris Acked-by: Al Viro Cc: Christoph Hellwig --- MAINTAINERS | 6 +- fs/notify/dnotify/Kconfig | 1 + fs/notify/dnotify/dnotify.c | 469 ++++++++++++++++++++++++++++++--------- include/linux/dnotify.h | 29 +-- include/linux/fs.h | 5 - include/linux/fsnotify.h | 68 ++---- include/linux/fsnotify_backend.h | 3 + 7 files changed, 398 insertions(+), 183 deletions(-) (limited to 'include/linux/fs.h') diff --git a/MAINTAINERS b/MAINTAINERS index ccdb57524e3..96e0c8c6079 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1802,10 +1802,10 @@ F: drivers/char/epca* F: drivers/char/digi* DIRECTORY NOTIFICATION (DNOTIFY) -P: Stephen Rothwell -M: sfr@canb.auug.org.au +P: Eric Paris +M: eparis@parisplace.org L: linux-kernel@vger.kernel.org -S: Supported +S: Maintained F: Documentation/filesystems/dnotify.txt F: fs/notify/dnotify/ F: include/linux/dnotify.h diff --git a/fs/notify/dnotify/Kconfig b/fs/notify/dnotify/Kconfig index 26adf5dfa64..904ff8d5405 100644 --- a/fs/notify/dnotify/Kconfig +++ b/fs/notify/dnotify/Kconfig @@ -1,5 +1,6 @@ config DNOTIFY bool "Dnotify support" + depends on FSNOTIFY default y help Dnotify is a directory-based per-fd file change notification system diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index b0aa2cde80b..d9d80f502c6 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -3,6 +3,9 @@ * * Copyright (C) 2000,2001,2002 Stephen Rothwell * + * Copyright (C) 2009 Eric Paris + * dnotify was largly rewritten to use the new fsnotify infrastructure + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any @@ -21,24 +24,178 @@ #include #include #include +#include int dir_notify_enable __read_mostly = 1; -static struct kmem_cache *dn_cache __read_mostly; +static struct kmem_cache *dnotify_struct_cache __read_mostly; +static struct kmem_cache *dnotify_mark_entry_cache __read_mostly; +static struct fsnotify_group *dnotify_group __read_mostly; +static DEFINE_MUTEX(dnotify_mark_mutex); + +/* + * dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which + * is being watched by dnotify. If multiple userspace applications are watching + * the same directory with dnotify their information is chained in dn + */ +struct dnotify_mark_entry { + struct fsnotify_mark_entry fsn_entry; + struct dnotify_struct *dn; +}; -static void redo_inode_mask(struct inode *inode) +/* + * When a process starts or stops watching an inode the set of events which + * dnotify cares about for that inode may change. This function runs the + * list of everything receiving dnotify events about this directory and calculates + * the set of all those events. After it updates what dnotify is interested in + * it calls the fsnotify function so it can update the set of all events relevant + * to this inode. 
+ */ +static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry) { - unsigned long new_mask; + __u32 new_mask, old_mask; struct dnotify_struct *dn; + struct dnotify_mark_entry *dnentry = container_of(entry, + struct dnotify_mark_entry, + fsn_entry); + + assert_spin_locked(&entry->lock); + old_mask = entry->mask; new_mask = 0; - for (dn = inode->i_dnotify; dn != NULL; dn = dn->dn_next) - new_mask |= dn->dn_mask & ~DN_MULTISHOT; - inode->i_dnotify_mask = new_mask; + for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next) + new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT); + entry->mask = new_mask; + + if (old_mask == new_mask) + return; + + if (entry->inode) + fsnotify_recalc_inode_mask(entry->inode); } +/* + * Mains fsnotify call where events are delivered to dnotify. + * Find the dnotify mark on the relevant inode, run the list of dnotify structs + * on that mark and determine which of them has expressed interest in receiving + * events of this type. When found send the correct process and signal and + * destroy the dnotify struct if it was not registered to receive multiple + * events. + */ +static int dnotify_handle_event(struct fsnotify_group *group, + struct fsnotify_event *event) +{ + struct fsnotify_mark_entry *entry = NULL; + struct dnotify_mark_entry *dnentry; + struct inode *to_tell; + struct dnotify_struct *dn; + struct dnotify_struct **prev; + struct fown_struct *fown; + + to_tell = event->to_tell; + + spin_lock(&to_tell->i_lock); + entry = fsnotify_find_mark_entry(group, to_tell); + spin_unlock(&to_tell->i_lock); + + /* unlikely since we alreay passed dnotify_should_send_event() */ + if (unlikely(!entry)) + return 0; + dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry); + + spin_lock(&entry->lock); + prev = &dnentry->dn; + while ((dn = *prev) != NULL) { + if ((dn->dn_mask & event->mask) == 0) { + prev = &dn->dn_next; + continue; + } + fown = &dn->dn_filp->f_owner; + send_sigio(fown, dn->dn_fd, POLL_MSG); + if (dn->dn_mask & FS_DN_MULTISHOT) + prev = &dn->dn_next; + else { + *prev = dn->dn_next; + kmem_cache_free(dnotify_struct_cache, dn); + dnotify_recalc_inode_mask(entry); + } + } + + spin_unlock(&entry->lock); + fsnotify_put_mark(entry); + + return 0; +} + +/* + * Given an inode and mask determine if dnotify would be interested in sending + * userspace notification for that pair. + */ +static bool dnotify_should_send_event(struct fsnotify_group *group, + struct inode *inode, __u32 mask) +{ + struct fsnotify_mark_entry *entry; + bool send; + + /* !dir_notify_enable should never get here, don't waste time checking + if (!dir_notify_enable) + return 0; */ + + /* not a dir, dnotify doesn't care */ + if (!S_ISDIR(inode->i_mode)) + return false; + + spin_lock(&inode->i_lock); + entry = fsnotify_find_mark_entry(group, inode); + spin_unlock(&inode->i_lock); + + /* no mark means no dnotify watch */ + if (!entry) + return false; + + spin_lock(&entry->lock); + send = (mask & entry->mask) ? 
true : false; + spin_unlock(&entry->lock); + fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry */ + + return send; +} + +static void dnotify_freeing_mark(struct fsnotify_mark_entry *entry, + struct fsnotify_group *group) +{ + /* dnotify doesn't care than an inode is on the way out */ +} + +static void dnotify_free_mark(struct fsnotify_mark_entry *entry) +{ + struct dnotify_mark_entry *dnentry = container_of(entry, + struct dnotify_mark_entry, + fsn_entry); + + BUG_ON(dnentry->dn); + + kmem_cache_free(dnotify_mark_entry_cache, dnentry); +} + +static struct fsnotify_ops dnotify_fsnotify_ops = { + .handle_event = dnotify_handle_event, + .should_send_event = dnotify_should_send_event, + .free_group_priv = NULL, + .freeing_mark = dnotify_freeing_mark, +}; + +/* + * Called every time a file is closed. Looks first for a dnotify mark on the + * inode. If one is found run all of the ->dn entries attached to that + * mark for one relevant to this process closing the file and remove that + * dnotify_struct. If that was the last dnotify_struct also remove the + * fsnotify_mark_entry. + */ void dnotify_flush(struct file *filp, fl_owner_t id) { + struct fsnotify_mark_entry *entry; + struct dnotify_mark_entry *dnentry; struct dnotify_struct *dn; struct dnotify_struct **prev; struct inode *inode; @@ -46,145 +203,243 @@ void dnotify_flush(struct file *filp, fl_owner_t id) inode = filp->f_path.dentry->d_inode; if (!S_ISDIR(inode->i_mode)) return; + spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; + entry = fsnotify_find_mark_entry(dnotify_group, inode); + spin_unlock(&inode->i_lock); + if (!entry) + return; + dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry); + + mutex_lock(&dnotify_mark_mutex); + + spin_lock(&entry->lock); + prev = &dnentry->dn; while ((dn = *prev) != NULL) { if ((dn->dn_owner == id) && (dn->dn_filp == filp)) { *prev = dn->dn_next; - redo_inode_mask(inode); - kmem_cache_free(dn_cache, dn); + kmem_cache_free(dnotify_struct_cache, dn); + dnotify_recalc_inode_mask(entry); break; } prev = &dn->dn_next; } - spin_unlock(&inode->i_lock); + + spin_unlock(&entry->lock); + + /* nothing else could have found us thanks to the dnotify_mark_mutex */ + if (dnentry->dn == NULL) + fsnotify_destroy_mark_by_entry(entry); + + fsnotify_recalc_group_mask(dnotify_group); + + mutex_unlock(&dnotify_mark_mutex); + + fsnotify_put_mark(entry); +} + +/* this conversion is done only at watch creation */ +static __u32 convert_arg(unsigned long arg) +{ + __u32 new_mask = FS_EVENT_ON_CHILD; + + if (arg & DN_MULTISHOT) + new_mask |= FS_DN_MULTISHOT; + if (arg & DN_DELETE) + new_mask |= (FS_DELETE | FS_MOVED_FROM); + if (arg & DN_MODIFY) + new_mask |= FS_MODIFY; + if (arg & DN_ACCESS) + new_mask |= FS_ACCESS; + if (arg & DN_ATTRIB) + new_mask |= FS_ATTRIB; + if (arg & DN_RENAME) + new_mask |= FS_DN_RENAME; + if (arg & DN_CREATE) + new_mask |= (FS_CREATE | FS_MOVED_TO); + + return new_mask; } +/* + * If multiple processes watch the same inode with dnotify there is only one + * dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct + * onto that mark. This function either attaches the new dnotify_struct onto + * that list, or it |= the mask onto an existing dnofiy_struct. + */ +static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry, + fl_owner_t id, int fd, struct file *filp, __u32 mask) +{ + struct dnotify_struct *odn; + + odn = dnentry->dn; + while (odn != NULL) { + /* adding more events to existing dnofiy_struct? 
*/ + if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { + odn->dn_fd = fd; + odn->dn_mask |= mask; + return -EEXIST; + } + odn = odn->dn_next; + } + + dn->dn_mask = mask; + dn->dn_fd = fd; + dn->dn_filp = filp; + dn->dn_owner = id; + dn->dn_next = dnentry->dn; + dnentry->dn = dn; + + return 0; +} + +/* + * When a process calls fcntl to attach a dnotify watch to a directory it ends + * up here. Allocate both a mark for fsnotify to add and a dnotify_struct to be + * attached to the fsnotify_mark. + */ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) { + struct dnotify_mark_entry *new_dnentry, *dnentry; + struct fsnotify_mark_entry *new_entry, *entry; struct dnotify_struct *dn; - struct dnotify_struct *odn; - struct dnotify_struct **prev; struct inode *inode; fl_owner_t id = current->files; struct file *f; - int error = 0; + int destroy = 0, error = 0; + __u32 mask; + + /* we use these to tell if we need to kfree */ + new_entry = NULL; + dn = NULL; + if (!dir_notify_enable) { + error = -EINVAL; + goto out_err; + } + + /* a 0 mask means we are explicitly removing the watch */ if ((arg & ~DN_MULTISHOT) == 0) { dnotify_flush(filp, id); - return 0; + error = 0; + goto out_err; } - if (!dir_notify_enable) - return -EINVAL; + + /* dnotify only works on directories */ inode = filp->f_path.dentry->d_inode; - if (!S_ISDIR(inode->i_mode)) - return -ENOTDIR; - dn = kmem_cache_alloc(dn_cache, GFP_KERNEL); - if (dn == NULL) - return -ENOMEM; - spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((odn = *prev) != NULL) { - if ((odn->dn_owner == id) && (odn->dn_filp == filp)) { - odn->dn_fd = fd; - odn->dn_mask |= arg; - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; - goto out_free; - } - prev = &odn->dn_next; + if (!S_ISDIR(inode->i_mode)) { + error = -ENOTDIR; + goto out_err; } - rcu_read_lock(); - f = fcheck(fd); - rcu_read_unlock(); - /* we'd lost the race with close(), sod off silently */ - /* note that inode->i_lock prevents reordering problems - * between accesses to descriptor table and ->i_dnotify */ - if (f != filp) - goto out_free; + /* expect most fcntl to add new rather than augment old */ + dn = kmem_cache_alloc(dnotify_struct_cache, GFP_KERNEL); + if (!dn) { + error = -ENOMEM; + goto out_err; + } - error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); - if (error) - goto out_free; + /* new fsnotify mark, we expect most fcntl calls to add a new mark */ + new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL); + if (!new_dnentry) { + error = -ENOMEM; + goto out_err; + } - dn->dn_mask = arg; - dn->dn_fd = fd; - dn->dn_filp = filp; - dn->dn_owner = id; - inode->i_dnotify_mask |= arg & ~DN_MULTISHOT; - dn->dn_next = inode->i_dnotify; - inode->i_dnotify = dn; - spin_unlock(&inode->i_lock); - return 0; + /* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */ + mask = convert_arg(arg); -out_free: - spin_unlock(&inode->i_lock); - kmem_cache_free(dn_cache, dn); - return error; -} + /* set up the new_entry and new_dnentry */ + new_entry = &new_dnentry->fsn_entry; + fsnotify_init_mark(new_entry, dnotify_free_mark); + new_entry->mask = mask; + new_dnentry->dn = NULL; -void __inode_dir_notify(struct inode *inode, unsigned long event) -{ - struct dnotify_struct * dn; - struct dnotify_struct **prev; - struct fown_struct * fown; - int changed = 0; + /* this is needed to prevent the fcntl/close race described below */ + mutex_lock(&dnotify_mark_mutex); + /* add the new_entry or find an old one. 
*/ spin_lock(&inode->i_lock); - prev = &inode->i_dnotify; - while ((dn = *prev) != NULL) { - if ((dn->dn_mask & event) == 0) { - prev = &dn->dn_next; - continue; - } - fown = &dn->dn_filp->f_owner; - send_sigio(fown, dn->dn_fd, POLL_MSG); - if (dn->dn_mask & DN_MULTISHOT) - prev = &dn->dn_next; - else { - *prev = dn->dn_next; - changed = 1; - kmem_cache_free(dn_cache, dn); - } - } - if (changed) - redo_inode_mask(inode); + entry = fsnotify_find_mark_entry(dnotify_group, inode); spin_unlock(&inode->i_lock); -} - -EXPORT_SYMBOL(__inode_dir_notify); + if (entry) { + dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry); + spin_lock(&entry->lock); + } else { + fsnotify_add_mark(new_entry, dnotify_group, inode); + spin_lock(&new_entry->lock); + entry = new_entry; + dnentry = new_dnentry; + /* we used new_entry, so don't free it */ + new_entry = NULL; + } -/* - * This is hopelessly wrong, but unfixable without API changes. At - * least it doesn't oops the kernel... - * - * To safely access ->d_parent we need to keep d_move away from it. Use the - * dentry's d_lock for this. - */ -void dnotify_parent(struct dentry *dentry, unsigned long event) -{ - struct dentry *parent; + rcu_read_lock(); + f = fcheck(fd); + rcu_read_unlock(); - if (!dir_notify_enable) - return; + /* if (f != filp) means that we lost a race and another task/thread + * actually closed the fd we are still playing with before we grabbed + * the dnotify_mark_mutex and entry->lock. Since closing the fd is the + * only time we clean up the mark entries we need to get our mark off + * the list. */ + if (f != filp) { + /* if we added ourselves, shoot ourselves, it's possible that + * the flush actually did shoot this entry. That's fine too + * since multiple calls to destroy_mark is perfectly safe, if + * we found a dnentry already attached to the inode, just sod + * off silently as the flush at close time dealt with it. + */ + if (dnentry == new_dnentry) + destroy = 1; + goto out; + } - spin_lock(&dentry->d_lock); - parent = dentry->d_parent; - if (parent->d_inode->i_dnotify_mask & event) { - dget(parent); - spin_unlock(&dentry->d_lock); - __inode_dir_notify(parent->d_inode, event); - dput(parent); - } else { - spin_unlock(&dentry->d_lock); + error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0); + if (error) { + /* if we added, we must shoot */ + if (dnentry == new_dnentry) + destroy = 1; + goto out; } + + error = attach_dn(dn, dnentry, id, fd, filp, mask); + /* !error means that we attached the dn to the dnentry, so don't free it */ + if (!error) + dn = NULL; + /* -EEXIST means that we didn't add this new dn and used an old one. 
+ * that isn't an error (and the unused dn should be freed) */ + else if (error == -EEXIST) + error = 0; + + dnotify_recalc_inode_mask(entry); +out: + spin_unlock(&entry->lock); + + if (destroy) + fsnotify_destroy_mark_by_entry(entry); + + fsnotify_recalc_group_mask(dnotify_group); + + mutex_unlock(&dnotify_mark_mutex); + fsnotify_put_mark(entry); +out_err: + if (new_entry) + fsnotify_put_mark(new_entry); + if (dn) + kmem_cache_free(dnotify_struct_cache, dn); + return error; } -EXPORT_SYMBOL_GPL(dnotify_parent); static int __init dnotify_init(void) { - dn_cache = kmem_cache_create("dnotify_cache", - sizeof(struct dnotify_struct), 0, SLAB_PANIC, NULL); + dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC); + dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC); + + dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM, + 0, &dnotify_fsnotify_ops); + if (IS_ERR(dnotify_group)) + panic("unable to allocate fsnotify group for dnotify\n"); return 0; } diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h index 102a902b439..ecc06286226 100644 --- a/include/linux/dnotify.h +++ b/include/linux/dnotify.h @@ -10,7 +10,7 @@ struct dnotify_struct { struct dnotify_struct * dn_next; - unsigned long dn_mask; + __u32 dn_mask; int dn_fd; struct file * dn_filp; fl_owner_t dn_owner; @@ -21,23 +21,18 @@ struct dnotify_struct { #ifdef CONFIG_DNOTIFY -extern void __inode_dir_notify(struct inode *, unsigned long); +#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\ + FS_MODIFY | FS_MODIFY_CHILD |\ + FS_ACCESS | FS_ACCESS_CHILD |\ + FS_ATTRIB | FS_ATTRIB_CHILD |\ + FS_CREATE | FS_DN_RENAME |\ + FS_MOVED_FROM | FS_MOVED_TO) + extern void dnotify_flush(struct file *, fl_owner_t); extern int fcntl_dirnotify(int, struct file *, unsigned long); -extern void dnotify_parent(struct dentry *, unsigned long); - -static inline void inode_dir_notify(struct inode *inode, unsigned long event) -{ - if (inode->i_dnotify_mask & (event)) - __inode_dir_notify(inode, event); -} #else -static inline void __inode_dir_notify(struct inode *inode, unsigned long event) -{ -} - static inline void dnotify_flush(struct file *filp, fl_owner_t id) { } @@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) return -EINVAL; } -static inline void dnotify_parent(struct dentry *dentry, unsigned long event) -{ -} - -static inline void inode_dir_notify(struct inode *inode, unsigned long event) -{ -} - #endif /* CONFIG_DNOTIFY */ #endif /* __KERNEL __ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 275b0860044..323b5ce474c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -760,11 +760,6 @@ struct inode { struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */ #endif -#ifdef CONFIG_DNOTIFY - unsigned long i_dnotify_mask; /* Directory notify events */ - struct dnotify_struct *i_dnotify; /* for directory notifications */ -#endif - #ifdef CONFIG_INOTIFY struct list_head inotify_watches; /* watches on this inode */ struct mutex inotify_mutex; /* protects the watches list */ diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 6a662ed0bc8..db12d9de352 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -74,13 +74,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, __u32 new_dir_mask = 0; if (old_dir == new_dir) { - inode_dir_notify(old_dir, DN_RENAME); old_dir_mask = FS_DN_RENAME; - } else { - inode_dir_notify(old_dir, DN_DELETE); - old_dir_mask = FS_DELETE; - 
inode_dir_notify(new_dir, DN_CREATE); - new_dir_mask = FS_CREATE; } if (isdir) { @@ -132,7 +126,6 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) if (isdir) mask |= FS_IN_ISDIR; - dnotify_parent(dentry, DN_DELETE); fsnotify_parent(dentry, mask); } @@ -154,7 +147,6 @@ static inline void fsnotify_inoderemove(struct inode *inode) */ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) { - inode_dir_notify(inode, DN_CREATE); inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, dentry->d_inode); audit_inode_child(dentry->d_name.name, dentry, inode); @@ -169,7 +161,6 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) */ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) { - inode_dir_notify(dir, DN_CREATE); inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, inode); fsnotify_link_count(inode); @@ -186,7 +177,6 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) __u32 mask = (FS_CREATE | FS_IN_ISDIR); struct inode *d_inode = dentry->d_inode; - inode_dir_notify(inode, DN_CREATE); inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode); audit_inode_child(dentry->d_name.name, dentry, inode); @@ -204,7 +194,6 @@ static inline void fsnotify_access(struct dentry *dentry) if (S_ISDIR(inode->i_mode)) mask |= FS_IN_ISDIR; - dnotify_parent(dentry, DN_ACCESS); inotify_inode_queue_event(inode, mask, 0, NULL, NULL); fsnotify_parent(dentry, mask); @@ -222,7 +211,6 @@ static inline void fsnotify_modify(struct dentry *dentry) if (S_ISDIR(inode->i_mode)) mask |= FS_IN_ISDIR; - dnotify_parent(dentry, DN_MODIFY); inotify_inode_queue_event(inode, mask, 0, NULL, NULL); fsnotify_parent(dentry, mask); @@ -289,47 +277,33 @@ static inline void fsnotify_xattr(struct dentry *dentry) static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) { struct inode *inode = dentry->d_inode; - int dn_mask = 0; - __u32 in_mask = 0; + __u32 mask = 0; + + if (ia_valid & ATTR_UID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_GID) + mask |= FS_ATTRIB; + if (ia_valid & ATTR_SIZE) + mask |= FS_MODIFY; - if (ia_valid & ATTR_UID) { - in_mask |= FS_ATTRIB; - dn_mask |= DN_ATTRIB; - } - if (ia_valid & ATTR_GID) { - in_mask |= FS_ATTRIB; - dn_mask |= DN_ATTRIB; - } - if (ia_valid & ATTR_SIZE) { - in_mask |= FS_MODIFY; - dn_mask |= DN_MODIFY; - } /* both times implies a utime(s) call */ if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) - { - in_mask |= FS_ATTRIB; - dn_mask |= DN_ATTRIB; - } else if (ia_valid & ATTR_ATIME) { - in_mask |= FS_ACCESS; - dn_mask |= DN_ACCESS; - } else if (ia_valid & ATTR_MTIME) { - in_mask |= FS_MODIFY; - dn_mask |= DN_MODIFY; - } - if (ia_valid & ATTR_MODE) { - in_mask |= FS_ATTRIB; - dn_mask |= DN_ATTRIB; - } + mask |= FS_ATTRIB; + else if (ia_valid & ATTR_ATIME) + mask |= FS_ACCESS; + else if (ia_valid & ATTR_MTIME) + mask |= FS_MODIFY; + + if (ia_valid & ATTR_MODE) + mask |= FS_ATTRIB; - if (dn_mask) - dnotify_parent(dentry, dn_mask); - if (in_mask) { + if (mask) { if (S_ISDIR(inode->i_mode)) - in_mask |= FS_IN_ISDIR; - inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL); + mask |= FS_IN_ISDIR; + inotify_inode_queue_event(inode, mask, 0, NULL, NULL); - fsnotify_parent(dentry, in_mask); - fsnotify(inode, in_mask, inode, FSNOTIFY_EVENT_INODE); + fsnotify_parent(dentry, mask); + fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE); } } diff --git 
a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 13d2dd57004..9ea800e840f 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -57,6 +57,9 @@ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ FS_DELETE) +/* listeners that hard code group numbers near the top */ +#define DNOTIFY_GROUP_NUM UINT_MAX + struct fsnotify_group; struct fsnotify_event; struct fsnotify_mark_entry; -- cgit v1.2.3-70-g09d2 From 589ff870ed60a9ebdd5ec99ec3f5afe1282fe151 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 18 Apr 2009 03:28:19 -0400 Subject: Switch collect_mounts() to struct path Signed-off-by: Al Viro --- fs/namespace.c | 4 ++-- include/linux/fs.h | 2 +- kernel/audit_tree.c | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/namespace.c b/fs/namespace.c index 88a904d5aa2..c85962206aa 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1253,11 +1253,11 @@ Enomem: return NULL; } -struct vfsmount *collect_mounts(struct vfsmount *mnt, struct dentry *dentry) +struct vfsmount *collect_mounts(struct path *path) { struct vfsmount *tree; down_write(&namespace_sem); - tree = copy_tree(mnt, dentry, CL_COPY_ALL | CL_PRIVATE); + tree = copy_tree(path->mnt, path->dentry, CL_COPY_ALL | CL_PRIVATE); up_write(&namespace_sem); return tree; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 323b5ce474c..03fb2102b8f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1800,7 +1800,7 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); extern int may_umount_tree(struct vfsmount *); extern int may_umount(struct vfsmount *); extern long do_mount(char *, char *, char *, unsigned long, void *); -extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *); +extern struct vfsmount *collect_mounts(struct path *); extern void drop_collected_mounts(struct vfsmount *); extern int vfs_statfs(struct dentry *, struct kstatfs *); diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 6e7351739a8..1f6396d7668 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -568,7 +568,7 @@ void audit_trim_trees(void) if (err) goto skip_it; - root_mnt = collect_mounts(path.mnt, path.dentry); + root_mnt = collect_mounts(&path); path_put(&path); if (!root_mnt) goto skip_it; @@ -660,7 +660,7 @@ int audit_add_tree_rule(struct audit_krule *rule) err = kern_path(tree->pathname, 0, &path); if (err) goto Err; - mnt = collect_mounts(path.mnt, path.dentry); + mnt = collect_mounts(&path); path_put(&path); if (!mnt) { err = -ENOMEM; @@ -720,7 +720,7 @@ int audit_tag_tree(char *old, char *new) err = kern_path(new, 0, &path); if (err) return err; - tagged = collect_mounts(path.mnt, path.dentry); + tagged = collect_mounts(&path); path_put(&path); if (!tagged) return -ENOMEM; -- cgit v1.2.3-70-g09d2 From 876a9f76abbcb775f8d21cbc99fa161f9e5937f1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 28 Apr 2009 18:05:55 +0200 Subject: remove s_async_list Remove the unused s_async_list in the superblock, a leftover of the broken async inode deletion code that leaked into mainline. Having this in the middle of the sync/unmount path is not helpful for the following cleanups. 
Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/super.c | 8 -------- include/linux/fs.h | 5 ----- 2 files changed, 13 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/super.c b/fs/super.c index c170551c23f..3d9f117dd2a 100644 --- a/fs/super.c +++ b/fs/super.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include "internal.h" @@ -72,7 +71,6 @@ static struct super_block *alloc_super(struct file_system_type *type) INIT_HLIST_HEAD(&s->s_anon); INIT_LIST_HEAD(&s->s_inodes); INIT_LIST_HEAD(&s->s_dentry_lru); - INIT_LIST_HEAD(&s->s_async_list); init_rwsem(&s->s_umount); mutex_init(&s->s_lock); lockdep_set_class(&s->s_umount, &type->s_umount_key); @@ -342,11 +340,6 @@ void generic_shutdown_super(struct super_block *sb) lock_super(sb); sb->s_flags &= ~MS_ACTIVE; - /* - * wait for asynchronous fs operations to finish before going further - */ - async_synchronize_full_domain(&sb->s_async_list); - /* bad name - it should be evict_inodes() */ invalidate_inodes(sb); lock_kernel(); @@ -517,7 +510,6 @@ restart: sb->s_count++; spin_unlock(&sb_lock); down_read(&sb->s_umount); - async_synchronize_full_domain(&sb->s_async_list); if (sb->s_root && (wait || sb->s_dirt)) sb->s_op->sync_fs(sb, wait); up_read(&sb->s_umount); diff --git a/include/linux/fs.h b/include/linux/fs.h index 03fb2102b8f..36bcff7036e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1372,11 +1372,6 @@ struct super_block { * generic_show_options() */ char *s_options; - - /* - * storage for asynchronous operations - */ - struct list_head s_async_list; }; extern struct timespec current_fs_time(struct super_block *sb); -- cgit v1.2.3-70-g09d2 From 429479f031322a0cc5c921ffb2321a51718dc875 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 27 Apr 2009 16:43:50 +0200 Subject: vfs: Make __fsync_super() a static function (version 4) __fsync_super() does the same thing as fsync_super(). So change the only caller to use fsync_super() and make __fsync_super() static. This removes unnecessarily duplicated call to sync_blockdev() and prepares ground for the changes to __fsync_super() in the following patches. Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/block_dev.c | 2 +- fs/super.c | 7 +++---- include/linux/fs.h | 1 - 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/block_dev.c b/fs/block_dev.c index 931f6b8c4b2..fe47f722761 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -241,7 +241,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) sb->s_frozen = SB_FREEZE_WRITE; smp_wmb(); - __fsync_super(sb); + fsync_super(sb); sb->s_frozen = SB_FREEZE_TRANS; smp_wmb(); diff --git a/fs/super.c b/fs/super.c index fae91ba38e4..8dbe1ead9dd 100644 --- a/fs/super.c +++ b/fs/super.c @@ -289,7 +289,7 @@ EXPORT_SYMBOL(unlock_super); * device. Takes the superblock lock. Requires a second blkdev * flush by the caller to complete the operation. 
*/ -void __fsync_super(struct super_block *sb) +static int __fsync_super(struct super_block *sb) { sync_inodes_sb(sb, 0); vfs_dq_sync(sb); @@ -300,7 +300,7 @@ void __fsync_super(struct super_block *sb) unlock_super(sb); if (sb->s_op->sync_fs) sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); + return sync_blockdev(sb->s_bdev); } /* @@ -310,8 +310,7 @@ void __fsync_super(struct super_block *sb) */ int fsync_super(struct super_block *sb) { - __fsync_super(sb); - return sync_blockdev(sb->s_bdev); + return __fsync_super(sb); } EXPORT_SYMBOL_GPL(fsync_super); diff --git a/include/linux/fs.h b/include/linux/fs.h index 36bcff7036e..41a9907f342 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2078,7 +2078,6 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); extern void sync_supers(void); extern void sync_filesystems(int wait); -extern void __fsync_super(struct super_block *sb); extern void emergency_sync(void); extern void emergency_remount(void); extern int do_remount_sb(struct super_block *sb, int flags, -- cgit v1.2.3-70-g09d2 From 5cee5815d1564bbbd505fea86f4550f1efdb5cd0 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 27 Apr 2009 16:43:51 +0200 Subject: vfs: Make sys_sync() use fsync_super() (version 4) It is unnecessarily fragile to have two places (fsync_super() and do_sync()) doing data integrity sync of the filesystem. Alter __fsync_super() to accommodate needs of both callers and use it. So after this patch __fsync_super() is the only place where we gather all the calls needed to properly send all data on a filesystem to disk. Nice bonus is that we get a complete livelock avoidance and write_supers() is now only used for periodic writeback of superblocks. sync_blockdevs() introduced a couple of patches ago is gone now. [build fixes folded] Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/block_dev.c | 15 ++++++---- fs/fs-writeback.c | 49 -------------------------------- fs/internal.h | 16 +++++------ fs/super.c | 72 +++++++++++++++-------------------------------- fs/sync.c | 31 +++++++------------- include/linux/fs.h | 2 +- include/linux/writeback.h | 1 - 7 files changed, 51 insertions(+), 135 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/block_dev.c b/fs/block_dev.c index fe47f722761..4b6a3b9d01e 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -176,17 +176,22 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, iov, offset, nr_segs, blkdev_get_blocks, NULL); } +int __sync_blockdev(struct block_device *bdev, int wait) +{ + if (!bdev) + return 0; + if (!wait) + return filemap_flush(bdev->bd_inode->i_mapping); + return filemap_write_and_wait(bdev->bd_inode->i_mapping); +} + /* * Write out and wait upon all the dirty data associated with a block * device via its mapping. Does not take the superblock lock. 
*/ int sync_blockdev(struct block_device *bdev) { - int ret = 0; - - if (bdev) - ret = filemap_write_and_wait(bdev->bd_inode->i_mapping); - return ret; + return __sync_blockdev(bdev, 1); } EXPORT_SYMBOL(sync_blockdev); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 91013ff7dd5..e0fb2e78959 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -678,55 +678,6 @@ void sync_inodes_sb(struct super_block *sb, int wait) sync_sb_inodes(sb, &wbc); } -/** - * sync_inodes - writes all inodes to disk - * @wait: wait for completion - * - * sync_inodes() goes through each super block's dirty inode list, writes the - * inodes out, waits on the writeout and puts the inodes back on the normal - * list. - * - * This is for sys_sync(). fsync_dev() uses the same algorithm. The subtle - * part of the sync functions is that the blockdev "superblock" is processed - * last. This is because the write_inode() function of a typical fs will - * perform no I/O, but will mark buffers in the blockdev mapping as dirty. - * What we want to do is to perform all that dirtying first, and then write - * back all those inode blocks via the blockdev mapping in one sweep. So the - * additional (somewhat redundant) sync_blockdev() calls here are to make - * sure that really happens. Because if we call sync_inodes_sb(wait=1) with - * outstanding dirty inodes, the writeback goes block-at-a-time within the - * filesystem's write_inode(). This is extremely slow. - */ -static void __sync_inodes(int wait) -{ - struct super_block *sb; - - spin_lock(&sb_lock); -restart: - list_for_each_entry(sb, &super_blocks, s_list) { - sb->s_count++; - spin_unlock(&sb_lock); - down_read(&sb->s_umount); - if (sb->s_root) { - sync_inodes_sb(sb, wait); - sync_blockdev(sb->s_bdev); - } - up_read(&sb->s_umount); - spin_lock(&sb_lock); - if (__put_super_and_need_restart(sb)) - goto restart; - } - spin_unlock(&sb_lock); -} - -void sync_inodes(int wait) -{ - __sync_inodes(0); - - if (wait) - __sync_inodes(1); -} - /** * write_inode_now - write an inode to disk * @inode: inode to write to disk diff --git a/fs/internal.h b/fs/internal.h index 343a537ab80..dbec3cc2833 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -25,6 +25,8 @@ static inline int sb_is_blkdev_sb(struct super_block *sb) return sb == blockdev_superblock; } +extern int __sync_blockdev(struct block_device *bdev, int wait); + #else static inline void bdev_cache_init(void) { @@ -34,6 +36,11 @@ static inline int sb_is_blkdev_sb(struct super_block *sb) { return 0; } + +static inline int __sync_blockdev(struct block_device *bdev, int wait) +{ + return 0; +} #endif /* @@ -71,12 +78,3 @@ extern void chroot_fs_refs(struct path *, struct path *); * file_table.c */ extern void mark_files_ro(struct super_block *); - -/* - * super.c - */ -#ifdef CONFIG_BLOCK -extern void sync_blockdevs(void); -#else -static inline void sync_blockdevs(void) { } -#endif diff --git a/fs/super.c b/fs/super.c index 8dbe1ead9dd..c8ce5ed0424 100644 --- a/fs/super.c +++ b/fs/super.c @@ -284,23 +284,23 @@ EXPORT_SYMBOL(lock_super); EXPORT_SYMBOL(unlock_super); /* - * Write out and wait upon all dirty data associated with this - * superblock. Filesystem data as well as the underlying block - * device. Takes the superblock lock. Requires a second blkdev - * flush by the caller to complete the operation. + * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0) + * just dirties buffers with inodes so we have to submit IO for these buffers + * via __sync_blockdev(). 
This also speeds up the wait == 1 case since in that + * case write_inode() functions do sync_dirty_buffer() and thus effectively + * write one block at a time. */ -static int __fsync_super(struct super_block *sb) +static int __fsync_super(struct super_block *sb, int wait) { - sync_inodes_sb(sb, 0); vfs_dq_sync(sb); - sync_inodes_sb(sb, 1); + sync_inodes_sb(sb, wait); lock_super(sb); if (sb->s_dirt && sb->s_op->write_super) sb->s_op->write_super(sb); unlock_super(sb); if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - return sync_blockdev(sb->s_bdev); + sb->s_op->sync_fs(sb, wait); + return __sync_blockdev(sb->s_bdev, wait); } /* @@ -310,7 +310,12 @@ static int __fsync_super(struct super_block *sb) */ int fsync_super(struct super_block *sb) { - return __fsync_super(sb); + int ret; + + ret = __fsync_super(sb, 0); + if (ret < 0) + return ret; + return __fsync_super(sb, 1); } EXPORT_SYMBOL_GPL(fsync_super); @@ -469,20 +474,18 @@ restart: } /* - * Call the ->sync_fs super_op against all filesystems which are r/w and - * which implement it. + * Sync all the data for all the filesystems (called by sys_sync() and + * emergency sync) * * This operation is careful to avoid the livelock which could easily happen - * if two or more filesystems are being continuously dirtied. s_need_sync_fs + * if two or more filesystems are being continuously dirtied. s_need_sync * is used only here. We set it against all filesystems and then clear it as * we sync them. So redirtied filesystems are skipped. * * But if process A is currently running sync_filesystems and then process B - * calls sync_filesystems as well, process B will set all the s_need_sync_fs + * calls sync_filesystems as well, process B will set all the s_need_sync * flags again, which will cause process A to resync everything. Fix that with * a local mutex. - * - * (Fabian) Avoid sync_fs with clean fs & wait mode 0 */ void sync_filesystems(int wait) { @@ -492,25 +495,23 @@ void sync_filesystems(int wait) mutex_lock(&mutex); /* Could be down_interruptible */ spin_lock(&sb_lock); list_for_each_entry(sb, &super_blocks, s_list) { - if (!sb->s_op->sync_fs) - continue; if (sb->s_flags & MS_RDONLY) continue; - sb->s_need_sync_fs = 1; + sb->s_need_sync = 1; } restart: list_for_each_entry(sb, &super_blocks, s_list) { - if (!sb->s_need_sync_fs) + if (!sb->s_need_sync) continue; - sb->s_need_sync_fs = 0; + sb->s_need_sync = 0; if (sb->s_flags & MS_RDONLY) continue; /* hm. 
Was remounted r/o meanwhile */ sb->s_count++; spin_unlock(&sb_lock); down_read(&sb->s_umount); if (sb->s_root) - sb->s_op->sync_fs(sb, wait); + __fsync_super(sb, wait); up_read(&sb->s_umount); /* restart only when sb is no longer on the list */ spin_lock(&sb_lock); @@ -521,33 +522,6 @@ restart: mutex_unlock(&mutex); } -#ifdef CONFIG_BLOCK -/* - * Sync all block devices underlying some superblock - */ -void sync_blockdevs(void) -{ - struct super_block *sb; - - spin_lock(&sb_lock); -restart: - list_for_each_entry(sb, &super_blocks, s_list) { - if (!sb->s_bdev) - continue; - sb->s_count++; - spin_unlock(&sb_lock); - down_read(&sb->s_umount); - if (sb->s_root) - sync_blockdev(sb->s_bdev); - up_read(&sb->s_umount); - spin_lock(&sb_lock); - if (__put_super_and_need_restart(sb)) - goto restart; - } - spin_unlock(&sb_lock); -} -#endif - /** * get_super - get the superblock of a device * @bdev: device to get the superblock for diff --git a/fs/sync.c b/fs/sync.c index 631fd5aece7..be0798cc33d 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -18,35 +18,24 @@ #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) -/* - * sync everything. Start out by waking pdflush, because that writes back - * all queues in parallel. - */ -static void do_sync(unsigned long wait) +SYSCALL_DEFINE0(sync) { - wakeup_pdflush(0); - sync_inodes(0); /* All mappings, inodes and their blockdevs */ - vfs_dq_sync(NULL); - sync_inodes(wait); /* Mappings, inodes and blockdevs, again. */ - sync_supers(); /* Write the superblocks */ - sync_filesystems(0); /* Start syncing the filesystems */ - sync_filesystems(wait); /* Waitingly sync the filesystems */ - sync_blockdevs(); - if (!wait) - printk("Emergency Sync complete\n"); + sync_filesystems(0); + sync_filesystems(1); if (unlikely(laptop_mode)) laptop_sync_completion(); -} - -SYSCALL_DEFINE0(sync) -{ - do_sync(1); return 0; } static void do_sync_work(struct work_struct *work) { - do_sync(0); + /* + * Sync twice to reduce the possibility we skipped some inodes / pages + * because they were temporarily locked + */ + sync_filesystems(0); + sync_filesystems(0); + printk("Emergency Sync complete\n"); kfree(work); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 41a9907f342..f00df653cf2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1321,7 +1321,7 @@ struct super_block { struct rw_semaphore s_umount; struct mutex s_lock; int s_count; - int s_need_sync_fs; + int s_need_sync; atomic_t s_active; #ifdef CONFIG_SECURITY void *s_security; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 93445477f86..3224820c851 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -79,7 +79,6 @@ struct writeback_control { void writeback_inodes(struct writeback_control *wbc); int inode_wait(void *); void sync_inodes_sb(struct super_block *, int wait); -void sync_inodes(int wait); /* writeback.h requires fs.h; it, too, is not included from here. */ static inline void wait_on_inode(struct inode *inode) -- cgit v1.2.3-70-g09d2 From c15c54f5f056ee4819da9fde59a5f2cd45445f23 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 27 Apr 2009 16:43:52 +0200 Subject: vfs: Move syncing code from super.c to sync.c (version 4) Move sync_filesystems(), __fsync_super(), fsync_super() from super.c to sync.c where it fits better. 
[build fixes folded] Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/super.c | 85 ------------------------------------------------------ fs/sync.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/fs.h | 3 +- 3 files changed, 86 insertions(+), 87 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/super.c b/fs/super.c index c8ce5ed0424..f822c74f25b 100644 --- a/fs/super.c +++ b/fs/super.c @@ -283,42 +283,6 @@ void unlock_super(struct super_block * sb) EXPORT_SYMBOL(lock_super); EXPORT_SYMBOL(unlock_super); -/* - * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0) - * just dirties buffers with inodes so we have to submit IO for these buffers - * via __sync_blockdev(). This also speeds up the wait == 1 case since in that - * case write_inode() functions do sync_dirty_buffer() and thus effectively - * write one block at a time. - */ -static int __fsync_super(struct super_block *sb, int wait) -{ - vfs_dq_sync(sb); - sync_inodes_sb(sb, wait); - lock_super(sb); - if (sb->s_dirt && sb->s_op->write_super) - sb->s_op->write_super(sb); - unlock_super(sb); - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, wait); - return __sync_blockdev(sb->s_bdev, wait); -} - -/* - * Write out and wait upon all dirty data associated with this - * superblock. Filesystem data as well as the underlying block - * device. Takes the superblock lock. - */ -int fsync_super(struct super_block *sb) -{ - int ret; - - ret = __fsync_super(sb, 0); - if (ret < 0) - return ret; - return __fsync_super(sb, 1); -} -EXPORT_SYMBOL_GPL(fsync_super); - /** * generic_shutdown_super - common helper for ->kill_sb() * @sb: superblock to kill @@ -473,55 +437,6 @@ restart: spin_unlock(&sb_lock); } -/* - * Sync all the data for all the filesystems (called by sys_sync() and - * emergency sync) - * - * This operation is careful to avoid the livelock which could easily happen - * if two or more filesystems are being continuously dirtied. s_need_sync - * is used only here. We set it against all filesystems and then clear it as - * we sync them. So redirtied filesystems are skipped. - * - * But if process A is currently running sync_filesystems and then process B - * calls sync_filesystems as well, process B will set all the s_need_sync - * flags again, which will cause process A to resync everything. Fix that with - * a local mutex. - */ -void sync_filesystems(int wait) -{ - struct super_block *sb; - static DEFINE_MUTEX(mutex); - - mutex_lock(&mutex); /* Could be down_interruptible */ - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - if (sb->s_flags & MS_RDONLY) - continue; - sb->s_need_sync = 1; - } - -restart: - list_for_each_entry(sb, &super_blocks, s_list) { - if (!sb->s_need_sync) - continue; - sb->s_need_sync = 0; - if (sb->s_flags & MS_RDONLY) - continue; /* hm. 
Was remounted r/o meanwhile */ - sb->s_count++; - spin_unlock(&sb_lock); - down_read(&sb->s_umount); - if (sb->s_root) - __fsync_super(sb, wait); - up_read(&sb->s_umount); - /* restart only when sb is no longer on the list */ - spin_lock(&sb_lock); - if (__put_super_and_need_restart(sb)) - goto restart; - } - spin_unlock(&sb_lock); - mutex_unlock(&mutex); -} - /** * get_super - get the superblock of a device * @bdev: device to get the superblock for diff --git a/fs/sync.c b/fs/sync.c index be0798cc33d..d5fa7b79982 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -18,6 +18,91 @@ #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \ SYNC_FILE_RANGE_WAIT_AFTER) +/* + * Do the filesystem syncing work. For simple filesystems sync_inodes_sb(sb, 0) + * just dirties buffers with inodes so we have to submit IO for these buffers + * via __sync_blockdev(). This also speeds up the wait == 1 case since in that + * case write_inode() functions do sync_dirty_buffer() and thus effectively + * write one block at a time. + */ +static int __fsync_super(struct super_block *sb, int wait) +{ + vfs_dq_sync(sb); + sync_inodes_sb(sb, wait); + lock_super(sb); + if (sb->s_dirt && sb->s_op->write_super) + sb->s_op->write_super(sb); + unlock_super(sb); + if (sb->s_op->sync_fs) + sb->s_op->sync_fs(sb, wait); + return __sync_blockdev(sb->s_bdev, wait); +} + +/* + * Write out and wait upon all dirty data associated with this + * superblock. Filesystem data as well as the underlying block + * device. Takes the superblock lock. + */ +int fsync_super(struct super_block *sb) +{ + int ret; + + ret = __fsync_super(sb, 0); + if (ret < 0) + return ret; + return __fsync_super(sb, 1); +} +EXPORT_SYMBOL_GPL(fsync_super); + +/* + * Sync all the data for all the filesystems (called by sys_sync() and + * emergency sync) + * + * This operation is careful to avoid the livelock which could easily happen + * if two or more filesystems are being continuously dirtied. s_need_sync + * is used only here. We set it against all filesystems and then clear it as + * we sync them. So redirtied filesystems are skipped. + * + * But if process A is currently running sync_filesystems and then process B + * calls sync_filesystems as well, process B will set all the s_need_sync + * flags again, which will cause process A to resync everything. Fix that with + * a local mutex. + */ +static void sync_filesystems(int wait) +{ + struct super_block *sb; + static DEFINE_MUTEX(mutex); + + mutex_lock(&mutex); /* Could be down_interruptible */ + spin_lock(&sb_lock); + list_for_each_entry(sb, &super_blocks, s_list) { + if (sb->s_flags & MS_RDONLY) + continue; + sb->s_need_sync = 1; + } + +restart: + list_for_each_entry(sb, &super_blocks, s_list) { + if (!sb->s_need_sync) + continue; + sb->s_need_sync = 0; + if (sb->s_flags & MS_RDONLY) + continue; /* hm. 
Was remounted r/o meanwhile */ + sb->s_count++; + spin_unlock(&sb_lock); + down_read(&sb->s_umount); + if (sb->s_root) + __fsync_super(sb, wait); + up_read(&sb->s_umount); + /* restart only when sb is no longer on the list */ + spin_lock(&sb_lock); + if (__put_super_and_need_restart(sb)) + goto restart; + } + spin_unlock(&sb_lock); + mutex_unlock(&mutex); +} + SYSCALL_DEFINE0(sync) { sync_filesystems(0); diff --git a/include/linux/fs.h b/include/linux/fs.h index f00df653cf2..d3f7159993c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1942,7 +1942,6 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); -extern int fsync_super(struct super_block *); extern int fsync_no_super(struct block_device *); #else static inline void bd_forget(struct inode *inode) {} @@ -1959,6 +1958,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) return 0; } #endif +extern int fsync_super(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; extern const struct file_operations bad_sock_fops; @@ -2077,7 +2077,6 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); extern void sync_supers(void); -extern void sync_filesystems(int wait); extern void emergency_sync(void); extern void emergency_remount(void); extern int do_remount_sb(struct super_block *sb, int flags, -- cgit v1.2.3-70-g09d2 From 60b0680fa236ac4e17ce31a50048c9d75f9ec831 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 27 Apr 2009 16:43:53 +0200 Subject: vfs: Rename fsync_super() to sync_filesystem() (version 4) Rename the function so that it better describe what it really does. Also remove the unnecessary include of buffer_head.h. 
Signed-off-by: Jan Kara Signed-off-by: Al Viro --- fs/block_dev.c | 4 ++-- fs/cachefiles/interface.c | 2 +- fs/super.c | 5 ++--- fs/sync.c | 12 ++++++------ include/linux/fs.h | 2 +- 5 files changed, 12 insertions(+), 13 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/block_dev.c b/fs/block_dev.c index 4b6a3b9d01e..3a6d4fb2a32 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -204,7 +204,7 @@ int fsync_bdev(struct block_device *bdev) { struct super_block *sb = get_super(bdev); if (sb) { - int res = fsync_super(sb); + int res = sync_filesystem(sb); drop_super(sb); return res; } @@ -246,7 +246,7 @@ struct super_block *freeze_bdev(struct block_device *bdev) sb->s_frozen = SB_FREEZE_WRITE; smp_wmb(); - fsync_super(sb); + sync_filesystem(sb); sb->s_frozen = SB_FREEZE_TRANS; smp_wmb(); diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 1e962348d11..dafd484d7bd 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -354,7 +354,7 @@ static void cachefiles_sync_cache(struct fscache_cache *_cache) /* make sure all pages pinned by operations on behalf of the netfs are * written to disc */ cachefiles_begin_secure(cache, &saved_cred); - ret = fsync_super(cache->mnt->mnt_sb); + ret = sync_filesystem(cache->mnt->mnt_sb); cachefiles_end_secure(cache, saved_cred); if (ret == -EIO) diff --git a/fs/super.c b/fs/super.c index f822c74f25b..721236e6177 100644 --- a/fs/super.c +++ b/fs/super.c @@ -28,7 +28,6 @@ #include #include #include -#include /* for fsync_super() */ #include #include #include @@ -304,7 +303,7 @@ void generic_shutdown_super(struct super_block *sb) if (sb->s_root) { shrink_dcache_for_umount(sb); - fsync_super(sb); + sync_filesystem(sb); lock_super(sb); sb->s_flags &= ~MS_ACTIVE; @@ -543,7 +542,7 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force) if (flags & MS_RDONLY) acct_auto_close(sb); shrink_dcache_sb(sb); - fsync_super(sb); + sync_filesystem(sb); /* If we are remounting RDONLY and current sb is read/write, make sure there are no rw files opened */ diff --git a/fs/sync.c b/fs/sync.c index d5fa7b79982..8aa870a4d40 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -25,7 +25,7 @@ * case write_inode() functions do sync_dirty_buffer() and thus effectively * write one block at a time. */ -static int __fsync_super(struct super_block *sb, int wait) +static int __sync_filesystem(struct super_block *sb, int wait) { vfs_dq_sync(sb); sync_inodes_sb(sb, wait); @@ -43,16 +43,16 @@ static int __fsync_super(struct super_block *sb, int wait) * superblock. Filesystem data as well as the underlying block * device. Takes the superblock lock. 
*/ -int fsync_super(struct super_block *sb) +int sync_filesystem(struct super_block *sb) { int ret; - ret = __fsync_super(sb, 0); + ret = __sync_filesystem(sb, 0); if (ret < 0) return ret; - return __fsync_super(sb, 1); + return __sync_filesystem(sb, 1); } -EXPORT_SYMBOL_GPL(fsync_super); +EXPORT_SYMBOL_GPL(sync_filesystem); /* * Sync all the data for all the filesystems (called by sys_sync() and @@ -92,7 +92,7 @@ restart: spin_unlock(&sb_lock); down_read(&sb->s_umount); if (sb->s_root) - __fsync_super(sb, wait); + __sync_filesystem(sb, wait); up_read(&sb->s_umount); /* restart only when sb is no longer on the list */ spin_lock(&sb_lock); diff --git a/include/linux/fs.h b/include/linux/fs.h index d3f7159993c..fb1822bed7c 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1958,7 +1958,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) return 0; } #endif -extern int fsync_super(struct super_block *); +extern int sync_filesystem(struct super_block *); extern const struct file_operations def_blk_fops; extern const struct file_operations def_chr_fops; extern const struct file_operations bad_sock_fops; -- cgit v1.2.3-70-g09d2 From 62c6943b4b1e818aea60c11c5a68a50785b83119 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 7 May 2009 03:12:29 -0400 Subject: Trim a bit of crap from fs.h do_remount_sb() is fs/internal.h fodder, fsync_no_super() is long gone. Signed-off-by: Al Viro --- fs/internal.h | 5 +++++ include/linux/fs.h | 3 --- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/internal.h b/fs/internal.h index dbec3cc2833..d55ef562f0b 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -78,3 +78,8 @@ extern void chroot_fs_refs(struct path *, struct path *); * file_table.c */ extern void mark_files_ro(struct super_block *); + +/* + * super.c + */ +extern int do_remount_sb(struct super_block *, int, void *, int); diff --git a/include/linux/fs.h b/include/linux/fs.h index fb1822bed7c..e7833ef5d1d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1942,7 +1942,6 @@ extern struct super_block *freeze_bdev(struct block_device *); extern void emergency_thaw_all(void); extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); extern int fsync_bdev(struct block_device *); -extern int fsync_no_super(struct block_device *); #else static inline void bd_forget(struct inode *inode) {} static inline int sync_blockdev(struct block_device *bdev) { return 0; } @@ -2079,8 +2078,6 @@ extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); extern void sync_supers(void); extern void emergency_sync(void); extern void emergency_remount(void); -extern int do_remount_sb(struct super_block *sb, int flags, - void *data, int force); #ifdef CONFIG_BLOCK extern sector_t bmap(struct inode *, sector_t); #endif -- cgit v1.2.3-70-g09d2 From 9fd5746fd3d7838bf6ff991d50f1257057d1156f Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 21 May 2009 16:01:00 -0400 Subject: fs: Remove i_cindex from struct inode The only user of the i_cindex element in the inode structure is used is by the firewire drivers. As part of an attempt to slim down the inode structure to save memory --- since a typical Linux system will have hundreds of thousands if not millions of inodes cached, a reduction in the size inode has high leverage. The firewire driver does not need i_cindex in any fast path, so it's simple enough to calculate when it is needed, instead of wasting space in the inode structure. 
Signed-off-by: "Theodore Ts'o" Cc: krh@redhat.com Cc: stefanr@s5r6.in-berlin.de Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Al Viro --- drivers/ieee1394/dv1394.c | 5 +++-- drivers/ieee1394/ieee1394_core.h | 6 +++++- fs/char_dev.c | 14 +++++++++++++- include/linux/cdev.h | 2 ++ include/linux/fs.h | 1 - 5 files changed, 23 insertions(+), 5 deletions(-) (limited to 'include/linux/fs.h') diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c index 823a6297a1a..2cd00b5b45b 100644 --- a/drivers/ieee1394/dv1394.c +++ b/drivers/ieee1394/dv1394.c @@ -1789,12 +1789,13 @@ static int dv1394_open(struct inode *inode, struct file *file) } else { /* look up the card by ID */ unsigned long flags; + int idx = ieee1394_file_to_instance(file); spin_lock_irqsave(&dv1394_cards_lock, flags); if (!list_empty(&dv1394_cards)) { struct video_card *p; list_for_each_entry(p, &dv1394_cards, list) { - if ((p->id) == ieee1394_file_to_instance(file)) { + if ((p->id) == idx) { video = p; break; } @@ -1803,7 +1804,7 @@ static int dv1394_open(struct inode *inode, struct file *file) spin_unlock_irqrestore(&dv1394_cards_lock, flags); if (!video) { - debug_printk("dv1394: OHCI card %d not found", ieee1394_file_to_instance(file)); + debug_printk("dv1394: OHCI card %d not found", idx); return -ENODEV; } diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h index 21d50f73a21..28b9f58bafd 100644 --- a/drivers/ieee1394/ieee1394_core.h +++ b/drivers/ieee1394/ieee1394_core.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include "hosts.h" @@ -155,7 +156,10 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size, */ static inline unsigned char ieee1394_file_to_instance(struct file *file) { - return file->f_path.dentry->d_inode->i_cindex; + int idx = cdev_index(file->f_path.dentry->d_inode); + if (idx < 0) + idx = 0; + return idx; } extern int hpsb_disable_irm; diff --git a/fs/char_dev.c b/fs/char_dev.c index 38f71222a55..b7c9d5187a7 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -375,7 +375,6 @@ static int chrdev_open(struct inode *inode, struct file *filp) p = inode->i_cdev; if (!p) { inode->i_cdev = p = new; - inode->i_cindex = idx; list_add(&inode->i_devices, &p->list); new = NULL; } else if (!cdev_get(p)) @@ -405,6 +404,18 @@ static int chrdev_open(struct inode *inode, struct file *filp) return ret; } +int cdev_index(struct inode *inode) +{ + int idx; + struct kobject *kobj; + + kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx); + if (!kobj) + return -1; + kobject_put(kobj); + return idx; +} + void cd_forget(struct inode *inode) { spin_lock(&cdev_lock); @@ -557,6 +568,7 @@ EXPORT_SYMBOL(cdev_init); EXPORT_SYMBOL(cdev_alloc); EXPORT_SYMBOL(cdev_del); EXPORT_SYMBOL(cdev_add); +EXPORT_SYMBOL(cdev_index); EXPORT_SYMBOL(register_chrdev); EXPORT_SYMBOL(unregister_chrdev); EXPORT_SYMBOL(directly_mappable_cdev_bdi); diff --git a/include/linux/cdev.h b/include/linux/cdev.h index fb4591977b0..f389e319a45 100644 --- a/include/linux/cdev.h +++ b/include/linux/cdev.h @@ -28,6 +28,8 @@ int cdev_add(struct cdev *, dev_t, unsigned); void cdev_del(struct cdev *); +int cdev_index(struct inode *inode); + void cd_forget(struct inode *); extern struct backing_dev_info directly_mappable_cdev_bdi; diff --git a/include/linux/fs.h b/include/linux/fs.h index e7833ef5d1d..bcd63706db8 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -751,7 +751,6 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; }; - int i_cindex; __u32 i_generation; -- cgit 
v1.2.3-70-g09d2 From 28ad0c118b0ed98b042d362acfe0017591921138 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 21 May 2009 16:01:02 -0400 Subject: fs: Rearrange inode structure elements to avoid waste due to padding Signed-off-by: "Theodore Ts'o" Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Al Viro --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/fs.h') diff --git a/include/linux/fs.h b/include/linux/fs.h index bcd63706db8..d883aa1fc2e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -729,8 +729,8 @@ struct inode { struct timespec i_atime; struct timespec i_mtime; struct timespec i_ctime; - unsigned int i_blkbits; blkcnt_t i_blocks; + unsigned int i_blkbits; unsigned short i_bytes; umode_t i_mode; spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ -- cgit v1.2.3-70-g09d2 From d5aacad548db1ff547adf35d0a77eb2a8ed4fe14 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 7 Jun 2009 14:56:44 -0400 Subject: New helper - simple_fsync() writes associated buffers, then does sync_inode() to write the inode itself (and to make it clean). Depends on ->write_inode() honouring the second argument. Signed-off-by: Al Viro --- fs/libfs.c | 25 +++++++++++++++++++++++++ include/linux/fs.h | 2 ++ 2 files changed, 27 insertions(+) (limited to 'include/linux/fs.h') diff --git a/fs/libfs.c b/fs/libfs.c index 80046ddf506..ddfa89948c3 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -9,6 +9,8 @@ #include #include #include +#include +#include #include @@ -807,6 +809,29 @@ struct dentry *generic_fh_to_parent(struct super_block *sb, struct fid *fid, } EXPORT_SYMBOL_GPL(generic_fh_to_parent); +int simple_fsync(struct file *file, struct dentry *dentry, int datasync) +{ + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 0, /* metadata-only; caller takes care of data */ + }; + struct inode *inode = dentry->d_inode; + int err; + int ret; + + ret = sync_mapping_buffers(inode->i_mapping); + if (!(inode->i_state & I_DIRTY)) + return ret; + if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) + return ret; + + err = sync_inode(inode, &wbc); + if (ret == 0) + ret = err; + return ret; +} +EXPORT_SYMBOL(simple_fsync); + EXPORT_SYMBOL(dcache_dir_close); EXPORT_SYMBOL(dcache_dir_lseek); EXPORT_SYMBOL(dcache_dir_open); diff --git a/include/linux/fs.h b/include/linux/fs.h index d883aa1fc2e..ede84fa7da5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2345,6 +2345,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count); extern ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos, const void *from, size_t available); +extern int simple_fsync(struct file *, struct dentry *, int); + #ifdef CONFIG_MIGRATION extern int buffer_migrate_page(struct address_space *, struct page *, struct page *); -- cgit v1.2.3-70-g09d2 From 3446a8aa7ebcbc0a799e5e8fc4f2da0738d6bc21 Mon Sep 17 00:00:00 2001 From: Vegard Nossum Date: Sat, 16 May 2009 11:22:14 +0200 Subject: fs: introduce __getname_gfp() The purpose of this change is to allow __getname() users to pass a custom GFP mask to kmem_cache_alloc(). This is needed for annotating a certain kmemcheck false positive. 
Cc: Al Viro Signed-off-by: Vegard Nossum --- include/linux/fs.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'include/linux/fs.h') diff --git a/include/linux/fs.h b/include/linux/fs.h index ede84fa7da5..6d12174fbe1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1919,8 +1919,9 @@ extern void __init vfs_caches_init(unsigned long); extern struct kmem_cache *names_cachep; -#define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) -#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) +#define __getname_gfp(gfp) kmem_cache_alloc(names_cachep, (gfp)) +#define __getname() __getname_gfp(GFP_KERNEL) +#define __putname(name) kmem_cache_free(names_cachep, (void *)(name)) #ifndef CONFIG_AUDITSYSCALL #define putname(name) __putname(name) #else -- cgit v1.2.3-70-g09d2 From 1ebf26a9b338534def47f307c6c8694b6dfc0a79 Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Tue, 16 Jun 2009 15:31:19 -0700 Subject: readahead: make mmap_miss an unsigned int This makes the performance impact of possible mmap_miss wrap around to be temporary and tolerable: i.e. MMAP_LOTSAMISS=100 extra readarounds. Otherwise if ever mmap_miss wraps around to negative, it takes INT_MAX cache misses to bring it back to normal state. During the time mmap readaround will be _enabled_ for whatever wild random workload. That's almost permanent performance impact. Signed-off-by: Wu Fengguang Cc: Ying Han Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/fs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux/fs.h') diff --git a/include/linux/fs.h b/include/linux/fs.h index ede84fa7da5..8146e0264ef 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -879,7 +879,7 @@ struct file_ra_state { there are only # of pages ahead */ unsigned int ra_pages; /* Maximum readahead window */ - int mmap_miss; /* Cache miss stat for mmap accesses */ + unsigned int mmap_miss; /* Cache miss stat for mmap accesses */ loff_t prev_pos; /* Cache last read() position */ }; -- cgit v1.2.3-70-g09d2 From 286973552f051404abdb58dd9b2f8f7558efe4e5 Mon Sep 17 00:00:00 2001 From: Mike Waychison Date: Tue, 16 Jun 2009 15:32:59 -0700 Subject: mm: remove __invalidate_mapping_pages variant Remove __invalidate_mapping_pages atomic variant now that its sole caller can sleep (fixed in eccb95cee4f0d56faa46ef22fb94dd4a3578d3eb ("vfs: fix lock inversion in drop_pagecache_sb()")). This fixes softlockups that can occur while in the drop_caches path. 
Signed-off-by: Mike Waychison Cc: Jan Kara Cc: Wu Fengguang Cc: Dave Chinner Cc: Nick Piggin Acked-by: Jan Kara Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/drop_caches.c | 2 +- include/linux/fs.h | 3 --- mm/truncate.c | 39 ++++++++++++++++----------------------- 3 files changed, 17 insertions(+), 27 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/drop_caches.c b/fs/drop_caches.c index b6a719a909f..a2edb791344 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -24,7 +24,7 @@ static void drop_pagecache_sb(struct super_block *sb) continue; __iget(inode); spin_unlock(&inode_lock); - __invalidate_mapping_pages(inode->i_mapping, 0, -1, true); + invalidate_mapping_pages(inode->i_mapping, 0, -1); iput(toput_inode); toput_inode = inode; spin_lock(&inode_lock); diff --git a/include/linux/fs.h b/include/linux/fs.h index 8146e0264ef..f5ae9f19b8a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2036,9 +2036,6 @@ extern int __invalidate_device(struct block_device *); extern int invalidate_partition(struct gendisk *, int); #endif extern int invalidate_inodes(struct super_block *); -unsigned long __invalidate_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t end, - bool be_atomic); unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); diff --git a/mm/truncate.c b/mm/truncate.c index 12e1579f916..ccc3ecf7cb9 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -267,8 +267,21 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) } EXPORT_SYMBOL(truncate_inode_pages); -unsigned long __invalidate_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t end, bool be_atomic) +/** + * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode + * @mapping: the address_space which holds the pages to invalidate + * @start: the offset 'from' which to invalidate + * @end: the offset 'to' which to invalidate (inclusive) + * + * This function only removes the unlocked pages, if you want to + * remove all the pages of one inode, you must call truncate_inode_pages. + * + * invalidate_mapping_pages() will not block on IO activity. It will not + * invalidate pages which are dirty, locked, under writeback or mapped into + * pagetables. + */ +unsigned long invalidate_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t end) { struct pagevec pvec; pgoff_t next = start; @@ -309,30 +322,10 @@ unlock: break; } pagevec_release(&pvec); - if (likely(!be_atomic)) - cond_resched(); + cond_resched(); } return ret; } - -/** - * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode - * @mapping: the address_space which holds the pages to invalidate - * @start: the offset 'from' which to invalidate - * @end: the offset 'to' which to invalidate (inclusive) - * - * This function only removes the unlocked pages, if you want to - * remove all the pages of one inode, you must call truncate_inode_pages. - * - * invalidate_mapping_pages() will not block on IO activity. It will not - * invalidate pages which are dirty, locked, under writeback or mapped into - * pagetables. 
- */ -unsigned long invalidate_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t end) -{ - return __invalidate_mapping_pages(mapping, start, end, false); -} EXPORT_SYMBOL(invalidate_mapping_pages); /* -- cgit v1.2.3-70-g09d2 From 3e63cbb1efca7dd3137de1bb475e2e068e38ef23 Mon Sep 17 00:00:00 2001 From: Ankit Jain Date: Fri, 19 Jun 2009 14:28:07 -0400 Subject: fs: Add new pre-allocation ioctls to vfs for compatibility with legacy xfs ioctls This patch adds ioctls to vfs for compatibility with legacy XFS pre-allocation ioctls (XFS_IOC_*RESVP*). The implementation effectively invokes sys_fallocate for the new ioctls. Also handles the compat_ioctl case. Note: These legacy ioctls are also implemented by OCFS2. [AV: folded fixes from hch] Signed-off-by: Ankit Jain Signed-off-by: Christoph Hellwig Signed-off-by: Al Viro --- fs/compat_ioctl.c | 48 +++++++++++++++++++++++++++++++++++++++++ fs/ioctl.c | 35 ++++++++++++++++++++++++++++++ fs/open.c | 58 +++++++++++++++++++++++++------------------------- include/linux/falloc.h | 21 ++++++++++++++++++ include/linux/fs.h | 6 ++++++ 5 files changed, 139 insertions(+), 29 deletions(-) (limited to 'include/linux/fs.h') diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index c135202c38b..626c7483b4d 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -1779,6 +1780,41 @@ lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg) return sys_ioctl(fd, cmd, (unsigned long)tn); } +/* on ia32 l_start is on a 32-bit boundary */ +#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) +struct space_resv_32 { + __s16 l_type; + __s16 l_whence; + __s64 l_start __attribute__((packed)); + /* len == 0 means until end of file */ + __s64 l_len __attribute__((packed)); + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserve area */ +}; + +#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32) +#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32) + +/* just account for different alignment */ +static int compat_ioctl_preallocate(struct file *file, unsigned long arg) +{ + struct space_resv_32 __user *p32 = (void __user *)arg; + struct space_resv __user *p = compat_alloc_user_space(sizeof(*p)); + + if (copy_in_user(&p->l_type, &p32->l_type, sizeof(s16)) || + copy_in_user(&p->l_whence, &p32->l_whence, sizeof(s16)) || + copy_in_user(&p->l_start, &p32->l_start, sizeof(s64)) || + copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) || + copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) || + copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) || + copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32))) + return -EFAULT; + + return ioctl_preallocate(file, p); +} +#endif + typedef int (*ioctl_trans_handler_t)(unsigned int, unsigned int, unsigned long, struct file *); @@ -2756,6 +2792,18 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd, case FIOQSIZE: break; +#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) + case FS_IOC_RESVSP_32: + case FS_IOC_RESVSP64_32: + error = compat_ioctl_preallocate(filp, arg); + goto out_fput; +#else + case FS_IOC_RESVSP: + case FS_IOC_RESVSP64: + error = ioctl_preallocate(filp, (void __user *)arg); + goto out_fput; +#endif + case FIBMAP: case FIGETBSZ: case FIONREAD: diff --git a/fs/ioctl.c b/fs/ioctl.c index 001f8d3118f..5612880fcbe 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -15,6 +15,7 @@ #include #include #include +#include #include @@ -403,6 +404,37 @@ EXPORT_SYMBOL(generic_block_fiemap); #endif /* 
CONFIG_BLOCK */ +/* + * This provides compatibility with legacy XFS pre-allocation ioctls + * which predate the fallocate syscall. + * + * Only the l_start, l_len and l_whence fields of the 'struct space_resv' + * are used here, rest are ignored. + */ +int ioctl_preallocate(struct file *filp, void __user *argp) +{ + struct inode *inode = filp->f_path.dentry->d_inode; + struct space_resv sr; + + if (copy_from_user(&sr, argp, sizeof(sr))) + return -EFAULT; + + switch (sr.l_whence) { + case SEEK_SET: + break; + case SEEK_CUR: + sr.l_start += filp->f_pos; + break; + case SEEK_END: + sr.l_start += i_size_read(inode); + break; + default: + return -EINVAL; + } + + return do_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len); +} + static int file_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { @@ -414,6 +446,9 @@ static int file_ioctl(struct file *filp, unsigned int cmd, return ioctl_fibmap(filp, p); case FIONREAD: return put_user(i_size_read(inode) - filp->f_pos, p); + case FS_IOC_RESVSP: + case FS_IOC_RESVSP64: + return ioctl_preallocate(filp, p); } return vfs_ioctl(filp, cmd, arg); diff --git a/fs/open.c b/fs/open.c index 7200e23d925..dd98e807602 100644 --- a/fs/open.c +++ b/fs/open.c @@ -378,63 +378,63 @@ SYSCALL_ALIAS(sys_ftruncate64, SyS_ftruncate64); #endif #endif /* BITS_PER_LONG == 32 */ -SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len) + +int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { - struct file *file; - struct inode *inode; - long ret = -EINVAL; + struct inode *inode = file->f_path.dentry->d_inode; + long ret; if (offset < 0 || len <= 0) - goto out; + return -EINVAL; /* Return error if mode is not supported */ - ret = -EOPNOTSUPP; if (mode && !(mode & FALLOC_FL_KEEP_SIZE)) - goto out; + return -EOPNOTSUPP; - ret = -EBADF; - file = fget(fd); - if (!file) - goto out; if (!(file->f_mode & FMODE_WRITE)) - goto out_fput; + return -EBADF; /* * Revalidate the write permissions, in case security policy has * changed since the files were opened. */ ret = security_file_permission(file, MAY_WRITE); if (ret) - goto out_fput; + return ret; - inode = file->f_path.dentry->d_inode; - - ret = -ESPIPE; if (S_ISFIFO(inode->i_mode)) - goto out_fput; + return -ESPIPE; - ret = -ENODEV; /* * Let individual file system decide if it supports preallocation * for directories or not. 
*/ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) - goto out_fput; + return -ENODEV; - ret = -EFBIG; /* Check for wrap through zero too */ if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0)) - goto out_fput; + return -EFBIG; - if (inode->i_op->fallocate) - ret = inode->i_op->fallocate(inode, mode, offset, len); - else - ret = -EOPNOTSUPP; + if (!inode->i_op->fallocate) + return -EOPNOTSUPP; -out_fput: - fput(file); -out: - return ret; + return inode->i_op->fallocate(inode, mode, offset, len); } + +SYSCALL_DEFINE(fallocate)(int fd, int mode, loff_t offset, loff_t len) +{ + struct file *file; + int error = -EBADF; + + file = fget(fd); + if (file) { + error = do_fallocate(file, mode, offset, len); + fput(file); + } + + return error; +} + #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS asmlinkage long SyS_fallocate(long fd, long mode, loff_t offset, loff_t len) { diff --git a/include/linux/falloc.h b/include/linux/falloc.h index 8e912ab6a07..3c155107d61 100644 --- a/include/linux/falloc.h +++ b/include/linux/falloc.h @@ -3,4 +3,25 @@ #define FALLOC_FL_KEEP_SIZE 0x01 /* default is extend size */ +#ifdef __KERNEL__ + +/* + * Space reservation ioctls and argument structure + * are designed to be compatible with the legacy XFS ioctls. + */ +struct space_resv { + __s16 l_type; + __s16 l_whence; + __s64 l_start; + __s64 l_len; /* len == 0 means until end of file */ + __s32 l_sysid; + __u32 l_pid; + __s32 l_pad[4]; /* reserved area */ +}; + +#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv) +#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv) + +#endif /* __KERNEL__ */ + #endif /* _FALLOC_H_ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 1ff5e4e0195..79e302ddde0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1906,6 +1906,8 @@ static inline int break_lease(struct inode *inode, unsigned int mode) extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, struct file *filp); +extern int do_fallocate(struct file *file, int mode, loff_t offset, + loff_t len); extern long do_sys_open(int dfd, const char __user *filename, int flags, int mode); extern struct file *filp_open(const char *, int, int); @@ -1914,6 +1916,10 @@ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, extern int filp_close(struct file *, fl_owner_t id); extern char * getname(const char __user *); +/* fs/ioctl.c */ + +extern int ioctl_preallocate(struct file *filp, void __user *argp); + /* fs/dcache.c */ extern void __init vfs_caches_init_early(void); extern void __init vfs_caches_init(unsigned long); -- cgit v1.2.3-70-g09d2 From f19d4a8fa6f9b6ccf54df0971c97ffcaa390b7b0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 8 Jun 2009 19:50:45 -0400 Subject: add caching of ACLs in struct inode No helpers, no conversions yet. 
Signed-off-by: Al Viro --- fs/inode.c | 10 ++++++++++ include/linux/fs.h | 7 +++++++ 2 files changed, 17 insertions(+) (limited to 'include/linux/fs.h') diff --git a/fs/inode.c b/fs/inode.c index f643be565df..e193cd592fa 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -25,6 +25,7 @@ #include #include #include +#include /* * This is needed for the following functions: @@ -189,6 +190,9 @@ struct inode *inode_init_always(struct super_block *sb, struct inode *inode) } inode->i_private = NULL; inode->i_mapping = mapping; +#ifdef CONFIG_FS_POSIX_ACL + inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; +#endif #ifdef CONFIG_FSNOTIFY inode->i_fsnotify_mask = 0; @@ -227,6 +231,12 @@ void destroy_inode(struct inode *inode) ima_inode_free(inode); security_inode_free(inode); fsnotify_inode_delete(inode); +#ifdef CONFIG_FS_POSIX_ACL + if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED) + posix_acl_release(inode->i_acl); + if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) + posix_acl_release(inode->i_default_acl); +#endif if (inode->i_sb->s_op->destroy_inode) inode->i_sb->s_op->destroy_inode(inode); else diff --git a/include/linux/fs.h b/include/linux/fs.h index 79e302ddde0..0872372184f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -710,6 +710,9 @@ static inline int mapping_writably_mapped(struct address_space *mapping) #define i_size_ordered_init(inode) do { } while (0) #endif +struct posix_acl; +#define ACL_NOT_CACHED ((void *)(-1)) + struct inode { struct hlist_node i_hash; struct list_head i_list; @@ -772,6 +775,10 @@ struct inode { atomic_t i_writecount; #ifdef CONFIG_SECURITY void *i_security; +#endif +#ifdef CONFIG_FS_POSIX_ACL + struct posix_acl *i_acl; + struct posix_acl *i_default_acl; #endif void *i_private; /* fs or device private pointer */ }; -- cgit v1.2.3-70-g09d2
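[Editorial note] The last patch above only adds the i_acl/i_default_acl fields and the ACL_NOT_CACHED sentinel to struct inode; as its log says, there are no helpers and no filesystem conversions yet. The following is a minimal, non-authoritative sketch of how a filesystem-side ACL lookup might use the new cache once such helpers exist. The function name example_get_acl(), the use of inode->i_lock for serialization, and the read-from-disk placeholder are editorial assumptions for illustration only, not code from this series.

/*
 * Illustrative sketch only -- not part of the series above.  Assumes the
 * i_acl/i_default_acl fields and the ACL_NOT_CACHED sentinel added by the
 * last patch; the helper name and the i_lock-based locking are assumptions.
 */
#include <linux/fs.h>
#include <linux/posix_acl.h>

static struct posix_acl *example_get_acl(struct inode *inode, int type)
{
	struct posix_acl **slot = (type == ACL_TYPE_DEFAULT) ?
				  &inode->i_default_acl : &inode->i_acl;
	struct posix_acl *acl;

	spin_lock(&inode->i_lock);
	acl = *slot;
	if (acl != ACL_NOT_CACHED)
		acl = posix_acl_dup(acl);	/* take a reference for the caller */
	spin_unlock(&inode->i_lock);

	if (acl == ACL_NOT_CACHED) {
		/*
		 * Cache miss: a real filesystem would read the ACL from its
		 * on-disk xattrs here, then publish it under i_lock so later
		 * lookups hit the cache, e.g.:
		 *
		 *	spin_lock(&inode->i_lock);
		 *	if (*slot == ACL_NOT_CACHED)
		 *		*slot = posix_acl_dup(acl);
		 *	spin_unlock(&inode->i_lock);
		 */
		acl = NULL;		/* placeholder in this sketch */
	}

	return acl;	/* caller drops the reference with posix_acl_release() */
}

Note that caching NULL ("this inode has no ACL") is the point of the ACL_NOT_CACHED sentinel: a NULL stored in i_acl or i_default_acl is a valid cached result, distinct from "not looked up yet", which is why the sentinel is a non-NULL poison pointer rather than NULL itself.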