author     Nick Piggin <nickpiggin@yahoo.com.au>           2008-10-02 14:50:12 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-10-02 15:53:13 -0700
commit     16dbc6c9616363fe53811abcbd935336dc0a0f01 (patch)
tree       def1129950caf1e861563b7cbdc1874e7c41fc5c
parent     08650869e0ec581f8d88cfdb563d37f5383abfe2 (diff)
inotify: fix lock ordering wrt do_page_fault's mmap_sem
Fix inotify lock order reversal with mmap_sem due to holding locks over copy_to_user.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Reported-by: "Daniel J Blueman" <daniel.blueman@gmail.com>
Tested-by: "Daniel J Blueman" <daniel.blueman@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
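The inversion is easiest to see outside the kernel. The sketch below is a minimal userspace model using pthread mutexes as stand-ins: ev_mutex plays the role of the inotify device mutex, mmap_sem the lock taken by the page-fault path when copy_to_user() touches an unmapped user buffer, and snprintf() the copy itself. The function names and the scenario are illustrative only, not the kernel code paths.

/* Build with: cc -pthread lock-order-sketch.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ev_mutex = PTHREAD_MUTEX_INITIALIZER;   /* device mutex stand-in */
static pthread_mutex_t mmap_sem = PTHREAD_MUTEX_INITIALIZER;   /* fault-path stand-in   */

/* Problematic shape: the copy (which may fault) runs with ev_mutex held,
 * so the fault-path lock nests inside it: ev_mutex -> mmap_sem. */
static void read_event_before(char *dst, size_t len, const char *src)
{
	pthread_mutex_lock(&ev_mutex);
	pthread_mutex_lock(&mmap_sem);          /* "page fault" during the copy */
	snprintf(dst, len, "%s", src);          /* stand-in for copy_to_user()  */
	pthread_mutex_unlock(&mmap_sem);
	pthread_mutex_unlock(&ev_mutex);
}

/* Shape after the fix: detach the event under ev_mutex, drop it, and only
 * then do the copy, so the two locks are never held at the same time. */
static void read_event_after(char *dst, size_t len, const char *src)
{
	pthread_mutex_lock(&ev_mutex);
	/* remove_kevent(): unlink the event from the queue here */
	pthread_mutex_unlock(&ev_mutex);

	pthread_mutex_lock(&mmap_sem);          /* fault taken with ev_mutex free */
	snprintf(dst, len, "%s", src);
	pthread_mutex_unlock(&mmap_sem);
	/* free_kevent(): release the detached event afterwards */
}

int main(void)
{
	char buf[64];

	read_event_before(buf, sizeof(buf), "event");   /* creates ev_mutex -> mmap_sem */
	read_event_after(buf, sizeof(buf), "event");    /* no nesting of the two locks  */
	printf("%s\n", buf);
	return 0;
}

In the patch itself the same split shows up below as remove_kevent() under dev->ev_mutex, then mutex_unlock(), copy_to_user(), free_kevent(), and a fresh mutex_lock() before the next loop iteration.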
-rw-r--r--   fs/inotify_user.c              27
-rw-r--r--   include/asm-x86/uaccess_64.h    1
2 files changed, 21 insertions, 7 deletions
diff --git a/fs/inotify_user.c b/fs/inotify_user.c
index 60249429a25..d85c7d931cd 100644
--- a/fs/inotify_user.c
+++ b/fs/inotify_user.c
@@ -323,7 +323,7 @@ out:
}
/*
- * remove_kevent - cleans up and ultimately frees the given kevent
+ * remove_kevent - cleans up the given kevent
 *
 * Caller must hold dev->ev_mutex.
 */
@@ -334,7 +334,13 @@ static void remove_kevent(struct inotify_device *dev,
	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;
+}
+/*
+ * free_kevent - frees the given kevent.
+ */
+static void free_kevent(struct inotify_kernel_event *kevent)
+{
	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}
@@ -350,6 +356,7 @@ static void inotify_dev_event_dequeue(struct inotify_device *dev)
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
+		free_kevent(kevent);
	}
}
@@ -433,17 +440,15 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
	dev = file->private_data;
	while (1) {
-		int events;
		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
		mutex_lock(&dev->ev_mutex);
-		events = !list_empty(&dev->events);
-		mutex_unlock(&dev->ev_mutex);
-		if (events) {
+		if (!list_empty(&dev->events)) {
			ret = 0;
			break;
		}
+		mutex_unlock(&dev->ev_mutex);
		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
@@ -462,7 +467,6 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
	if (ret)
		return ret;
-	mutex_lock(&dev->ev_mutex);
	while (1) {
		struct inotify_kernel_event *kevent;
@@ -481,6 +485,13 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
			}
			break;
		}
+		remove_kevent(dev, kevent);
+
+		/*
+		 * Must perform the copy_to_user outside the mutex in order
+		 * to avoid a lock order reversal with mmap_sem.
+		 */
+		mutex_unlock(&dev->ev_mutex);
		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
@@ -498,7 +509,9 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
			count -= kevent->event.len;
		}
-		remove_kevent(dev, kevent);
+		free_kevent(kevent);
+
+		mutex_lock(&dev->ev_mutex);
	}
	mutex_unlock(&dev->ev_mutex);
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 515d4dce96b..45806d60bcb 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
+#include <linux/lockdep.h>
#include <asm/page.h>
/*