Diffstat (limited to 'fs/notify/notification.c')
-rw-r--r--	fs/notify/notification.c	41
1 files changed, 13 insertions, 28 deletions
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index b493c378445..dafd0b7687b 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -289,43 +289,28 @@ static void initialize_event(struct fsnotify_event *event)
 /*
  * Caller damn well better be holding whatever mutex is protecting the
- * old_holder->event_list.
+ * old_holder->event_list and the new_event must be a clean event which
+ * cannot be found anywhere else in the kernel.
  */
 int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
			   struct fsnotify_event *new_event)
 {
 	struct fsnotify_event *old_event = old_holder->event;
-	struct fsnotify_event_holder *new_holder = NULL;
+	struct fsnotify_event_holder *new_holder = &new_event->holder;
+
+	enum event_spinlock_class {
+		SPINLOCK_OLD,
+		SPINLOCK_NEW,
+	};
 
 	/*
-	 * There is one fsnotify_event_holder embedded inside each fsnotify_event.
-	 * Check if we expect to be able to use that holder. If not alloc a new
-	 * holder.
-	 * For the overflow event it's possible that something will use the in
-	 * event holder before we get the lock so we may need to jump back and
-	 * alloc a new holder, this can't happen for most events...
+	 * if the new_event's embedded holder is in use someone
+	 * screwed up and didn't give us a clean new event.
 	 */
-	if (!list_empty(&new_event->holder.event_list)) {
-alloc_holder:
-		new_holder = fsnotify_alloc_event_holder();
-		if (!new_holder)
-			return -ENOMEM;
-	}
+	BUG_ON(!list_empty(&new_holder->event_list));
 
-	spin_lock(&old_event->lock);
-	spin_lock(&new_event->lock);
-
-	if (list_empty(&new_event->holder.event_list)) {
-		if (unlikely(new_holder))
-			fsnotify_destroy_event_holder(new_holder);
-		new_holder = &new_event->holder;
-	} else if (unlikely(!new_holder)) {
-		/* between the time we checked above and got the lock the in
-		 * event holder was used, go back and get a new one */
-		spin_unlock(&new_event->lock);
-		spin_unlock(&old_event->lock);
-		goto alloc_holder;
-	}
+	spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
+	spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
 
 	new_holder->event = new_event;
 	list_replace_init(&old_holder->event_list, &new_holder->event_list);
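Note (not part of the patch): the move from plain spin_lock() to spin_lock_nested() above is the usual lockdep annotation for taking two spinlocks that belong to the same lock class, here the locks of two fsnotify_event structures; without distinct subclasses lockdep would flag the second acquisition as recursive locking. The sketch below shows that pattern in isolation. The demo_event, demo_lock_class and demo_replace names are illustrative only and do not exist in the kernel.

/*
 * Minimal sketch of the same-class nested locking annotation, assuming a
 * made-up demo_event structure.  Each object carries its own spinlock, so
 * both locks share one lockdep class; giving each acquisition its own
 * subclass keeps lockdep quiet.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_event {			/* hypothetical stand-in for fsnotify_event */
	spinlock_t lock;
	struct list_head list;
};

enum demo_lock_class {
	DEMO_LOCK_OLD,			/* subclass 0, like SPINLOCK_OLD above */
	DEMO_LOCK_NEW,			/* subclass 1, like SPINLOCK_NEW above */
};

static void demo_replace(struct demo_event *old_ev, struct demo_event *new_ev)
{
	/* Both locks come from the same class; annotate each acquisition. */
	spin_lock_nested(&old_ev->lock, DEMO_LOCK_OLD);
	spin_lock_nested(&new_ev->lock, DEMO_LOCK_NEW);

	/* new_ev takes over the list position old_ev occupied; old_ev's
	 * list head is reinitialized, mirroring list_replace_init() above. */
	list_replace_init(&old_ev->list, &new_ev->list);

	spin_unlock(&new_ev->lock);
	spin_unlock(&old_ev->lock);
}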