author	David Woodhouse <David.Woodhouse@intel.com>	2010-05-19 17:05:14 +0100
committer	David Woodhouse <David.Woodhouse@intel.com>	2010-05-19 17:10:44 +0100
commit	ae3b6ba06c8ed399ef920724ee8136e540878294
tree	427f89ac31defa7b3114c50ab706c0f6df539136 /fs/jffs2
parent	acb64a43e4503fbea9faf123f2403da7af8831eb
jffs2: Use jffs2_garbage_collect_trigger() to trigger pending erases
This is now done in a GC pass; we don't need to trigger kupdated to do it.

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
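The locking contract is the subtle part of this change: jffs2_garbage_collect_trigger() expects c->erase_completion_lock to be held, which is why the scan.c hunk below takes the lock explicitly and why erase.c moves the call inside the locked region instead of calling the old trigger after the unlock. As a rough sketch of the idea (an illustration under that assumption, not the verbatim fs/jffs2/background.c source), the trigger wakes the GC thread only when there is work for it:

/* Sketch only -- illustrates the contract, not the exact kernel code.
 * Caller must hold c->erase_completion_lock. The GC thread, rather
 * than kupdated, now picks up pending/completed erases and marks
 * the erased blocks clean in a GC pass. */
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
	assert_spin_locked(&c->erase_completion_lock);

	/* Wake the GC thread only if it is running and has something
	 * to do (e.g. blocks queued for erase or completion). */
	if (c->gc_task && jffs2_thread_should_wake(c))
		send_sig(SIGHUP, c->gc_task, 1);
}

The other call sites (gc.c, nodemgmt.c, wbuf.c) already ran under erase_completion_lock, so they need only the one-line substitution; scan.c and erase.c are the two places where the locking had to be adjusted.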
Diffstat (limited to 'fs/jffs2')
 fs/jffs2/erase.c    | 6 +++---
 fs/jffs2/gc.c       | 2 +-
 fs/jffs2/nodemgmt.c | 4 ++--
 fs/jffs2/scan.c     | 4 +++-
 fs/jffs2/wbuf.c     | 6 +++---
 5 files changed, 12 insertions(+), 10 deletions(-)
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index 563c857ca54..6286ad9b00f 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -168,10 +168,10 @@ static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblo
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
list_move_tail(&jeb->list, &c->erase_complete_list);
+ /* Wake the GC thread to mark them clean */
+ jffs2_garbage_collect_trigger(c);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
- /* Ensure that kupdated calls us again to mark them clean */
- jffs2_erase_pending_trigger(c);
wake_up(&c->erase_wait);
}
@@ -491,9 +491,9 @@ filebad:
refile:
/* Stick it back on the list from whence it came and come back later */
- jffs2_erase_pending_trigger(c);
mutex_lock(&c->erase_free_sem);
spin_lock(&c->erase_completion_lock);
+ jffs2_garbage_collect_trigger(c);
list_move(&jeb->list, &c->erase_complete_list);
spin_unlock(&c->erase_completion_lock);
mutex_unlock(&c->erase_free_sem);
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 1ea4a843a43..f5e96bd656e 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -448,7 +448,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
list_add_tail(&c->gcblock->list, &c->erase_pending_list);
c->gcblock = NULL;
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
}
spin_unlock(&c->erase_completion_lock);
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index dd2d920d332..694aa5b0350 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -229,7 +229,7 @@ static int jffs2_find_nextblock(struct jffs2_sb_info *c)
ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
list_move_tail(&ejeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
D1(printk(KERN_DEBUG "jffs2_find_nextblock: Triggering erase of erasable block at 0x%08x\n",
ejeb->offset));
}
@@ -625,7 +625,7 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 696686cc206..46f870d1cc3 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -260,7 +260,9 @@ int jffs2_scan_medium(struct jffs2_sb_info *c)
ret = -EIO;
goto out;
}
- jffs2_erase_pending_trigger(c);
+ spin_lock(&c->erase_completion_lock);
+ jffs2_garbage_collect_trigger(c);
+ spin_unlock(&c->erase_completion_lock);
}
ret = 0;
out:
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 5ef7bac265e..be114beca13 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -121,7 +121,7 @@ static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
list_add_tail(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
} else {
/* Sometimes, however, we leave it elsewhere so it doesn't get
immediately reused, and we spread the load a bit. */
@@ -152,7 +152,7 @@ static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock
D1(printk("Refiling block at %08x to erase_pending_list\n", jeb->offset));
list_add(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
}
if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) {
@@ -543,7 +543,7 @@ static void jffs2_wbuf_recover(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset));
list_move(&jeb->list, &c->erase_pending_list);
c->nr_erasing_blocks++;
- jffs2_erase_pending_trigger(c);
+ jffs2_garbage_collect_trigger(c);
}
jffs2_dbg_acct_sanity_check_nolock(c, jeb);