author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2011-05-16 13:44:48 +0300
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2011-05-16 15:48:48 +0300
commit     7c47bfd0dbb20e5d7fa4e37cfd76bb73d39b32b4 (patch)
tree       277a373b3262229f31d3a2733a55cacaac5e5ef7 /fs/ubifs
parent     617992069513c1e789c707c4d75ff03bf7dd0fb0 (diff)
UBIFS: always clean up the recovered LEB
Now that we call 'ubifs_recover_leb()' only for LEBs which are potentially corrupted (i.e., only for the last buds, not for all of them), we can clean up every LEB, not only those where we find corruption. The reason is unstable bits: even though a LEB may look good now, it might contain unstable bits which could hit us a bit later.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
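In other words, the cleanup pass no longer depends on whether the scan actually observed corruption. A minimal user-space sketch of that control-flow change (the function and stub names below are hypothetical stand-ins, not the UBIFS code itself):

/*
 * Toy model of the change: before the patch, cleanup ran only when the
 * scan flagged corruption (need_clean); after it, the recovered LEB is
 * always cleaned up, since a good-looking LEB may still hold unstable
 * bits from an interrupted write.
 */
#include <stdbool.h>
#include <stdio.h>

static bool scan_leb_stub(int lnum)
{
        /* Pretend the scan of this LEB found no corruption. */
        (void)lnum;
        return false;
}

static int fix_unclean_leb_stub(int lnum)
{
        /* Stand-in for fix_unclean_leb(): re-write the LEB tail. */
        printf("LEB %d cleaned up\n", lnum);
        return 0;
}

static int recover_leb_old(int lnum)
{
        bool need_clean = scan_leb_stub(lnum);

        /* Old behaviour: clean up only if corruption was seen. */
        if (need_clean)
                return fix_unclean_leb_stub(lnum);
        return 0;
}

static int recover_leb_new(int lnum)
{
        scan_leb_stub(lnum);

        /* New behaviour: always clean up the recovered LEB. */
        return fix_unclean_leb_stub(lnum);
}

int main(void)
{
        recover_leb_old(7);     /* prints nothing */
        recover_leb_new(7);     /* prints "LEB 7 cleaned up" */
        return 0;
}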
Diffstat (limited to 'fs/ubifs')
-rw-r--r--  fs/ubifs/recovery.c | 29
1 file changed, 9 insertions(+), 20 deletions(-)
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 7d922033d66..4d10b6e36ec 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -609,7 +609,7 @@ static int drop_incomplete_group(struct ubifs_scan_leb *sleb, int *offs)
 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                                          int offs, void *sbuf, int grouped)
 {
-        int ret = 0, err, len = c->leb_size - offs, need_clean = 0;
+        int ret = 0, err, len = c->leb_size - offs;
         int start = offs;
         struct ubifs_scan_leb *sleb;
         void *buf = sbuf + offs;
@@ -620,9 +620,6 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
         if (IS_ERR(sleb))
                 return sleb;

-        if (sleb->ecc)
-                need_clean = 1;
-
         while (len >= 8) {
                 dbg_scan("look at LEB %d:%d (%d bytes left)",
                          lnum, offs, len);
@@ -665,21 +662,18 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
         }

         if (ret == SCANNED_GARBAGE || ret == SCANNED_A_BAD_PAD_NODE) {
-                if (is_last_write(c, buf, offs)) {
+                if (is_last_write(c, buf, offs))
                         clean_buf(c, &buf, lnum, &offs, &len);
-                        need_clean = 1;
-                } else
+                else
                         goto corrupted_rescan;
         } else if (ret == SCANNED_A_CORRUPT_NODE) {
-                if (no_more_nodes(c, buf, len, lnum, offs)) {
+                if (no_more_nodes(c, buf, len, lnum, offs))
                         clean_buf(c, &buf, lnum, &offs, &len);
-                        need_clean = 1;
-                } else
+                else
                         goto corrupted_rescan;
         } else if (!is_empty(buf, len)) {
                 if (is_last_write(c, buf, offs)) {
                         clean_buf(c, &buf, lnum, &offs, &len);
-                        need_clean = 1;
                 } else {
                         int corruption = first_non_ff(buf, len);
@@ -701,21 +695,16 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
                 buf = sbuf + offs;
                 len = c->leb_size - offs;
                 clean_buf(c, &buf, lnum, &offs, &len);
-                need_clean = 1;
         }

-        if (offs % c->min_io_size) {
+        if (offs % c->min_io_size)
                 clean_buf(c, &buf, lnum, &offs, &len);
-                need_clean = 1;
-        }

         ubifs_end_scan(c, sleb, lnum, offs);

-        if (need_clean) {
-                err = fix_unclean_leb(c, sleb, start);
-                if (err)
-                        goto error;
-        }
+        err = fix_unclean_leb(c, sleb, start);
+        if (err)
+                goto error;

         return sleb;
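For reference, after this patch the tail of 'ubifs_recover_leb()' reads as follows (assembled from the hunks above; surrounding code omitted): fix_unclean_leb() is now called unconditionally for every recovered LEB.

        if (offs % c->min_io_size)
                clean_buf(c, &buf, lnum, &offs, &len);

        ubifs_end_scan(c, sleb, lnum, offs);

        err = fix_unclean_leb(c, sleb, start);
        if (err)
                goto error;

        return sleb;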