author    | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2007-07-18 13:42:10 +0300
committer | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2007-07-18 16:59:09 +0300
commit    | 784c145444e7dd58ae740d406155b72ac658f151 (patch)
tree      | 31c304913db2011a3a9f09da9dbaf2f74004d887 /drivers
parent    | 63b6c1ed56f69fdd35122dc591164587e3407ba0 (diff)
UBI: fix error handling in erase worker
Do not switch to read-only mode on -EINTR and other obviously
recoverable errors. Switch to RO mode only when we do not know
what the error means.
Reported-by: Vinit Agnihotri <vinit.agnihotri@gmail.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
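
In effect, the patch splits erase failures into three classes before deciding whether to drop to read-only mode: transient errors (-EINTR, -ENOMEM, -EAGAIN, -EBUSY) simply re-schedule the erasure, -EIO marks the physical eraseblock bad, and only errors UBI cannot interpret force RO mode. The stand-alone C sketch below mirrors that decision flow; it is an illustration, not the driver code, and the helpers (handle_erase_failure(), schedule_erase_again(), mark_peb_bad(), switch_to_ro_mode()) are hypothetical stand-ins for the UBI routines the diff below actually touches. The bad-block reservation bookkeeping is deliberately elided.

```c
/*
 * Simplified illustration of the error policy introduced by this patch.
 * Not the UBI driver code: the helpers are stubbed out so the sketch
 * compiles on its own, and the bad-block reservation accounting is
 * omitted. Errors follow the kernel convention of negative errno values.
 */
#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the UBI routines referenced by the patch. */
static int schedule_erase_again(int pnum) { printf("re-schedule PEB %d\n", pnum); return 0; }
static int mark_peb_bad(int pnum)         { printf("mark PEB %d bad\n", pnum);    return 0; }
static void switch_to_ro_mode(void)       { printf("switching to read-only mode\n"); }

static int handle_erase_failure(int err, int pnum)
{
        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || err == -EBUSY) {
                /* Transient condition: just try the erasure again later. */
                int err1 = schedule_erase_again(pnum);
                if (err1) {
                        switch_to_ro_mode();
                        return err1;
                }
                return err;
        }

        if (err == -EIO) {
                /* The flash reported a real failure: retire the eraseblock. */
                if (mark_peb_bad(pnum)) {
                        switch_to_ro_mode();
                        return -EIO;
                }
                return err;
        }

        /* Unknown error: retrying would likely fail forever, so go read-only. */
        switch_to_ro_mode();
        return err;
}

int main(void)
{
        handle_erase_failure(-EAGAIN, 42);  /* transient: re-scheduled */
        handle_erase_failure(-EIO, 43);     /* bad block: marked bad   */
        handle_erase_failure(-EINVAL, 44);  /* unknown: RO mode        */
        return 0;
}
```

In the actual patch, every unrecoverable path additionally funnels through a single out_ro: label at the end of erase_worker(), which is what removes the repeated ubi_ro_mode() calls visible in the diff.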
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/mtd/ubi/wl.c | 89
1 file changed, 48 insertions(+), 41 deletions(-)
```diff
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index d512cf16350..9de95376209 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1060,9 +1060,8 @@ out_unlock:
 static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                         int cancel)
 {
-        int err;
         struct ubi_wl_entry *e = wl_wrk->e;
-        int pnum = e->pnum;
+        int pnum = e->pnum, err, need;
 
         if (cancel) {
                 dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
@@ -1097,62 +1096,70 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
         kfree(wl_wrk);
         kmem_cache_free(wl_entries_slab, e);
 
-        if (err != -EIO) {
+        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
+            err == -EBUSY) {
+                int err1;
+
+                /* Re-schedule the LEB for erasure */
+                err1 = schedule_erase(ubi, e, 0);
+                if (err1) {
+                        err = err1;
+                        goto out_ro;
+                }
+                return err;
+        } else if (err != -EIO) {
                 /*
                  * If this is not %-EIO, we have no idea what to do. Scheduling
                  * this physical eraseblock for erasure again would cause
                  * errors again and again. Well, lets switch to RO mode.
                  */
-                ubi_ro_mode(ubi);
-                return err;
+                goto out_ro;
         }
 
         /* It is %-EIO, the PEB went bad */
 
         if (!ubi->bad_allowed) {
                 ubi_err("bad physical eraseblock %d detected", pnum);
-                ubi_ro_mode(ubi);
-                err = -EIO;
-        } else {
-                int need;
-
-                spin_lock(&ubi->volumes_lock);
-                need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
-                if (need > 0) {
-                        need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
-                        ubi->avail_pebs -= need;
-                        ubi->rsvd_pebs += need;
-                        ubi->beb_rsvd_pebs += need;
-                        if (need > 0)
-                                ubi_msg("reserve more %d PEBs", need);
-                }
+                goto out_ro;
+        }
 
-                if (ubi->beb_rsvd_pebs == 0) {
-                        spin_unlock(&ubi->volumes_lock);
-                        ubi_err("no reserved physical eraseblocks");
-                        ubi_ro_mode(ubi);
-                        return -EIO;
-                }
+        spin_lock(&ubi->volumes_lock);
+        need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs + 1;
+        if (need > 0) {
+                need = ubi->avail_pebs >= need ? need : ubi->avail_pebs;
+                ubi->avail_pebs -= need;
+                ubi->rsvd_pebs += need;
+                ubi->beb_rsvd_pebs += need;
+                if (need > 0)
+                        ubi_msg("reserve more %d PEBs", need);
+        }
 
+        if (ubi->beb_rsvd_pebs == 0) {
                 spin_unlock(&ubi->volumes_lock);
-                ubi_msg("mark PEB %d as bad", pnum);
+                ubi_err("no reserved physical eraseblocks");
+                goto out_ro;
+        }
 
-                err = ubi_io_mark_bad(ubi, pnum);
-                if (err) {
-                        ubi_ro_mode(ubi);
-                        return err;
-                }
+        spin_unlock(&ubi->volumes_lock);
+        ubi_msg("mark PEB %d as bad", pnum);
 
-                spin_lock(&ubi->volumes_lock);
-                ubi->beb_rsvd_pebs -= 1;
-                ubi->bad_peb_count += 1;
-                ubi->good_peb_count -= 1;
-                ubi_calculate_reserved(ubi);
-                if (ubi->beb_rsvd_pebs == 0)
-                        ubi_warn("last PEB from the reserved pool was used");
-                spin_unlock(&ubi->volumes_lock);
-        }
+        err = ubi_io_mark_bad(ubi, pnum);
+        if (err)
+                goto out_ro;
+
+        spin_lock(&ubi->volumes_lock);
+        ubi->beb_rsvd_pebs -= 1;
+        ubi->bad_peb_count += 1;
+        ubi->good_peb_count -= 1;
+        ubi_calculate_reserved(ubi);
+        if (ubi->beb_rsvd_pebs == 0)
+                ubi_warn("last PEB from the reserved pool was used");
+        spin_unlock(&ubi->volumes_lock);
+
+        return err;
 
+out_ro:
+        ubi_ro_mode(ubi);
         return err;
 }
```