author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2009-05-24 14:13:34 +0300
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2009-06-02 13:53:35 +0300
commit     b86a2c56e512f46d140a4bcb4e35e8a7d4a99a4b
tree       59c3e036dfd767b73e700bd7fd8cb4bee15c4f58   /drivers/mtd/ubi/ubi.h
parent     87960c0b12d0c5a0b37e0c79aef77aa1a0b10d44
UBI: do not switch to R/O mode on read errors
This patch improves UBI error handling. At the moment UBI switches
to R/O mode when the WL worker fails to read the source PEB, which
means that the upper layers (e.g., UBIFS) have no chance to unmap
the erroneous PEB and recover from the error. This patch changes
that behaviour and makes UBI put such PEBs into a separate RB-tree,
thus preventing the WL worker from hitting the same read errors
again and again.
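As a rough illustration of the idea (the helper name
park_erroneous_peb() is made up for this sketch; the real logic
lives in the WL worker in drivers/mtd/ubi/wl.c), parking a PEB
that produced a read error could look like this:

/*
 * Illustrative sketch only, not the exact mainline code: move a PEB
 * whose read failed into the @erroneous RB-tree so that the WL worker
 * stops picking it, leaving recovery to the upper layers.
 */
static void park_erroneous_peb(struct ubi_device *ubi,
			       struct ubi_wl_entry *e)
{
	spin_lock(&ubi->wl_lock);
	wl_tree_add(e, &ubi->erroneous);
	ubi->erroneous_peb_count += 1;
	spin_unlock(&ubi->wl_lock);
}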
However, there is a limit: at most 10% of all PEBs may be treated
this way. If there are more, UBI switches to R/O mode.
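A plausible shape for that limit, assuming (as the diff below
suggests) a @max_erroneous field computed once at attach time;
the exact formula and error message are assumptions here:

/* Sketch: derive the limit at attach time (assumed to be 10% of
 * all PEBs) ...
 */
ubi->max_erroneous = ubi->peb_count / 10;

/* ... and, in the WL error path, refuse to park more erroneous
 * PEBs beyond it.
 */
if (ubi->erroneous_peb_count >= ubi->max_erroneous) {
	ubi_err("too many erroneous eraseblocks (%d)",
		ubi->erroneous_peb_count);
	ubi_ro_mode(ubi);	/* give up: switch to R/O mode */
}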
Additionally, this patch teaches UBI not to panic and switch to
R/O mode if, after a PEB has been copied, the target LEB cannot be
read back. Instead, UBI now cancels the operation and schedules
the target PEB for torturing.
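A sketch of what the read-back check at the end of a LEB copy might
look like (cf. 'ubi_eba_copy_leb()' in drivers/mtd/ubi/eba.c; buffer
and label names are simplified here):

/* Sketch: after writing the data to the target PEB, read it back to
 * verify it.  On failure, cancel the move instead of going R/O; the
 * caller can then return the target PEB with the torture flag set,
 * e.g. via ubi_wl_put_peb(ubi, pnum, 1).
 */
err = ubi_io_read_data(ubi, buf, to, 0, aldata_size);
if (err) {
	if (err == UBI_IO_BITFLIPS)
		err = MOVE_CANCEL_BITFLIPS;
	else
		err = MOVE_TARGET_RD_ERR;
	goto out_unlock;
}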
The error paths have been tested by injecting errors into
'ubi_eba_copy_leb()'.
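The patch itself contains no injection code; one simple ad-hoc way
to exercise such paths (purely hypothetical, shown only for
illustration, including the UBI_INJECT_COPY_ERRORS guard) is a
compile-time hook that fails a fraction of copy operations:

/* Hypothetical test-only hook, not part of the patch: make every
 * 16th copy operation report a source read error so that the new
 * error paths in the WL worker get exercised.
 */
#ifdef UBI_INJECT_COPY_ERRORS
static int inject_copy_error(void)
{
	static unsigned int count;

	if ((++count % 16) == 0)
		return MOVE_SOURCE_RD_ERR;
	return 0;
}
#endif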
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Diffstat (limited to 'drivers/mtd/ubi/ubi.h')
-rw-r--r--  drivers/mtd/ubi/ubi.h | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index fd9b20da5b6b..6d929329a8d5 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -105,6 +105,10 @@ enum {
  *
  * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source
  *                   PEB was put meanwhile, or there is I/O on the source PEB
+ * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source
+ *                     PEB
+ * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target
+ *                     PEB
  * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target
  *                     PEB
  * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
@@ -112,6 +116,8 @@ enum {
  */
 enum {
 	MOVE_CANCEL_RACE = 1,
+	MOVE_SOURCE_RD_ERR,
+	MOVE_TARGET_RD_ERR,
 	MOVE_TARGET_WR_ERR,
 	MOVE_CANCEL_BITFLIPS,
 };
@@ -334,14 +340,15 @@ struct ubi_wl_entry;
  * @alc_mutex: serializes "atomic LEB change" operations
  *
  * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
  * @scrub: RB-tree of physical eraseblocks which need scrubbing
  * @pq: protection queue (contain physical eraseblocks which are temporarily
  *      protected from the wear-leveling worker)
  * @pq_head: protection queue head
  * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
- *           fields
+ *           @move_to, @move_to_put @erase_pending, @wl_scheduled, @works and
+ *           @erroneous_peb_count fields
  * @move_mutex: serializes eraseblock moves
  * @work_sem: synchronizes the WL worker with use tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
@@ -361,6 +368,8 @@ struct ubi_wl_entry;
  * @peb_size: physical eraseblock size
  * @bad_peb_count: count of bad physical eraseblocks
  * @good_peb_count: count of good physical eraseblocks
+ * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous
+ * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks
  * @min_io_size: minimal input/output unit size of the underlying MTD device
  * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers
  * @ro_mode: if the UBI device is in read-only mode
@@ -418,6 +427,7 @@ struct ubi_device {
 
 	/* Wear-leveling sub-system's stuff */
 	struct rb_root used;
+	struct rb_root erroneous;
 	struct rb_root free;
 	struct rb_root scrub;
 	struct list_head pq[UBI_PROT_QUEUE_LEN];
@@ -442,6 +452,8 @@ struct ubi_device {
 	int peb_size;
 	int bad_peb_count;
 	int good_peb_count;
+	int erroneous_peb_count;
+	int max_erroneous;
 	int min_io_size;
 	int hdrs_min_io_size;
 	int ro_mode;