author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2007-09-13 14:28:14 +0300
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>   2007-10-14 13:10:21 +0300
commit     e8823bd63d50bb1f9bd73f1197230e1f7217456a (patch)
tree       3b8b849512686aebda99f83d290c9e09ed983967 /drivers/mtd
parent     6986646ba752fef150286926aa922ef04e9d19dd (diff)
UBI: fix atomic LEB change problems
When the UBI device is nearly full, i.e. all LEBs are mapped, we have
only one spare LEB left - the one reserved for WL purposes. (I do not
count the PEBs reserved for bad PEB handling - suppose NOR flash for
simplicity.) If an "atomic LEB change" operation runs while the WL unit
is moving a LEB, there is no spare LEB left to finish the operation
with, and it fails, which is not good. Moreover, if there are 2 or more
simultaneous "atomic LEB change" requests, only one of them has a
chance to succeed; the others will fail with -ENOSPC. Not good either.
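To make the failure concrete, here is a toy userspace model (illustrative only;
the counter and helper below are not the real UBI data structures) of the
accounting described above:

#include <errno.h>
#include <stdio.h>

/* Toy model of the pre-patch situation: the device is nearly full, every
 * LEB is mapped, and the only unused PEB is the one reserved for WL. */
static int avail_pebs = 1;

/* Grab a free PEB, roughly what ubi_wl_get_peb() does in the real code. */
static int get_peb(const char *who)
{
	if (avail_pebs == 0) {
		printf("%s: no free PEB -> -ENOSPC\n", who);
		return -ENOSPC;
	}
	avail_pebs--;
	printf("%s: got a PEB, %d left\n", who, avail_pebs);
	return 0;
}

int main(void)
{
	/* Wear-levelling is moving a LEB and holds the single spare PEB... */
	get_peb("wear-levelling");
	/* ...so an atomic LEB change started now has nowhere to write... */
	get_peb("atomic LEB change A");
	/* ...and a second concurrent request fares no better. */
	get_peb("atomic LEB change B");
	return 0;
}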
This patch does 2 things:
1. Reserves one PEB for the "atomic LEB change" operation.
2. Serializes the operations so that only one of them may run
   at a time (by means of a mutex).
Pointed-to-by: Brijesh Singh <brijesh.s.singh@gmail.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
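The shape of the fix is the classic reserve-plus-serialize pattern: set one
PEB aside for this operation alone and put a mutex around its only user, so a
second request waits instead of failing. A minimal pthread sketch of that
pattern follows (illustrative only, not kernel code; the real change is in the
diff below):

#include <pthread.h>
#include <stdio.h>

/* One PEB reserved exclusively for atomic LEB changes (EBA_RESERVED_PEBS in
 * the patch), plus a mutex that lets only one change run at a time. */
static int reserved_peb_free = 1;
static pthread_mutex_t alc_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *atomic_leb_change(void *arg)
{
	const char *name = arg;

	pthread_mutex_lock(&alc_mutex);   /* serialize, as ubi->alc_mutex does */
	reserved_peb_free--;              /* take the reserved PEB ...         */
	printf("%s: writing new contents to the reserved PEB\n", name);
	reserved_peb_free++;              /* ... and return it once remapped   */
	pthread_mutex_unlock(&alc_mutex);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	/* Two concurrent requests: with the mutex they run back to back and
	 * both succeed; without it they would race for the single PEB. */
	pthread_create(&a, NULL, atomic_leb_change, "change A");
	pthread_create(&b, NULL, atomic_leb_change, "change B");
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}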
Diffstat (limited to 'drivers/mtd')
-rw-r--r--  drivers/mtd/ubi/eba.c  48
-rw-r--r--  drivers/mtd/ubi/ubi.h   6
2 files changed, 33 insertions, 21 deletions
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 81bb6a3..7b7add6 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -46,6 +46,9 @@
 #include <linux/err.h>
 #include "ubi.h"
 
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
 /**
  * struct ltree_entry - an entry in the lock tree.
  * @rb: links RB-tree nodes
@@ -827,6 +830,9 @@ write_error:
  * data, which has to be aligned. This function guarantees that in case of an
  * unclean reboot the old contents is preserved. Returns zero in case of
  * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 			      const void *buf, int len, int dtype)
@@ -843,11 +849,10 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 	if (!vid_hdr)
 		return -ENOMEM;
 
+	mutex_lock(&ubi->alc_mutex);
 	err = leb_write_lock(ubi, vol_id, lnum);
-	if (err) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
-	}
+	if (err)
+		goto out_mutex;
 
 	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
 	vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -864,9 +869,8 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 retry:
 	pnum = ubi_wl_get_peb(ubi, dtype);
 	if (pnum < 0) {
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		leb_write_unlock(ubi, vol_id, lnum);
-		return pnum;
+		err = pnum;
+		goto out_leb_unlock;
 	}
 
 	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
@@ -888,17 +892,18 @@ retry:
 
 	if (vol->eba_tbl[lnum] >= 0) {
 		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
-		if (err) {
-			ubi_free_vid_hdr(ubi, vid_hdr);
-			leb_write_unlock(ubi, vol_id, lnum);
-			return err;
-		}
+		if (err)
+			goto out_leb_unlock;
 	}
 
 	vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
 	leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+	mutex_unlock(&ubi->alc_mutex);
 	ubi_free_vid_hdr(ubi, vid_hdr);
-	return 0;
+	return err;
 
 write_error:
 	if (err != -EIO || !ubi->bad_allowed) {
@@ -908,17 +913,13 @@ write_error:
 		 * mode just in case.
 		 */
 		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
+		goto out_leb_unlock;
 	}
 
 	err = ubi_wl_put_peb(ubi, pnum, 1);
 	if (err || ++tries > UBI_IO_RETRIES) {
 		ubi_ro_mode(ubi);
-		leb_write_unlock(ubi, vol_id, lnum);
-		ubi_free_vid_hdr(ubi, vid_hdr);
-		return err;
+		goto out_leb_unlock;
 	}
 
 	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
@@ -1122,6 +1123,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	dbg_eba("initialize EBA unit");
 
 	spin_lock_init(&ubi->ltree_lock);
+	mutex_init(&ubi->alc_mutex);
 	ubi->ltree = RB_ROOT;
 
 	if (ubi_devices_cnt == 0) {
@@ -1183,6 +1185,14 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
 	}
 
+	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+		ubi_err("no enough physical eraseblocks (%d, need %d)",
+			ubi->avail_pebs, EBA_RESERVED_PEBS);
+		goto out_free;
+	}
+	ubi->avail_pebs -= EBA_RESERVED_PEBS;
+	ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
 	dbg_eba("EBA unit is initialized");
 	return 0;
 
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index cc010111..5e941a6 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -221,14 +221,15 @@ struct ubi_wl_entry;
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
+ * @vtbl_mutex: protects on-flash volume table
  *
  * @max_ec: current highest erase counter value
  * @mean_ec: current mean erase counter value
  *
- * global_sqnum: global sequence number
+ * @global_sqnum: global sequence number
  * @ltree_lock: protects the lock tree and @global_sqnum
  * @ltree: the lock tree
- * @vtbl_mutex: protects on-flash volume table
+ * @alc_mutex: serializes "atomic LEB change" operations
  *
  * @used: RB-tree of used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
@@ -308,6 +309,7 @@ struct ubi_device {
 	unsigned long long global_sqnum;
 	spinlock_t ltree_lock;
 	struct rb_root ltree;
+	struct mutex alc_mutex;
 
 	/* Wear-leveling unit's stuff */
 	struct rb_root used;
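For context, kernel code does not call ubi_eba_atomic_leb_change() directly;
it reaches it through the UBI kmod interface. A hypothetical module-side
caller, assuming the ubi_open_volume()/ubi_leb_change() API of that kernel
generation (volume numbers, buffer size and contents are made up), might look
roughly like this:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/mtd/ubi.h>

/* Hypothetical example: atomically replace LEB 0 of volume 0 on ubi0.
 * ubi_leb_change() takes the entire new LEB contents in one call and ends
 * up in ubi_eba_atomic_leb_change(), now serialized by alc_mutex.  The
 * length must be aligned to the volume's minimal I/O unit, so 64 bytes is
 * only valid where that unit is 1 byte (e.g. NOR flash). */
static int __init alc_example_init(void)
{
	struct ubi_volume_desc *desc;
	static const char buf[64] = "new LEB contents";
	int err;

	desc = ubi_open_volume(0, 0, UBI_READWRITE);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	err = ubi_leb_change(desc, 0, buf, sizeof(buf), UBI_UNKNOWN);
	ubi_close_volume(desc);
	return err;
}

static void __exit alc_example_exit(void)
{
}

module_init(alc_example_init);
module_exit(alc_example_exit);
MODULE_LICENSE("GPL");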