author	Artem Bityutskiy <Artem.Bityutskiy@linux.intel.com>	2012-05-17 08:26:24 +0300
committer	Artem Bityutskiy <artem.bityutskiy@linux.intel.com>	2012-05-20 20:26:02 +0300
commit	2c5ec5ce66c0170829c5c128b9235429936442ac (patch)
tree	7010d8c62bcad3ce725096971cebdcd0289cd648 /drivers/mtd/ubi/wl.c
parent	55e93e55aaa9c38e45767bf3c963d03082f28978 (diff)
UBI: rename seb to aeb
After renaming 'struct ubi_scan_leb', we should adjust all variables named 'seb' to something else, because 'seb' stands for "scanning eraseblock". Let's rename it to 'aeb', which stands for "attaching eraseblock"; this is a bit more consistent and has the same length.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@linux.intel.com>
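For readers outside UBI, the 'seb'/'aeb' locals are per-eraseblock cursors used while walking attach-time information and copying it into wear-leveling entries, which is exactly what the hunks below do. The following is a minimal, self-contained sketch of that copy pattern; the structures and values are simplified stand-ins for illustration only, not the kernel's real 'struct ubi_ainf_peb', 'struct ubi_wl_entry', or list iterators.

#include <stdio.h>

/* Simplified stand-ins (not the kernel structures), for illustration only. */
struct ainf_peb {		/* plays the role of 'struct ubi_ainf_peb' ('aeb') */
	int pnum;		/* physical eraseblock number */
	int ec;			/* erase counter */
	int scrub;		/* non-zero if the eraseblock needs scrubbing */
};

struct wl_entry {		/* plays the role of 'struct ubi_wl_entry' */
	int pnum;
	int ec;
};

int main(void)
{
	struct ainf_peb attach_info[] = { { 0, 12, 0 }, { 1, 7, 1 } };
	size_t n = sizeof(attach_info) / sizeof(attach_info[0]);

	/* For each attach-time eraseblock ('aeb'), copy its identity and
	 * erase counter into a wear-leveling entry and pick a tree for it,
	 * mirroring what ubi_wl_init_scan() does in the hunks below. */
	for (size_t i = 0; i < n; i++) {
		struct ainf_peb *aeb = &attach_info[i];
		struct wl_entry e = { .pnum = aeb->pnum, .ec = aeb->ec };

		printf("PEB %d EC %d -> %s tree\n", e.pnum, e.ec,
		       aeb->scrub ? "scrub" : "used");
	}
	return 0;
}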
Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r--	drivers/mtd/ubi/wl.c	22
1 file changed, 11 insertions, 11 deletions
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 12a85d5372cd..fb870f4fc924 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1385,7 +1385,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_attach_info *si)
 	int err, i;
 	struct rb_node *rb1, *rb2;
 	struct ubi_ainf_volume *sv;
-	struct ubi_ainf_peb *seb, *tmp;
+	struct ubi_ainf_peb *aeb, *tmp;
 	struct ubi_wl_entry *e;
 
 	ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
@@ -1406,15 +1406,15 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_attach_info *si)
 		INIT_LIST_HEAD(&ubi->pq[i]);
 	ubi->pq_head = 0;
 
-	list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
+	list_for_each_entry_safe(aeb, tmp, &si->erase, u.list) {
 		cond_resched();
 
 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
-		e->pnum = seb->pnum;
-		e->ec = seb->ec;
+		e->pnum = aeb->pnum;
+		e->ec = aeb->ec;
 		ubi->lookuptbl[e->pnum] = e;
 		if (schedule_erase(ubi, e, 0)) {
 			kmem_cache_free(ubi_wl_entry_slab, e);
@@ -1422,32 +1422,32 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_attach_info *si)
 		}
 	}
 
-	list_for_each_entry(seb, &si->free, u.list) {
+	list_for_each_entry(aeb, &si->free, u.list) {
 		cond_resched();
 
 		e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 		if (!e)
 			goto out_free;
 
-		e->pnum = seb->pnum;
-		e->ec = seb->ec;
+		e->pnum = aeb->pnum;
+		e->ec = aeb->ec;
 		ubi_assert(e->ec >= 0);
 		wl_tree_add(e, &ubi->free);
 		ubi->lookuptbl[e->pnum] = e;
 	}
 
 	ubi_rb_for_each_entry(rb1, sv, &si->volumes, rb) {
-		ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
+		ubi_rb_for_each_entry(rb2, aeb, &sv->root, u.rb) {
 			cond_resched();
 
 			e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
 			if (!e)
 				goto out_free;
 
-			e->pnum = seb->pnum;
-			e->ec = seb->ec;
+			e->pnum = aeb->pnum;
+			e->ec = aeb->ec;
 			ubi->lookuptbl[e->pnum] = e;
-			if (!seb->scrub) {
+			if (!aeb->scrub) {
 				dbg_wl("add PEB %d EC %d to the used tree",
 				       e->pnum, e->ec);
 				wl_tree_add(e, &ubi->used);