author     Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 14:42:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-06-03 14:42:24 -0700
commit     744983d8784214c4f184be7448efb216315b48ae (patch)
tree       d674fc3b9bce6ff73a2294ce30a75ccb87f83b5e /drivers
parent     4e583ff9df9126e275c32c6ecff54b0316fe1dee (diff)
parent     8c03a1c21d72210f81cb369cc528e3fde4b45411 (diff)
Merge tag 'for-linus-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs
Pull JFFS2, UBI and UBIFS updates from Richard Weinberger:
"JFFS2:
- Fixes for a memory leak
UBI:
- Fixes for fastmap (UAF, high CPU usage)
UBIFS:
- Minor cleanups"
* tag 'for-linus-5.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
ubi: ubi_create_volume: Fix use-after-free when volume creation failed
ubi: fastmap: Check wl_pool for free peb before wear leveling
ubi: fastmap: Fix high cpu usage of ubi_bgt by making sure wl_pool not empty
ubifs: Use NULL instead of using plain integer as pointer
ubifs: Simplify the return expression of run_gc()
jffs2: fix memory leak in jffs2_do_fill_super
jffs2: Use kzalloc instead of kmalloc/memset
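The fastmap fixes below replace the hard-coded wl_pool refill margin (free_count - beb_rsvd_pebs < 5) with an accounting helper, has_enough_free_count(), shown in the fastmap-wl.c hunk. As a standalone illustration of the rule that helper encodes, here is a minimal user-space C sketch; the struct, field names, and numbers are hypothetical stand-ins, not the kernel's own:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical device state; fields loosely mirror struct ubi_device. */
	struct dev_state {
		int free_count;     /* PEBs on the ubi->free tree */
		int beb_rsvd_pebs;  /* PEBs reserved for bad-block handling */
		int fm_pebs;        /* non-anchor PEBs the fastmap itself occupies */
	};

	/*
	 * The new rule: refill a pool only if enough free PEBs remain after
	 * subtracting the bad-block reserve (wear-leveling pool only) and
	 * the PEBs the fastmap will consume.
	 */
	static bool has_enough_free(const struct dev_state *d, bool is_wl_pool)
	{
		int rsvd = is_wl_pool ? d->beb_rsvd_pebs : 0;

		return d->free_count - rsvd > d->fm_pebs;
	}

	int main(void)
	{
		struct dev_state d = { .free_count = 6, .beb_rsvd_pebs = 4, .fm_pebs = 1 };

		/*
		 * The old wl_pool check, "free_count - beb_rsvd_pebs < 5",
		 * refuses to refill here (6 - 4 = 2 < 5), while the new rule
		 * still allows it (6 - 4 = 2 > 1 fastmap PEB).
		 */
		printf("wl_pool refill allowed: %s\n",
		       has_enough_free(&d, true) ? "yes" : "no");
		return 0;
	}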
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mtd/ubi/fastmap-wl.c | 121
-rw-r--r--  drivers/mtd/ubi/fastmap.c    |  11
-rw-r--r--  drivers/mtd/ubi/ubi.h        |   4
-rw-r--r--  drivers/mtd/ubi/vmt.c        |   1
-rw-r--r--  drivers/mtd/ubi/wl.c         |  33
-rw-r--r--  drivers/mtd/ubi/wl.h         |   2
6 files changed, 123 insertions, 49 deletions
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 28f55f9cf715..0ee452275578 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -97,6 +97,33 @@ out:
 	return e;
 }
 
+/*
+ * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * @ubi: UBI device description object
+ * @is_wl_pool: whether UBI is filling wear leveling pool
+ *
+ * This helper function checks whether there are enough free pebs (deducted
+ * by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
+ * there is at least one of free pebs is filled into fm_wl_pool.
+ * For wear leveling pool, UBI should also reserve free pebs for bad pebs
+ * handling, because there maybe no enough free pebs for user volumes after
+ * producing new bad pebs.
+ */
+static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+{
+	int fm_used = 0;	// fastmap non anchor pebs.
+	int beb_rsvd_pebs;
+
+	if (!ubi->free.rb_node)
+		return false;
+
+	beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
+	if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+		fm_used = ubi->fm_size / ubi->leb_size - 1;
+
+	return ubi->free_count - beb_rsvd_pebs > fm_used;
+}
+
 /**
  * ubi_refill_pools - refills all fastmap PEB pools.
  * @ubi: UBI device description object
@@ -120,21 +147,17 @@ void ubi_refill_pools(struct ubi_device *ubi)
 		wl_tree_add(ubi->fm_anchor, &ubi->free);
 		ubi->free_count++;
 	}
-	if (ubi->fm_next_anchor) {
-		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
-		ubi->free_count++;
-	}
 
-	/* All available PEBs are in ubi->free, now is the time to get
+	/*
+	 * All available PEBs are in ubi->free, now is the time to get
 	 * the best anchor PEBs.
 	 */
 	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
-	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
 
 	for (;;) {
 		enough = 0;
 		if (pool->size < pool->max_size) {
-			if (!ubi->free.rb_node)
+			if (!has_enough_free_count(ubi, false))
 				break;
 
 			e = wl_get_wle(ubi);
@@ -147,8 +170,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
 			enough++;
 
 		if (wl_pool->size < wl_pool->max_size) {
-			if (!ubi->free.rb_node ||
-			    (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+			if (!has_enough_free_count(ubi, true))
 				break;
 
 			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
@@ -253,6 +275,58 @@ out:
 	return ret;
 }
 
+/**
+ * next_peb_for_wl - returns next PEB to be used internally by the
+ * WL sub-system.
+ *
+ * @ubi: UBI device description object
+ */
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
+{
+	struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+	int pnum;
+
+	if (pool->used == pool->size)
+		return NULL;
+
+	pnum = pool->pebs[pool->used];
+	return ubi->lookuptbl[pnum];
+}
+
+/**
+ * need_wear_leveling - checks whether to trigger a wear leveling work.
+ * UBI fetches free PEB from wl_pool, we check free PEBs from both 'wl_pool'
+ * and 'ubi->free', because free PEB in 'ubi->free' tree maybe moved into
+ * 'wl_pool' by ubi_refill_pools().
+ *
+ * @ubi: UBI device description object
+ */
+static bool need_wear_leveling(struct ubi_device *ubi)
+{
+	int ec;
+	struct ubi_wl_entry *e;
+
+	if (!ubi->used.rb_node)
+		return false;
+
+	e = next_peb_for_wl(ubi);
+	if (!e) {
+		if (!ubi->free.rb_node)
+			return false;
+		e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+		ec = e->ec;
+	} else {
+		ec = e->ec;
+		if (ubi->free.rb_node) {
+			e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+			ec = max(ec, e->ec);
+		}
+	}
+	e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
+
+	return ec - e->ec >= UBI_WL_THRESHOLD;
+}
+
 /* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
  *
  * @ubi: UBI device description object
@@ -286,20 +360,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
 	struct ubi_work *wrk;
+	struct ubi_wl_entry *anchor;
 
 	spin_lock(&ubi->wl_lock);
 
-	/* Do we have a next anchor? */
-	if (!ubi->fm_next_anchor) {
-		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
-		if (!ubi->fm_next_anchor)
-			/* Tell wear leveling to produce a new anchor PEB */
-			ubi->fm_do_produce_anchor = 1;
+	/* Do we already have an anchor? */
+	if (ubi->fm_anchor) {
+		spin_unlock(&ubi->wl_lock);
+		return 0;
 	}
 
-	/* Do wear leveling to get a new anchor PEB or check the
-	 * existing next anchor candidate.
-	 */
+	/* See if we can find an anchor PEB on the list of free PEBs */
+	anchor = ubi_wl_get_fm_peb(ubi, 1);
+	if (anchor) {
+		ubi->fm_anchor = anchor;
+		spin_unlock(&ubi->wl_lock);
+		return 0;
+	}
+
+	ubi->fm_do_produce_anchor = 1;
+	/* No luck, trigger wear leveling to produce a new anchor PEB. */
 	if (ubi->wl_scheduled) {
 		spin_unlock(&ubi->wl_lock);
 		return 0;
@@ -381,11 +461,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
 		ubi->fm_anchor = NULL;
 	}
 
-	if (ubi->fm_next_anchor) {
-		return_unused_peb(ubi, ubi->fm_next_anchor);
-		ubi->fm_next_anchor = NULL;
-	}
-
 	if (ubi->fm) {
 		for (i = 0; i < ubi->fm->used_blocks; i++)
 			kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 6b5f1ffd961b..6e95c4b1473e 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1230,17 +1230,6 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		fm_pos += sizeof(*fec);
 		ubi_assert(fm_pos <= ubi->fm_size);
 	}
-	if (ubi->fm_next_anchor) {
-		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
-
-		fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
-		set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
-		fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
-
-		free_peb_count++;
-		fm_pos += sizeof(*fec);
-		ubi_assert(fm_pos <= ubi->fm_size);
-	}
 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 7c083ad58274..078112e23dfd 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -489,8 +489,7 @@ struct ubi_debug_info {
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The new anchor PEB used during fastmap update
- * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
+ * @fm_anchor: The next anchor PEB to use for fastmap
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
  *
  * @used: RB-tree of used physical eraseblocks
@@ -601,7 +600,6 @@ struct ubi_device {
 	int fm_work_scheduled;
 	int fast_attach;
 	struct ubi_wl_entry *fm_anchor;
-	struct ubi_wl_entry *fm_next_anchor;
 	int fm_do_produce_anchor;
 
 	/* Wear-leveling sub-system's stuff */
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1bc7b3a05604..6ea95ade4ca6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -309,7 +309,6 @@ out_mapping:
 	ubi->volumes[vol_id] = NULL;
 	ubi->vol_count -= 1;
 	spin_unlock(&ubi->volumes_lock);
-	ubi_eba_destroy_table(eba_tbl);
 out_acc:
 	spin_lock(&ubi->volumes_lock);
 	ubi->rsvd_pebs -= vol->reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8455f1d47f3c..55bae06cf408 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -670,7 +670,11 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	ubi_assert(!ubi->move_from && !ubi->move_to);
 	ubi_assert(!ubi->move_to_put);
 
+#ifdef CONFIG_MTD_UBI_FASTMAP
+	if (!next_peb_for_wl(ubi) ||
+#else
 	if (!ubi->free.rb_node ||
+#endif
 	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
 		/*
 		 * No free physical eraseblocks? Well, they must be waiting in
@@ -689,16 +693,16 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
 	e1 = find_anchor_wl_entry(&ubi->used);
-	if (e1 && ubi->fm_next_anchor &&
-	    (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+	if (e1 && ubi->fm_anchor &&
+	    (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
 		ubi->fm_do_produce_anchor = 1;
-		/* fm_next_anchor is no longer considered a good anchor
-		 * candidate.
+		/*
+		 * fm_anchor is no longer considered a good anchor.
 		 * NULL assignment also prevents multiple wear level checks
 		 * of this PEB.
 		 */
-		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
-		ubi->fm_next_anchor = NULL;
+		wl_tree_add(ubi->fm_anchor, &ubi->free);
+		ubi->fm_anchor = NULL;
 		ubi->free_count++;
 	}
 
@@ -1003,8 +1007,6 @@ out_cancel:
 static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 {
 	int err = 0;
-	struct ubi_wl_entry *e1;
-	struct ubi_wl_entry *e2;
 	struct ubi_work *wrk;
 
 	spin_lock(&ubi->wl_lock);
@@ -1017,6 +1019,13 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 	 * the WL worker has to be scheduled anyway.
 	 */
 	if (!ubi->scrub.rb_node) {
+#ifdef CONFIG_MTD_UBI_FASTMAP
+		if (!need_wear_leveling(ubi))
+			goto out_unlock;
+#else
+		struct ubi_wl_entry *e1;
+		struct ubi_wl_entry *e2;
+
 		if (!ubi->used.rb_node || !ubi->free.rb_node)
 			/* No physical eraseblocks - no deal */
 			goto out_unlock;
@@ -1032,6 +1041,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
 		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
 			goto out_unlock;
 
+#endif
 		dbg_wl("schedule wear-leveling");
 	} else
 		dbg_wl("schedule scrubbing");
@@ -1085,12 +1095,13 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 	if (!err) {
 		spin_lock(&ubi->wl_lock);
 
-		if (!ubi->fm_disabled && !ubi->fm_next_anchor &&
+		if (!ubi->fm_disabled && !ubi->fm_anchor &&
 		    e->pnum < UBI_FM_MAX_START) {
-			/* Abort anchor production, if needed it will be
+			/*
+			 * Abort anchor production, if needed it will be
 			 * enabled again in the wear leveling started below.
 			 */
-			ubi->fm_next_anchor = e;
+			ubi->fm_anchor = e;
 			ubi->fm_do_produce_anchor = 0;
 		} else {
 			wl_tree_add(e, &ubi->free);
diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
index c93a53293786..5ebe374a08ae 100644
--- a/drivers/mtd/ubi/wl.h
+++ b/drivers/mtd/ubi/wl.h
@@ -5,6 +5,8 @@
 static void update_fastmap_work_fn(struct work_struct *wrk);
 static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi);
+static bool need_wear_leveling(struct ubi_device *ubi);
 static void ubi_fastmap_close(struct ubi_device *ubi);
 static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
 {