author | Chunhai Guo <guochunhai@vivo.com> | 2024-04-02 03:27:57 -0600
committer | Gao Xiang <hsiangkao@linux.alibaba.com> | 2024-05-08 17:12:50 +0800
commit | d6db47e571dcaecaeaafa8840d00ae849ae3907b (patch)
tree | c45287a1cf5c3e76da537f387b13b8059ebe285e /fs/erofs
parent | f36f3010f67611a45d66e773bc91e4c66a9abab5 (diff)
erofs: do not use pagepool in z_erofs_gbuf_growsize()
Let's use alloc_pages_bulk_array() for simplicity and get rid of the
unnecessary pagepool.
Signed-off-by: Chunhai Guo <guochunhai@vivo.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240402092757.2635257-1-guochunhai@vivo.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
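For context, the pattern the patch switches to works like this: alloc_pages_bulk_array() fills only the NULL slots of a page array and returns how many leading entries are populated, which can fall short under memory pressure, so the caller retries until the array is full and gives up once a pass makes no progress. The sketch below shows that retry loop in isolation; fill_page_array() is an illustrative helper and not part of the patch (in the patch itself, the leading slots are pre-seeded with the gbuf's existing pages, so the error path frees only the newly allocated ones).

```c
/*
 * Minimal sketch of the alloc_pages_bulk_array() retry pattern, assuming
 * ordinary kernel (process) context. Not the erofs code itself.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct page **fill_page_array(unsigned int nrpages)
{
        struct page **pages;
        unsigned long filled = 0, last;

        pages = kcalloc(nrpages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        do {
                last = filled;
                /* fills NULL slots only; returns how many leading entries
                 * of @pages are now populated */
                filled = alloc_pages_bulk_array(GFP_KERNEL, nrpages, pages);
                if (filled == last)     /* no progress at all: give up */
                        goto err_free;
        } while (filled < nrpages);
        return pages;

err_free:
        while (filled)
                __free_page(pages[--filled]);
        kfree(pages);
        return NULL;
}
```

Compared with the old per-page erofs_allocpage() loop backed by a local pagepool, one bulk call per pass avoids the intermediate pool entirely, which is the simplification the commit message refers to.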
Diffstat (limited to 'fs/erofs')
-rw-r--r-- | fs/erofs/zutil.c | 67
1 file changed, 31 insertions, 36 deletions
```diff
diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c
index 2fa90b10b985..bc88dc4fe8bd 100644
--- a/fs/erofs/zutil.c
+++ b/fs/erofs/zutil.c
@@ -60,63 +60,58 @@ void z_erofs_put_gbuf(void *ptr) __releases(gbuf->lock)
 int z_erofs_gbuf_growsize(unsigned int nrpages)
 {
         static DEFINE_MUTEX(gbuf_resize_mutex);
-        struct page *pagepool = NULL;
-        int delta, ret, i, j;
+        struct page **tmp_pages = NULL;
+        struct z_erofs_gbuf *gbuf;
+        void *ptr, *old_ptr;
+        int last, i, j;
 
         mutex_lock(&gbuf_resize_mutex);
-        delta = nrpages - z_erofs_gbuf_nrpages;
-        ret = 0;
         /* avoid shrinking gbufs, since no idea how many fses rely on */
-        if (delta <= 0)
-                goto out;
+        if (nrpages <= z_erofs_gbuf_nrpages) {
+                mutex_unlock(&gbuf_resize_mutex);
+                return 0;
+        }
 
         for (i = 0; i < z_erofs_gbuf_count; ++i) {
-                struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
-                struct page **pages, **tmp_pages;
-                void *ptr, *old_ptr = NULL;
-
-                ret = -ENOMEM;
+                gbuf = &z_erofs_gbufpool[i];
                 tmp_pages = kcalloc(nrpages, sizeof(*tmp_pages), GFP_KERNEL);
                 if (!tmp_pages)
-                        break;
-                for (j = 0; j < nrpages; ++j) {
-                        tmp_pages[j] = erofs_allocpage(&pagepool, GFP_KERNEL);
-                        if (!tmp_pages[j])
-                                goto free_pagearray;
-                }
+                        goto out;
+
+                for (j = 0; j < gbuf->nrpages; ++j)
+                        tmp_pages[j] = gbuf->pages[j];
+                do {
+                        last = j;
+                        j = alloc_pages_bulk_array(GFP_KERNEL, nrpages,
+                                                   tmp_pages);
+                        if (last == j)
+                                goto out;
+                } while (j != nrpages);
+
                 ptr = vmap(tmp_pages, nrpages, VM_MAP, PAGE_KERNEL);
                 if (!ptr)
-                        goto free_pagearray;
+                        goto out;
 
-                pages = tmp_pages;
                 spin_lock(&gbuf->lock);
+                kfree(gbuf->pages);
+                gbuf->pages = tmp_pages;
                 old_ptr = gbuf->ptr;
                 gbuf->ptr = ptr;
-                tmp_pages = gbuf->pages;
-                gbuf->pages = pages;
-                j = gbuf->nrpages;
                 gbuf->nrpages = nrpages;
                 spin_unlock(&gbuf->lock);
-                ret = 0;
-                if (!tmp_pages) {
-                        DBG_BUGON(old_ptr);
-                        continue;
-                }
-
                 if (old_ptr)
                         vunmap(old_ptr);
-free_pagearray:
-                while (j)
-                        erofs_pagepool_add(&pagepool, tmp_pages[--j]);
-                kfree(tmp_pages);
-                if (ret)
-                        break;
         }
         z_erofs_gbuf_nrpages = nrpages;
-        erofs_release_pages(&pagepool);
 out:
+        if (i < z_erofs_gbuf_count && tmp_pages) {
+                for (j = 0; j < nrpages; ++j)
+                        if (tmp_pages[j] && tmp_pages[j] != gbuf->pages[j])
+                                __free_page(tmp_pages[j]);
+                kfree(tmp_pages);
+        }
         mutex_unlock(&gbuf_resize_mutex);
-        return ret;
+        return i < z_erofs_gbuf_count ? -ENOMEM : 0;
 }
 
 int __init z_erofs_gbuf_init(void)
```
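A side note on the locking in the rewritten resize path: the new page array and vmap'd pointer are published under gbuf->lock, and the old mapping is torn down only after the lock is dropped, since vunmap() may sleep and must not run under a spinlock. Below is a hedged, generic sketch of that publish-then-teardown step; struct pbuf and pbuf_publish() are made-up names for illustration, loosely modelled on struct z_erofs_gbuf.

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

/* Illustrative stand-in for struct z_erofs_gbuf. */
struct pbuf {
        spinlock_t lock;
        void *ptr;              /* vmap'd view of @pages */
        struct page **pages;
        unsigned int nrpages;
};

/* Publish a freshly built mapping, then release the old one unlocked. */
static void pbuf_publish(struct pbuf *b, void *new_ptr,
                         struct page **new_pages, unsigned int nrpages)
{
        void *old_ptr;

        spin_lock(&b->lock);
        kfree(b->pages);        /* old array only; its pages live on in @new_pages */
        b->pages = new_pages;
        old_ptr = b->ptr;
        b->ptr = new_ptr;
        b->nrpages = nrpages;
        spin_unlock(&b->lock);

        if (old_ptr)
                vunmap(old_ptr);        /* may sleep, so done outside the lock */
}
```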