Diffstat (limited to 'fs')
-rw-r--r--  fs/f2fs/checkpoint.c |  2
-rw-r--r--  fs/f2fs/compress.c   | 14
-rw-r--r--  fs/f2fs/data.c       | 14
-rw-r--r--  fs/f2fs/debug.c      | 33
-rw-r--r--  fs/f2fs/f2fs.h       | 96
-rw-r--r--  fs/f2fs/file.c       | 60
-rw-r--r--  fs/f2fs/gc.c         | 18
-rw-r--r--  fs/f2fs/inline.c     |  3
-rw-r--r--  fs/f2fs/inode.c      | 35
-rw-r--r--  fs/f2fs/recovery.c   |  1
-rw-r--r--  fs/f2fs/segment.c    | 89
-rw-r--r--  fs/f2fs/super.c      | 40
-rw-r--r--  fs/f2fs/sysfs.c      | 18
-rw-r--r--  fs/f2fs/xattr.c      |  6
14 files changed, 261 insertions(+), 168 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 8fd3b7f9fb88..b0597a539fc5 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -1701,9 +1701,9 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
}
f2fs_restore_inmem_curseg(sbi);
+ stat_inc_cp_count(sbi);
stop:
unblock_operations(sbi);
- stat_inc_cp_count(sbi->stat_info);
if (cpc->reason & CP_RECOVERY)
f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 0f7df9c11af3..d820801f473e 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -649,13 +649,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
goto destroy_compress_ctx;
}
- for (i = 0; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->nr_cpages; i++)
cc->cpages[i] = f2fs_compress_alloc_page();
- if (!cc->cpages[i]) {
- ret = -ENOMEM;
- goto out_free_cpages;
- }
- }
cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
if (!cc->rbuf) {
@@ -1574,8 +1569,6 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
}
dic->tpages[i] = f2fs_compress_alloc_page();
- if (!dic->tpages[i])
- return -ENOMEM;
}
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
@@ -1656,11 +1649,6 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
struct page *page;
page = f2fs_compress_alloc_page();
- if (!page) {
- ret = -ENOMEM;
- goto out_free;
- }
-
f2fs_set_compressed_page(page, cc->inode,
start_idx + i + 1, dic);
dic->cpages[i] = page;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 5882afe71d82..916e317ac925 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1167,6 +1167,9 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
f2fs_wait_on_block_writeback(inode, blkaddr);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ iostat_update_and_unbind_ctx(bio);
+ if (bio->bi_private)
+ mempool_free(bio->bi_private, bio_post_read_ctx_pool);
bio_put(bio);
return -EFAULT;
}
@@ -1389,18 +1392,14 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
-repeat:
+
page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
if (IS_ERR(page))
return page;
/* wait for read completion */
lock_page(page);
- if (unlikely(page->mapping != mapping)) {
- f2fs_put_page(page, 1);
- goto repeat;
- }
- if (unlikely(!PageUptodate(page))) {
+ if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
f2fs_put_page(page, 1);
return ERR_PTR(-EIO);
}
@@ -3236,8 +3235,7 @@ result:
}
goto next;
}
- done_index = folio->index +
- folio_nr_pages(folio);
+ done_index = folio_next_index(folio);
done = 1;
break;
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 61c35b59126e..fdbf994f1271 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -215,6 +215,9 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->valid_blks[type] += blks;
}
+ for (i = 0; i < MAX_CALL_TYPE; i++)
+ si->cp_call_count[i] = atomic_read(&sbi->cp_call_count[i]);
+
for (i = 0; i < 2; i++) {
si->segment_count[i] = sbi->segment_count[i];
si->block_count[i] = sbi->block_count[i];
@@ -497,7 +500,9 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Prefree: %d\n - Free: %d (%d)\n\n",
si->prefree_count, si->free_segs, si->free_secs);
seq_printf(s, "CP calls: %d (BG: %d)\n",
- si->cp_count, si->bg_cp_count);
+ si->cp_call_count[TOTAL_CALL],
+ si->cp_call_count[BACKGROUND]);
+ seq_printf(s, "CP count: %d\n", si->cp_count);
seq_printf(s, " - cp blocks : %u\n", si->meta_count[META_CP]);
seq_printf(s, " - sit blocks : %u\n",
si->meta_count[META_SIT]);
@@ -511,12 +516,24 @@ static int stat_show(struct seq_file *s, void *v)
seq_printf(s, " - Total : %4d\n", si->nr_total_ckpt);
seq_printf(s, " - Cur time : %4d(ms)\n", si->cur_ckpt_time);
seq_printf(s, " - Peak time : %4d(ms)\n", si->peak_ckpt_time);
- seq_printf(s, "GC calls: %d (BG: %d)\n",
- si->call_count, si->bg_gc);
- seq_printf(s, " - data segments : %d (%d)\n",
- si->data_segs, si->bg_data_segs);
- seq_printf(s, " - node segments : %d (%d)\n",
- si->node_segs, si->bg_node_segs);
+ seq_printf(s, "GC calls: %d (gc_thread: %d)\n",
+ si->gc_call_count[BACKGROUND] +
+ si->gc_call_count[FOREGROUND],
+ si->gc_call_count[BACKGROUND]);
+ if (__is_large_section(sbi)) {
+ seq_printf(s, " - data sections : %d (BG: %d)\n",
+ si->gc_secs[DATA][BG_GC] + si->gc_secs[DATA][FG_GC],
+ si->gc_secs[DATA][BG_GC]);
+ seq_printf(s, " - node sections : %d (BG: %d)\n",
+ si->gc_secs[NODE][BG_GC] + si->gc_secs[NODE][FG_GC],
+ si->gc_secs[NODE][BG_GC]);
+ }
+ seq_printf(s, " - data segments : %d (BG: %d)\n",
+ si->gc_segs[DATA][BG_GC] + si->gc_segs[DATA][FG_GC],
+ si->gc_segs[DATA][BG_GC]);
+ seq_printf(s, " - node segments : %d (BG: %d)\n",
+ si->gc_segs[NODE][BG_GC] + si->gc_segs[NODE][FG_GC],
+ si->gc_segs[NODE][BG_GC]);
seq_puts(s, " - Reclaimed segs :\n");
seq_printf(s, " - Normal : %d\n", sbi->gc_reclaimed_segs[GC_NORMAL]);
seq_printf(s, " - Idle CB : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_CB]);
@@ -687,6 +704,8 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
atomic_set(&sbi->inplace_count, 0);
for (i = META_CP; i < META_MAX; i++)
atomic_set(&sbi->meta_count[i], 0);
+ for (i = 0; i < MAX_CALL_TYPE; i++)
+ atomic_set(&sbi->cp_call_count[i], 0);
atomic_set(&sbi->max_aw_cnt, 0);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 613132339d72..6d688e42d89c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1383,6 +1383,13 @@ enum errors_option {
MOUNT_ERRORS_PANIC, /* panic on errors */
};
+enum {
+ BACKGROUND,
+ FOREGROUND,
+ MAX_CALL_TYPE,
+ TOTAL_CALL = FOREGROUND,
+};
+
static inline int f2fs_test_bit(unsigned int nr, char *addr);
static inline void f2fs_set_bit(unsigned int nr, char *addr);
static inline void f2fs_clear_bit(unsigned int nr, char *addr);
@@ -1695,6 +1702,7 @@ struct f2fs_sb_info {
unsigned int io_skip_bggc; /* skip background gc for in-flight IO */
unsigned int other_skip_bggc; /* skip background gc for other reasons */
unsigned int ndirty_inode[NR_INODE_TYPE]; /* # of dirty inodes */
+ atomic_t cp_call_count[MAX_CALL_TYPE]; /* # of cp call */
#endif
spinlock_t stat_lock; /* lock for stat operations */
@@ -2114,15 +2122,6 @@ static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem)
return down_read_trylock(&sem->internal_rwsem);
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
-{
- down_read_nested(&sem->internal_rwsem, subclass);
-}
-#else
-#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
-#endif
-
static inline void f2fs_up_read(struct f2fs_rwsem *sem)
{
up_read(&sem->internal_rwsem);
@@ -2133,6 +2132,21 @@ static inline void f2fs_down_write(struct f2fs_rwsem *sem)
down_write(&sem->internal_rwsem);
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_read_nested(&sem->internal_rwsem, subclass);
+}
+
+static inline void f2fs_down_write_nested(struct f2fs_rwsem *sem, int subclass)
+{
+ down_write_nested(&sem->internal_rwsem, subclass);
+}
+#else
+#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem)
+#define f2fs_down_write_nested(sem, subclass) f2fs_down_write(sem)
+#endif
+
static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem)
{
return down_write_trylock(&sem->internal_rwsem);
@@ -3887,7 +3901,7 @@ struct f2fs_stat_info {
int nats, dirty_nats, sits, dirty_sits;
int free_nids, avail_nids, alloc_nids;
int total_count, utilization;
- int bg_gc, nr_wb_cp_data, nr_wb_data;
+ int nr_wb_cp_data, nr_wb_data;
int nr_rd_data, nr_rd_node, nr_rd_meta;
int nr_dio_read, nr_dio_write;
unsigned int io_skip_bggc, other_skip_bggc;
@@ -3907,9 +3921,11 @@ struct f2fs_stat_info {
int rsvd_segs, overp_segs;
int dirty_count, node_pages, meta_pages, compress_pages;
int compress_page_hit;
- int prefree_count, call_count, cp_count, bg_cp_count;
- int tot_segs, node_segs, data_segs, free_segs, free_secs;
- int bg_node_segs, bg_data_segs;
+ int prefree_count, free_segs, free_secs;
+ int cp_call_count[MAX_CALL_TYPE], cp_count;
+ int gc_call_count[MAX_CALL_TYPE];
+ int gc_segs[2][2];
+ int gc_secs[2][2];
int tot_blks, data_blks, node_blks;
int bg_data_blks, bg_node_blks;
int curseg[NR_CURSEG_TYPE];
@@ -3931,10 +3947,9 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
return (struct f2fs_stat_info *)sbi->stat_info;
}
-#define stat_inc_cp_count(si) ((si)->cp_count++)
-#define stat_inc_bg_cp_count(si) ((si)->bg_cp_count++)
-#define stat_inc_call_count(si) ((si)->call_count++)
-#define stat_inc_bggc_count(si) ((si)->bg_gc++)
+#define stat_inc_cp_call_count(sbi, foreground) \
+ atomic_inc(&sbi->cp_call_count[(foreground)])
+#define stat_inc_cp_count(si) (F2FS_STAT(sbi)->cp_count++)
#define stat_io_skip_bggc_count(sbi) ((sbi)->io_skip_bggc++)
#define stat_other_skip_bggc_count(sbi) ((sbi)->other_skip_bggc++)
#define stat_inc_dirty_inode(sbi, type) ((sbi)->ndirty_inode[type]++)
@@ -4019,18 +4034,12 @@ static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
if (cur > max) \
atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur); \
} while (0)
-#define stat_inc_seg_count(sbi, type, gc_type) \
- do { \
- struct f2fs_stat_info *si = F2FS_STAT(sbi); \
- si->tot_segs++; \
- if ((type) == SUM_TYPE_DATA) { \
- si->data_segs++; \
- si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0; \
- } else { \
- si->node_segs++; \
- si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0; \
- } \
- } while (0)
+#define stat_inc_gc_call_count(sbi, foreground) \
+ (F2FS_STAT(sbi)->gc_call_count[(foreground)]++)
+#define stat_inc_gc_sec_count(sbi, type, gc_type) \
+ (F2FS_STAT(sbi)->gc_secs[(type)][(gc_type)]++)
+#define stat_inc_gc_seg_count(sbi, type, gc_type) \
+ (F2FS_STAT(sbi)->gc_segs[(type)][(gc_type)]++)
#define stat_inc_tot_blk_count(si, blks) \
((si)->tot_blks += (blks))
@@ -4057,10 +4066,8 @@ void __init f2fs_create_root_stats(void);
void f2fs_destroy_root_stats(void);
void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#else
-#define stat_inc_cp_count(si) do { } while (0)
-#define stat_inc_bg_cp_count(si) do { } while (0)
-#define stat_inc_call_count(si) do { } while (0)
-#define stat_inc_bggc_count(si) do { } while (0)
+#define stat_inc_cp_call_count(sbi, foreground) do { } while (0)
+#define stat_inc_cp_count(sbi) do { } while (0)
#define stat_io_skip_bggc_count(sbi) do { } while (0)
#define stat_other_skip_bggc_count(sbi) do { } while (0)
#define stat_inc_dirty_inode(sbi, type) do { } while (0)
@@ -4088,7 +4095,9 @@ void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
#define stat_inc_seg_type(sbi, curseg) do { } while (0)
#define stat_inc_block_count(sbi, curseg) do { } while (0)
#define stat_inc_inplace_blocks(sbi) do { } while (0)
-#define stat_inc_seg_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_gc_call_count(sbi, foreground) do { } while (0)
+#define stat_inc_gc_sec_count(sbi, type, gc_type) do { } while (0)
+#define stat_inc_gc_seg_count(sbi, type, gc_type) do { } while (0)
#define stat_inc_tot_blk_count(si, blks) do { } while (0)
#define stat_inc_data_blk_count(sbi, blks, gc_type) do { } while (0)
#define stat_inc_node_blk_count(sbi, blks, gc_type) do { } while (0)
@@ -4425,6 +4434,22 @@ static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
}
#endif
+static inline int f2fs_bdev_index(struct f2fs_sb_info *sbi,
+ struct block_device *bdev)
+{
+ int i;
+
+ if (!f2fs_is_multi_device(sbi))
+ return 0;
+
+ for (i = 0; i < sbi->s_ndevs; i++)
+ if (FDEV(i).bdev == bdev)
+ return i;
+
+ WARN_ON(1);
+ return -1;
+}
+
static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
{
return f2fs_sb_has_blkzoned(sbi);
@@ -4485,7 +4510,8 @@ static inline bool f2fs_low_mem_mode(struct f2fs_sb_info *sbi)
static inline bool f2fs_may_compress(struct inode *inode)
{
if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
- f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode))
+ f2fs_is_atomic_file(inode) || f2fs_has_inline_data(inode) ||
+ f2fs_is_mmap_file(inode))
return false;
return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ce9d567cd5fe..ca5904129b16 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -526,7 +526,11 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
file_accessed(file);
vma->vm_ops = &f2fs_file_vm_ops;
+
+ f2fs_down_read(&F2FS_I(inode)->i_sem);
set_inode_flag(inode, FI_MMAP_FILE);
+ f2fs_up_read(&F2FS_I(inode)->i_sem);
+
return 0;
}
@@ -1724,6 +1728,7 @@ next_alloc:
if (has_not_enough_free_secs(sbi, 0,
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
if (err && err != -ENODATA)
goto out_err;
@@ -1919,12 +1924,19 @@ static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
int err = f2fs_convert_inline_inode(inode);
if (err)
return err;
- if (!f2fs_may_compress(inode))
- return -EINVAL;
- if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
+
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
+ if (!f2fs_may_compress(inode) ||
+ (S_ISREG(inode->i_mode) &&
+ F2FS_HAS_BLOCKS(inode))) {
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
return -EINVAL;
- if (set_compress_context(inode))
- return -EOPNOTSUPP;
+ }
+ err = set_compress_context(inode);
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
+
+ if (err)
+ return err;
}
}
@@ -2465,6 +2477,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
gc_control.init_gc_type = sync ? FG_GC : BG_GC;
gc_control.err_gc_skipped = sync;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
out:
mnt_drop_write_file(filp);
@@ -2508,6 +2521,7 @@ do_more:
}
gc_control.victim_segno = GET_SEGNO(sbi, range->start);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
if (ret) {
if (ret == -EBUSY)
@@ -2990,6 +3004,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
sm->last_victim[ALLOC_NEXT] = end_segno + 1;
gc_control.victim_segno = start_segno;
+ stat_inc_gc_call_count(sbi, FOREGROUND);
ret = f2fs_gc(sbi, &gc_control);
if (ret == -EAGAIN)
ret = 0;
@@ -3976,6 +3991,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
file_start_write(filp);
inode_lock(inode);
+ f2fs_down_write(&F2FS_I(inode)->i_sem);
if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
ret = -EBUSY;
goto out;
@@ -3995,6 +4011,7 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
f2fs_warn(sbi, "compression algorithm is successfully set, "
"but current kernel doesn't support this algorithm.");
out:
+ f2fs_up_write(&F2FS_I(inode)->i_sem);
inode_unlock(inode);
file_end_write(filp);
@@ -4079,10 +4096,8 @@ static int f2fs_ioc_decompress_file(struct file *filp)
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
-
- ret = redirty_blocks(inode, page_idx, len);
+ while (count && count >= cluster_size) {
+ ret = redirty_blocks(inode, page_idx, cluster_size);
if (ret < 0)
break;
@@ -4092,8 +4107,14 @@ static int f2fs_ioc_decompress_file(struct file *filp)
break;
}
- count -= len;
- page_idx += len;
+ count -= cluster_size;
+ page_idx += cluster_size;
+
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
@@ -4153,10 +4174,8 @@ static int f2fs_ioc_compress_file(struct file *filp)
last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
count = last_idx - page_idx;
- while (count) {
- int len = min(cluster_size, count);
-
- ret = redirty_blocks(inode, page_idx, len);
+ while (count && count >= cluster_size) {
+ ret = redirty_blocks(inode, page_idx, cluster_size);
if (ret < 0)
break;
@@ -4166,8 +4185,14 @@ static int f2fs_ioc_compress_file(struct file *filp)
break;
}
- count -= len;
- page_idx += len;
+ count -= cluster_size;
+ page_idx += cluster_size;
+
+ cond_resched();
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
}
if (!ret)
@@ -4579,6 +4604,7 @@ static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
dec_page_count(sbi, F2FS_DIO_WRITE);
if (error)
return error;
+ f2fs_update_time(sbi, REQ_TIME);
f2fs_update_iostat(sbi, NULL, APP_DIRECT_IO, size);
return 0;
}
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index a1ca394bc327..f550cdeaa663 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -121,8 +121,8 @@ static int gc_thread_func(void *data)
else
increase_sleep_time(gc_th, &wait_ms);
do_gc:
- if (!foreground)
- stat_inc_bggc_count(sbi->stat_info);
+ stat_inc_gc_call_count(sbi, foreground ?
+ FOREGROUND : BACKGROUND);
sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;
@@ -1685,6 +1685,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
int seg_freed = 0, migrated = 0;
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
+ unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
int submitted = 0;
if (__is_large_section(sbi))
@@ -1766,7 +1767,7 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
segno, gc_type,
force_migrate);
- stat_inc_seg_count(sbi, type, gc_type);
+ stat_inc_gc_seg_count(sbi, data_type, gc_type);
sbi->gc_reclaimed_segs[sbi->gc_mode]++;
migrated++;
@@ -1783,12 +1784,12 @@ skip:
}
if (submitted)
- f2fs_submit_merged_write(sbi,
- (type == SUM_TYPE_NODE) ? NODE : DATA);
+ f2fs_submit_merged_write(sbi, data_type);
blk_finish_plug(&plug);
- stat_inc_call_count(sbi->stat_info);
+ if (migrated)
+ stat_inc_gc_sec_count(sbi, data_type, gc_type);
return seg_freed;
}
@@ -1839,6 +1840,7 @@ gc_more:
* secure free segments which doesn't need fggc any more.
*/
if (prefree_segments(sbi)) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
@@ -1887,6 +1889,7 @@ retry:
round++;
if (skipped_round > MAX_SKIP_GC_COUNT &&
skipped_round * 2 >= round) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
goto stop;
}
@@ -1902,6 +1905,7 @@ retry:
*/
if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
prefree_segments(sbi)) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
ret = f2fs_write_checkpoint(sbi, &cpc);
if (ret)
goto stop;
@@ -2029,6 +2033,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi,
if (gc_only)
goto out;
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
goto out;
@@ -2223,6 +2228,7 @@ out_drop_write:
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
set_sbi_flag(sbi, SBI_IS_DIRTY);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err) {
update_fs_metadata(sbi, secs);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 88fc9208ffa7..2fe25619ccb5 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -641,7 +641,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
}
if (inode) {
- f2fs_down_write(&F2FS_I(inode)->i_sem);
+ f2fs_down_write_nested(&F2FS_I(inode)->i_sem,
+ SINGLE_DEPTH_NESTING);
page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index c1c2ba9f28e5..cde243840abd 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -214,7 +214,7 @@ static bool sanity_check_compress_inode(struct inode *inode,
f2fs_warn(sbi,
"%s: inode (ino=%lx) has unsupported compress algorithm: %u, run fsck to fix",
__func__, inode->i_ino, ri->i_compress_algorithm);
- goto err;
+ return false;
}
if (le64_to_cpu(ri->i_compr_blocks) >
SECTOR_TO_BLOCK(inode->i_blocks)) {
@@ -222,14 +222,14 @@ static bool sanity_check_compress_inode(struct inode *inode,
"%s: inode (ino=%lx) has inconsistent i_compr_blocks:%llu, i_blocks:%llu, run fsck to fix",
__func__, inode->i_ino, le64_to_cpu(ri->i_compr_blocks),
SECTOR_TO_BLOCK(inode->i_blocks));
- goto err;
+ return false;
}
if (ri->i_log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
ri->i_log_cluster_size > MAX_COMPRESS_LOG_SIZE) {
f2fs_warn(sbi,
"%s: inode (ino=%lx) has unsupported log cluster size: %u, run fsck to fix",
__func__, inode->i_ino, ri->i_log_cluster_size);
- goto err;
+ return false;
}
clevel = le16_to_cpu(ri->i_compress_flag) >>
@@ -273,8 +273,6 @@ static bool sanity_check_compress_inode(struct inode *inode,
err_level:
f2fs_warn(sbi, "%s: inode (ino=%lx) has unsupported compress level: %u, run fsck to fix",
__func__, inode->i_ino, clevel);
-err:
- set_sbi_flag(sbi, SBI_NEED_FSCK);
return false;
}
@@ -287,14 +285,12 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
if (!iblocks) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
__func__, inode->i_ino, iblocks);
return false;
}
if (ino_of_node(node_page) != nid_of_node(node_page)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
__func__, inode->i_ino,
ino_of_node(node_page), nid_of_node(node_page));
@@ -303,7 +299,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
if (f2fs_has_extra_attr(inode)) {
if (!f2fs_sb_has_extra_attr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
__func__, inode->i_ino);
return false;
@@ -311,7 +306,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
fi->i_extra_isize < F2FS_MIN_EXTRA_ATTR_SIZE ||
fi->i_extra_isize % sizeof(__le32)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
__func__, inode->i_ino, fi->i_extra_isize,
F2FS_TOTAL_EXTRA_ATTR_SIZE);
@@ -321,7 +315,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
f2fs_has_inline_xattr(inode) &&
(!fi->i_inline_xattr_size ||
fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
__func__, inode->i_ino, fi->i_inline_xattr_size,
MAX_INLINE_XATTR_SIZE);
@@ -335,7 +328,6 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
return false;
}
} else if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
__func__, inode->i_ino);
return false;
@@ -343,31 +335,26 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
if (!f2fs_sb_has_extra_attr(sbi)) {
if (f2fs_sb_has_project_quota(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_PRJQUOTA);
return false;
}
if (f2fs_sb_has_inode_chksum(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_INODE_CHKSUM);
return false;
}
if (f2fs_sb_has_flexible_inline_xattr(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_FLEXIBLE_INLINE_XATTR);
return false;
}
if (f2fs_sb_has_inode_crtime(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_INODE_CRTIME);
return false;
}
if (f2fs_sb_has_compression(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: corrupted inode ino=%lx, wrong feature flag: %u, run fsck to fix.",
__func__, inode->i_ino, F2FS_FEATURE_COMPRESSION);
return false;
@@ -375,21 +362,18 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
}
if (f2fs_sanity_check_inline_data(inode)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
}
if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
__func__, inode->i_ino, inode->i_mode);
return false;
}
if ((fi->i_flags & F2FS_CASEFOLD_FL) && !f2fs_sb_has_casefold(sbi)) {
- set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_warn(sbi, "%s: inode (ino=%lx) has casefold flag, but casefold feature is off",
__func__, inode->i_ino);
return false;
@@ -475,6 +459,13 @@ static int do_read_inode(struct inode *inode)
fi->i_inline_xattr_size = 0;
}
+ if (!sanity_check_inode(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
+ return -EFSCORRUPTED;
+ }
+
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
@@ -544,12 +535,6 @@ static int do_read_inode(struct inode *inode)
f2fs_init_read_extent_tree(inode, node_page);
f2fs_init_age_extent_tree(inode);
- if (!sanity_check_inode(inode, node_page)) {
- f2fs_put_page(node_page, 1);
- f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
- return -EFSCORRUPTED;
- }
-
if (!sanity_check_extent_cache(inode)) {
f2fs_put_page(node_page, 1);
f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index b8637e88d94f..7be60df277a5 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -924,6 +924,7 @@ skip:
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 0457d620011f..d05b41608fc0 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -205,6 +205,8 @@ void f2fs_abort_atomic_write(struct inode *inode, bool clean)
f2fs_i_size_write(inode, fi->original_i_size);
fi->original_i_size = 0;
}
+ /* avoid stale dirty inode during eviction */
+ sync_inode_metadata(inode, 0);
}
static int __replace_atomic_write_block(struct inode *inode, pgoff_t index,
@@ -433,6 +435,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
.err_gc_skipped = false,
.nr_free_secs = 1 };
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
f2fs_gc(sbi, &gc_control);
}
}
@@ -510,8 +513,8 @@ do_sync:
mutex_unlock(&sbi->flush_lock);
}
+ stat_inc_cp_call_count(sbi, BACKGROUND);
f2fs_sync_fs(sbi->sb, 1);
- stat_inc_bg_cp_count(sbi->stat_info);
}
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
@@ -1258,8 +1261,16 @@ static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev)) {
- __submit_zone_reset_cmd(sbi, dc, flag, wait_list, issued);
- return 0;
+ int devi = f2fs_bdev_index(sbi, bdev);
+
+ if (devi < 0)
+ return -EINVAL;
+
+ if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
+ __submit_zone_reset_cmd(sbi, dc, flag,
+ wait_list, issued);
+ return 0;
+ }
}
#endif
@@ -1785,15 +1796,24 @@ static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
dc = __lookup_discard_cmd(sbi, blkaddr);
#ifdef CONFIG_BLK_DEV_ZONED
if (dc && f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(dc->bdev)) {
- /* force submit zone reset */
- if (dc->state == D_PREP)
- __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
- &dcc->wait_list, NULL);
- dc->ref++;
- mutex_unlock(&dcc->cmd_lock);
- /* wait zone reset */
- __wait_one_discard_bio(sbi, dc);
- return;
+ int devi = f2fs_bdev_index(sbi, dc->bdev);
+
+ if (devi < 0) {
+ mutex_unlock(&dcc->cmd_lock);
+ return;
+ }
+
+ if (f2fs_blkz_is_seq(sbi, devi, dc->di.start)) {
+ /* force submit zone reset */
+ if (dc->state == D_PREP)
+ __submit_zone_reset_cmd(sbi, dc, REQ_SYNC,
+ &dcc->wait_list, NULL);
+ dc->ref++;
+ mutex_unlock(&dcc->cmd_lock);
+ /* wait zone reset */
+ __wait_one_discard_bio(sbi, dc);
+ return;
+ }
}
#endif
if (dc) {
@@ -2193,7 +2213,7 @@ find_next:
len = next_pos - cur_pos;
if (f2fs_sb_has_blkzoned(sbi) ||
- !force || len < cpc->trim_minlen)
+ (force && len < cpc->trim_minlen))
goto skip;
f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
@@ -3228,6 +3248,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
goto out;
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
f2fs_up_write(&sbi->gc_lock);
if (err)
@@ -4846,17 +4867,17 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
{
unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
block_t zone_block, wp_block, last_valid_block;
+ unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
int i, s, b, ret;
struct seg_entry *se;
if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = fdev->start_blk + (zone->wp >> sbi->log_sectors_per_block);
+ wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
wp_segno = GET_SEGNO(sbi, wp_block);
wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- zone_block = fdev->start_blk + (zone->start >>
- sbi->log_sectors_per_block);
+ zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
zone_segno = GET_SEGNO(sbi, zone_block);
zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
@@ -4906,7 +4927,7 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
"pointer. Reset the write pointer: wp[0x%x,0x%x]",
wp_segno, wp_blkoff);
ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
- zone->len >> sbi->log_sectors_per_block);
+ zone->len >> log_sectors_per_block);
if (ret)
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
fdev->path, ret);
@@ -4927,12 +4948,19 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
wp_segno, wp_blkoff);
- ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
- zone->len - (zone->wp - zone->start),
- GFP_NOFS, 0);
- if (ret)
- f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
- fdev->path, ret);
+ ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
+ zone->start, zone->len, GFP_NOFS);
+ if (ret == -EOPNOTSUPP) {
+ ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
+ zone->len - (zone->wp - zone->start),
+ GFP_NOFS, 0);
+ if (ret)
+ f2fs_err(sbi, "Fill up zone failed: %s (errno=%d)",
+ fdev->path, ret);
+ } else if (ret) {
+ f2fs_err(sbi, "Finishing zone failed: %s (errno=%d)",
+ fdev->path, ret);
+ }
return ret;
}
@@ -4967,6 +4995,7 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
struct blk_zone zone;
unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
block_t cs_zone_block, wp_block;
+ unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
sector_t zone_sector;
int err;
@@ -4978,8 +5007,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
return 0;
/* report zone for the sector the curseg points to */
- zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
- sbi->log_sectors_per_block;
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+ << log_sectors_per_block;
err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
report_one_zone_cb, &zone);
if (err != 1) {
@@ -4991,10 +5020,10 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
return 0;
- wp_block = zbd->start_blk + (zone.wp >> sbi->log_sectors_per_block);
+ wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
wp_segno = GET_SEGNO(sbi, wp_block);
wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
- wp_sector_off = zone.wp & GENMASK(sbi->log_sectors_per_block - 1, 0);
+ wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
wp_sector_off == 0)
@@ -5021,8 +5050,8 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
if (!zbd)
return 0;
- zone_sector = (sector_t)(cs_zone_block - zbd->start_blk) <<
- sbi->log_sectors_per_block;
+ zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
+ << log_sectors_per_block;
err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
report_one_zone_cb, &zone);
if (err != 1) {
@@ -5040,7 +5069,7 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
"Reset the zone: curseg[0x%x,0x%x]",
type, cs->segno, cs->next_blkoff);
err = __f2fs_issue_discard_zone(sbi, zbd->bdev, cs_zone_block,
- zone.len >> sbi->log_sectors_per_block);
+ zone.len >> log_sectors_per_block);
if (err) {
f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
zbd->path, err);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index aa1f9a3a8037..a8c8232852bb 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -591,7 +591,7 @@ static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
unsigned int level;
if (strlen(str) == 3) {
- F2FS_OPTION(sbi).compress_level = LZ4HC_DEFAULT_CLEVEL;
+ F2FS_OPTION(sbi).compress_level = 0;
return 0;
}
@@ -862,11 +862,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
if (!strcmp(name, "adaptive")) {
- if (f2fs_sb_has_blkzoned(sbi)) {
- f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
- kfree(name);
- return -EINVAL;
- }
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
} else if (!strcmp(name, "lfs")) {
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
@@ -1331,6 +1326,11 @@ default_check:
F2FS_OPTION(sbi).discard_unit =
DISCARD_UNIT_SECTION;
}
+
+ if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) {
+ f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature");
+ return -EINVAL;
+ }
#else
f2fs_err(sbi, "Zoned block device support is not enabled");
return -EINVAL;
@@ -1561,7 +1561,8 @@ static void destroy_device_list(struct f2fs_sb_info *sbi)
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
- blkdev_put(FDEV(i).bdev, sbi->sb);
+ if (i > 0)
+ blkdev_put(FDEV(i).bdev, sbi->sb);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
@@ -1600,6 +1601,7 @@ static void f2fs_put_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
@@ -1609,6 +1611,7 @@ static void f2fs_put_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
@@ -1705,8 +1708,10 @@ int f2fs_sync_fs(struct super_block *sb, int sync)
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return -EAGAIN;
- if (sync)
+ if (sync) {
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_issue_checkpoint(sbi);
+ }
return err;
}
@@ -2205,6 +2210,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
.nr_free_secs = 1 };
f2fs_down_write(&sbi->gc_lock);
+ stat_inc_gc_call_count(sbi, FOREGROUND);
err = f2fs_gc(sbi, &gc_control);
if (err == -ENODATA) {
err = 0;
@@ -2230,6 +2236,7 @@ skip_gc:
f2fs_down_write(&sbi->gc_lock);
cpc.reason = CP_PAUSE;
set_sbi_flag(sbi, SBI_CP_DISABLED);
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
goto out_unlock;
@@ -4190,16 +4197,12 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
sbi->aligned_blksize = true;
for (i = 0; i < max_devices; i++) {
-
- if (i > 0 && !RDEV(i).path[0])
+ if (i == 0)
+ FDEV(0).bdev = sbi->sb->s_bdev;
+ else if (!RDEV(i).path[0])
break;
- if (max_devices == 1) {
- /* Single zoned block device mount */
- FDEV(0).bdev =
- blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, mode,
- sbi->sb, NULL);
- } else {
+ if (max_devices > 1) {
/* Multi-device mount */
memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
FDEV(i).total_segments =
@@ -4215,9 +4218,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
FDEV(i).end_blk = FDEV(i).start_blk +
(FDEV(i).total_segments <<
sbi->log_blocks_per_seg) - 1;
+ FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
+ mode, sbi->sb, NULL);
}
- FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path, mode,
- sbi->sb, NULL);
}
if (IS_ERR(FDEV(i).bdev))
return PTR_ERR(FDEV(i).bdev);
@@ -4870,6 +4873,7 @@ static void kill_f2fs_super(struct super_block *sb)
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
+ stat_inc_cp_call_count(sbi, TOTAL_CALL);
f2fs_write_checkpoint(sbi, &cpc);
}
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 48b7e0073884..417fae96890f 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -356,6 +356,16 @@ static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
if (!strcmp(a->attr.name, "revoked_atomic_block"))
return sysfs_emit(buf, "%llu\n", sbi->revoked_atomic_block);
+#ifdef CONFIG_F2FS_STAT_FS
+ if (!strcmp(a->attr.name, "cp_foreground_calls"))
+ return sysfs_emit(buf, "%d\n",
+ atomic_read(&sbi->cp_call_count[TOTAL_CALL]) -
+ atomic_read(&sbi->cp_call_count[BACKGROUND]));
+ if (!strcmp(a->attr.name, "cp_background_calls"))
+ return sysfs_emit(buf, "%d\n",
+ atomic_read(&sbi->cp_call_count[BACKGROUND]));
+#endif
+
ui = (unsigned int *)(ptr + a->offset);
return sysfs_emit(buf, "%u\n", *ui);
@@ -972,10 +982,10 @@ F2FS_SBI_GENERAL_RO_ATTR(unusable_blocks_per_sec);
/* STAT_INFO ATTR */
#ifdef CONFIG_F2FS_STAT_FS
-STAT_INFO_RO_ATTR(cp_foreground_calls, cp_count);
-STAT_INFO_RO_ATTR(cp_background_calls, bg_cp_count);
-STAT_INFO_RO_ATTR(gc_foreground_calls, call_count);
-STAT_INFO_RO_ATTR(gc_background_calls, bg_gc);
+STAT_INFO_RO_ATTR(cp_foreground_calls, cp_call_count[FOREGROUND]);
+STAT_INFO_RO_ATTR(cp_background_calls, cp_call_count[BACKGROUND]);
+STAT_INFO_RO_ATTR(gc_foreground_calls, gc_call_count[FOREGROUND]);
+STAT_INFO_RO_ATTR(gc_background_calls, gc_call_count[BACKGROUND]);
#endif
/* FAULT_INFO ATTR */
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 4ae93e1df421..a657284faee3 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -757,17 +757,17 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
- f2fs_mark_inode_dirty_sync(inode, true);
- if (!error && S_ISDIR(inode->i_mode))
+ if (S_ISDIR(inode->i_mode))
set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_CP);
same:
if (is_inode_flag_set(inode, FI_ACL_MODE)) {
inode->i_mode = F2FS_I(inode)->i_acl_mode;
- inode_set_ctime_current(inode);
clear_inode_flag(inode, FI_ACL_MODE);
}
+ inode_set_ctime_current(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
exit:
kfree(base_addr);
return error;