From 032d8f7268444a0f5d4ee02d9513d682d5b8edfc Mon Sep 17 00:00:00 2001
From: Joern Engel
Date: Tue, 13 Apr 2010 17:46:37 +0200
Subject: [LogFS] Prevent memory corruption on large deletes

Removing sufficiently large files would create aliases for a large
number of segments.  This in turn results in a large number of journal
entries and an overflow of s_je_array.

Cheap fix is to add a BUG_ON, turning memory corruption into something
annoying, but less dangerous.  Real fix is to count the number of
affected segments and prevent the problem completely.

Signed-off-by: Joern Engel
---
 fs/logfs/gc.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
index 92949f95a901..e8253e7fb6b2 100644
--- a/fs/logfs/gc.c
+++ b/fs/logfs/gc.c
@@ -458,6 +458,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target)
 	struct logfs_block *block;
 	int round, progress, last_progress = 0;
 
+	/*
+	 * Doing too many changes to the segfile at once would result
+	 * in a large number of aliases.  Write the journal before
+	 * things get out of hand.
+	 */
+	if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
+		logfs_write_anchor(sb);
+
 	if (no_free_segments(sb) >= target &&
 			super->s_no_object_aliases < MAX_OBJ_ALIASES)
 		return;
--
cgit v1.2.3-58-ga151
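
A note on the "cheap fix" mentioned in the message: it is not part of
this patch, but would amount to trapping the overflow at the point
where journal entries are appended to s_je_array.  A minimal sketch,
assuming a hypothetical append helper plus an s_no_je counter and a
MAX_JOURNAL_ENTRIES bound (only s_je_array itself appears in the
commit message):

	/*
	 * Hypothetical sketch, not from the patch: crash loudly via
	 * BUG_ON() instead of silently writing past the end of
	 * s_je_array.  The helper name, s_no_je and
	 * MAX_JOURNAL_ENTRIES are assumptions for illustration.
	 */
	static void logfs_je_append(struct logfs_super *super, void *je)
	{
		BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
		super->s_je_array[super->s_no_je++] = je;
	}

The patch above takes the "real fix" route instead:
no_shadowed_segments counts how many segments have been shadowed, and
logfs_write_anchor() flushes the journal before the number of aliases
can get out of hand.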