author		Joern Engel <joern@logfs.org>	2010-04-13 17:46:37 +0200
committer	Joern Engel <joern@logfs.org>	2010-04-13 17:46:37 +0200
commit		032d8f7268444a0f5d4ee02d9513d682d5b8edfc (patch)
tree		57cd841514abb9ffe7df7d2569513663f551f960 /fs/logfs/journal.c
parent		e05c378f4973674a16d5b9636f2310cf88aca5f2 (diff)
[LogFS] Prevent memory corruption on large deletes
Removing sufficiently large files would create aliases for a large number
of segments.  This in turn results in a large number of journal entries
and an overflow of s_je_array.

Cheap fix is to add a BUG_ON, turning memory corruption into something
annoying, but less dangerous.  Real fix is to count the number of
affected segments and prevent the problem completely.

Signed-off-by: Joern Engel <joern@logfs.org>
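As a standalone illustration of the "cheap fix" guard pattern, here is a
minimal sketch; the names journal, je_array, MAX_JE and append_je are
illustrative, not the logfs API, and assert() stands in for the kernel's
BUG_ON():

	/*
	 * Minimal, self-contained sketch of the guard pattern (illustrative
	 * names, not the logfs API): refuse to append past the end of a
	 * fixed-size array instead of silently corrupting memory.
	 */
	#include <assert.h>
	#include <stdint.h>

	#define MAX_JE	64			/* hypothetical array capacity */

	struct journal {
		uint64_t	je_array[MAX_JE];
		unsigned int	no_je;		/* entries used so far */
	};

	static void append_je(struct journal *j, uint64_t ofs)
	{
		assert(j->no_je < MAX_JE);	/* overflow becomes a loud failure */
		j->je_array[j->no_je++] = ofs;
	}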
Diffstat (limited to 'fs/logfs/journal.c')
-rw-r--r--	fs/logfs/journal.c	3
1 files changed, 3 insertions, 0 deletions
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index d57c7b0..2c22a4a 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -493,6 +493,8 @@ static void account_shadows(struct super_block *sb)
 
 	btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
 	btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
+	btree_grim_visitor32(&tree->segment_map, 0, NULL);
+	tree->no_shadowed_segments = 0;
 
 	if (li->li_block) {
 		/*
@@ -660,6 +662,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
 	if (ofs < 0)
 		return ofs;
 	logfs_buf_write(area, ofs, super->s_compressed_je, len);
+	BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
 	super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
 	return 0;
 }
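The account_shadows() hunk resets the per-tree segment accounting
(segment_map and no_shadowed_segments) once the shadow trees have been
walked; counting the distinct affected segments is the "real fix" the
commit message refers to, since it bounds the number of journal entries
before any are written.  A hedged sketch of that counting idea, using
purely illustrative names (segment_set, note_segment, NR_SEGMENTS) rather
than the actual logfs data structures:

	/*
	 * Illustrative only, not the logfs API: remember which segments a
	 * delete has touched and count each one exactly once, so the caller
	 * knows how many journal entries to expect up front.
	 */
	#define NR_SEGMENTS	128			/* hypothetical segment count */
	#define BITS_PER_WORD	(8 * sizeof(unsigned long))

	struct segment_set {
		unsigned long	map[NR_SEGMENTS / BITS_PER_WORD + 1];
		unsigned int	count;		/* distinct segments so far */
	};

	static void note_segment(struct segment_set *s, unsigned int segno)
	{
		unsigned long *word = &s->map[segno / BITS_PER_WORD];
		unsigned long bit = 1UL << (segno % BITS_PER_WORD);

		if (!(*word & bit)) {
			*word |= bit;
			s->count++;	/* first visit to this segment */
		}
	}

Tracking segments in a bitmap makes the count idempotent: revisiting the
same segment does not inflate the number of journal entries that have to
be reserved.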