[LogFS] Prevent memory corruption on large deletes

Removing sufficiently large files would create aliases for a large
number of segments.  This in turn results in a large number of journal
entries and an overflow of s_je_array.

Cheap fix is to add a BUG_ON, turning memory corruption into something
annoying, but less dangerous.  Real fix is to count the number of
affected segments and prevent the problem completely.

Signed-off-by: Joern Engel <joern@logfs.org>
This commit is contained in:
Joern Engel 2010-04-13 17:46:37 +02:00
parent e05c378f49
commit 032d8f7268
5 changed files with 34 additions and 1 deletions

View File

@ -458,6 +458,14 @@ static void __logfs_gc_pass(struct super_block *sb, int target)
struct logfs_block *block;
int round, progress, last_progress = 0;
/*
* Doing too many changes to the segfile at once would result
* in a large number of aliases. Write the journal before
* things get out of hand.
*/
if (super->s_shadow_tree.no_shadowed_segments >= MAX_OBJ_ALIASES)
logfs_write_anchor(sb);
if (no_free_segments(sb) >= target &&
super->s_no_object_aliases < MAX_OBJ_ALIASES)
return;

View File

@ -493,6 +493,8 @@ static void account_shadows(struct super_block *sb)
btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
btree_grim_visitor32(&tree->segment_map, 0, NULL);
tree->no_shadowed_segments = 0;
if (li->li_block) {
/*
@ -660,6 +662,7 @@ static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
if (ofs < 0)
return ofs;
logfs_buf_write(area, ofs, super->s_compressed_je, len);
BUG_ON(super->s_no_je >= MAX_JOURNAL_ENTRIES);
super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
return 0;
}

View File

@ -257,10 +257,14 @@ struct logfs_shadow {
* struct shadow_tree
* @new: shadows where old_ofs==0, indexed by new_ofs
* @old: shadows where old_ofs!=0, indexed by old_ofs
* @segment_map: bitfield of segments containing shadows
* @no_shadowed_segments: number of segments containing shadows
*/
struct shadow_tree {
	struct btree_head64 new;		/* shadows with old_ofs==0, indexed by new_ofs */
	struct btree_head64 old;		/* shadows with old_ofs!=0, indexed by old_ofs */
	struct btree_head32 segment_map;	/* bitfield of segments containing shadows */
	int no_shadowed_segments;		/* count of segments present in segment_map */
};
struct object_alias_item {
@ -311,6 +315,8 @@ struct logfs_block_ops {
write_alias_t *write_one_alias);
};
#define MAX_JOURNAL_ENTRIES 256
struct logfs_super {
struct mtd_info *s_mtd; /* underlying device */
struct block_device *s_bdev; /* underlying device */
@ -377,7 +383,7 @@ struct logfs_super {
u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */
u64 s_last_version;
struct logfs_area *s_journal_area; /* open journal segment */
__be64 s_je_array[64];
__be64 s_je_array[MAX_JOURNAL_ENTRIES];
int s_no_je;
int s_sum_index; /* for the 12 summaries */

View File

@ -1219,6 +1219,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
mempool_free(shadow, super->s_shadow_pool);
}
/*
 * mark_segment - note that segment @segno contains shadowed data
 * @tree:	shadow tree tracking pending changes
 * @segno:	segment number to mark
 *
 * Adds the segment to @tree->segment_map and bumps the shadowed-segment
 * count, unless the segment is already recorded there.  Insertion uses a
 * mempool-backed btree, so failure indicates a logic error and triggers
 * BUG_ON rather than error propagation.
 */
static void mark_segment(struct shadow_tree *tree, u32 segno)
{
	int ret;

	/* Already tracked - nothing to do. */
	if (btree_lookup32(&tree->segment_map, segno))
		return;

	ret = btree_insert32(&tree->segment_map, segno, (void *)1, GFP_NOFS);
	BUG_ON(ret);
	tree->no_shadowed_segments++;
}
/**
* fill_shadow_tree - Propagate shadow tree changes due to a write
* @inode: Inode owning the page
@ -1266,6 +1278,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page,
super->s_dirty_used_bytes += shadow->new_len;
super->s_dirty_free_bytes += shadow->old_len;
mark_segment(tree, shadow->old_ofs >> super->s_segshift);
mark_segment(tree, shadow->new_ofs >> super->s_segshift);
}
}

View File

@ -451,6 +451,8 @@ static int logfs_read_sb(struct super_block *sb, int read_only)
btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
btree_init_mempool32(&super->s_shadow_tree.segment_map,
super->s_btree_pool);
ret = logfs_init_mapping(sb);
if (ret)