f2fs: fix to truncate inline data past EOF

Previously if an inode is with inline data, we will try to invalidate partial inline
data in page #0 when we truncate size of inode in truncate_partial_data_page().
And then we set page #0 to dirty, after this we can synchronize inode page with
page #0 at ->writepage().

But sometimes we will fail to operate page #0 in truncate_partial_data_page()
due to below reason:
a) if offset is zero, we will skip setting page #0 to dirty.
b) if page #0 is not uptodate, we will fail to update it as it has no mapping
data.

So with the following operations, we will encounter stale data which should have
been truncated.

1.write inline data to file
2.sync first data page to inode page
3.truncate file size to 0
4.truncate file size to max_inline_size
5.echo 1 > /proc/sys/vm/drop_caches
6.read file --> encounter the original inline data which remains in the inode page.

This patch renames truncate_inline_data() to truncate_inline_inode() for code
readability, then use truncate_inline_inode() to truncate inline data in inode
page in truncate_blocks() and truncate page #0 in truncate_partial_data_page()
for fixing.

v2:
 o truncate partially #0 page in truncate_partial_data_page to avoid keeping
   old data in #0 page.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
This commit is contained in:
Chao Yu 2015-03-10 13:16:25 +08:00 committed by Jaegeuk Kim
parent 83dfe53c18
commit 0bfcfcca3d
3 changed files with 33 additions and 10 deletions

View file

@ -1763,6 +1763,7 @@ extern struct kmem_cache *inode_entry_slab;
*/
bool f2fs_may_inline(struct inode *);
void read_inline_data(struct page *, struct page *);
bool truncate_inline_inode(struct page *, u64);
int f2fs_read_inline_data(struct inode *, struct page *);
int f2fs_convert_inline_page(struct dnode_of_data *, struct page *);
int f2fs_convert_inline_inode(struct inode *);

View file

@ -456,15 +456,16 @@ void truncate_data_blocks(struct dnode_of_data *dn)
truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from)
static int truncate_partial_data_page(struct inode *inode, u64 from,
bool force)
{
unsigned offset = from & (PAGE_CACHE_SIZE - 1);
struct page *page;
if (!offset)
if (!offset && !force)
return 0;
page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, false);
page = find_data_page(inode, from >> PAGE_CACHE_SHIFT, force);
if (IS_ERR(page))
return 0;
@ -475,7 +476,8 @@ static int truncate_partial_data_page(struct inode *inode, u64 from)
f2fs_wait_on_page_writeback(page, DATA);
zero_user(page, offset, PAGE_CACHE_SIZE - offset);
set_page_dirty(page);
if (!force)
set_page_dirty(page);
out:
f2fs_put_page(page, 1);
return 0;
@ -489,6 +491,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
pgoff_t free_from;
int count = 0, err = 0;
struct page *ipage;
bool truncate_page = false;
trace_f2fs_truncate_blocks_enter(inode, from);
@ -504,7 +507,10 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
}
if (f2fs_has_inline_data(inode)) {
if (truncate_inline_inode(ipage, from))
set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
truncate_page = true;
goto out;
}
@ -535,7 +541,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
/* lastly zero out the first data page */
if (!err)
err = truncate_partial_data_page(inode, from);
err = truncate_partial_data_page(inode, from, truncate_page);
trace_f2fs_truncate_blocks_exit(inode, err);
return err;

View file

@ -50,10 +50,26 @@ void read_inline_data(struct page *page, struct page *ipage)
SetPageUptodate(page);
}
static void truncate_inline_data(struct page *ipage)
bool truncate_inline_inode(struct page *ipage, u64 from)
{
void *addr;
/*
* we should never truncate inline data past max inline data size,
* because we always convert inline inode to normal one before
* truncating real data if new size is past max inline data size.
*/
f2fs_bug_on(F2FS_P_SB(ipage), from > MAX_INLINE_DATA);
if (from >= MAX_INLINE_DATA)
return false;
addr = inline_data_addr(ipage);
f2fs_wait_on_page_writeback(ipage, NODE);
memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA);
memset(addr + from, 0, MAX_INLINE_DATA - from);
return true;
}
int f2fs_read_inline_data(struct inode *inode, struct page *page)
@ -131,7 +147,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
truncate_inline_data(dn->inode_page);
truncate_inline_inode(dn->inode_page, 0);
clear_out:
stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode);
@ -245,7 +261,7 @@ bool recover_inline_data(struct inode *inode, struct page *npage)
if (f2fs_has_inline_data(inode)) {
ipage = get_node_page(sbi, inode->i_ino);
f2fs_bug_on(sbi, IS_ERR(ipage));
truncate_inline_data(ipage);
truncate_inline_inode(ipage, 0);
f2fs_clear_inline_inode(inode);
update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
@ -363,7 +379,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
truncate_inline_data(ipage);
truncate_inline_inode(ipage, 0);
stat_dec_inline_dir(dir);
clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);