f2fs crypto: fix racing of accessing encrypted page among different competitors

We use different page caches (normally the inode's page cache for R/W and
the meta inode's page cache for GC) to cache the same physical block that
belongs to an encrypted inode. Writeback in these two page caches should be
exclusive to each other, but we do not handle the writeback state well now,
so there are potential race problems:

a)
kworker:				f2fs_gc:
 - f2fs_write_data_pages
  - f2fs_write_data_page
   - do_write_data_page
    - write_data_page
     - f2fs_submit_page_mbio
(page #1 in the inode's page cache is queued
in the f2fs bio cache and is ready to be
written to the new blkaddr)
					 - gc_data_segment
					  - move_encrypted_block
					   - pagecache_get_page
					(page #2 in the meta inode's page cache
					is filled with the invalid data of the
					physical block located at the new
					blkaddr)
					   - f2fs_submit_page_mbio
					(page #1 is submitted; later, page #2
					with invalid data will also be submitted)

b)
f2fs_gc:
 - gc_data_segment
  - move_encrypted_block
   - f2fs_submit_page_mbio
(page #1 in the meta inode's page cache is
queued in the f2fs bio cache and is ready
to be written to the new blkaddr)
					user thread:
					 - f2fs_write_begin
					  - f2fs_submit_page_bio
					(we submit a read request to the block
					layer to update page #2 in the inode's
					page cache from the physical block
					located at the new blkaddr, so we may
					read garbage data from the new blkaddr
					since GC hasn't written back page #1 yet)

This patch fixes the above potential race problems for encrypted inodes.

Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Chao Yu 2015-10-08 13:27:34 +08:00 committed by Jaegeuk Kim
parent ea1a29a0bd
commit 08b39fbd59
5 changed files with 44 additions and 11 deletions
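In short, the fix centralizes the wait in one helper: GC keeps the encrypted block's copy in the meta inode's page cache, and every other path that may touch the same blkaddr first waits for that cached copy to finish writeback. The sketch below mirrors the new function added in segment.c (see the last hunk), with extra explanatory comments; it is a condensed restatement, not a verbatim copy of the patch.

/*
 * Sketch of the new helper: wait until the GCed copy of the encrypted
 * block, cached in the meta inode's page cache, has finished writeback.
 */
void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
						block_t blkaddr)
{
	struct page *cpage;

	/* a not-yet-allocated block cannot have a GCed copy in flight */
	if (blkaddr == NEW_ADDR)
		return;

	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);

	/* GC caches the encrypted block in META_MAPPING, keyed by blkaddr */
	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		/* the cached copy is written back as DATA; wait for it */
		f2fs_wait_on_page_writeback(cpage, DATA);
		f2fs_put_page(cpage, 1);
	}
}

Writers (f2fs_write_begin, f2fs_vm_page_mkwrite, do_write_data_page) and the read path (f2fs_mpage_readpages) call this helper before touching the block, while GC itself now waits for the inode's dirty page (f2fs_wait_on_page_writeback(page, DATA)) before caching the block in the meta inode, which closes both windows described above.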

fs/f2fs/data.c

@@ -956,21 +956,14 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 
 		if (f2fs_encrypted_inode(inode) &&
 			S_ISREG(inode->i_mode)) {
-			struct page *cpage;
 
 			ctx = f2fs_get_crypto_ctx(inode);
 			if (IS_ERR(ctx))
				goto set_error_page;
 
			/* wait the page to be moved by cleaning */
-			cpage = find_lock_page(
-					META_MAPPING(F2FS_I_SB(inode)),
-					block_nr);
-			if (cpage) {
-				f2fs_wait_on_page_writeback(cpage,
-							DATA);
-				f2fs_put_page(cpage, 1);
-			}
+			f2fs_wait_on_encrypted_page_writeback(
+				F2FS_I_SB(inode), block_nr);
 		}
 
 		bio = bio_alloc(GFP_KERNEL,
@@ -1064,6 +1057,11 @@ int do_write_data_page(struct f2fs_io_info *fio)
 	}
 
 	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+
+		/* wait for GCed encrypted page writeback */
+		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
+							fio->blk_addr);
+
 		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
 		if (IS_ERR(fio->encrypted_page)) {
 			err = PTR_ERR(fio->encrypted_page);
@@ -1452,6 +1450,10 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
 
 	f2fs_wait_on_page_writeback(page, DATA);
 
+	/* wait for GCed encrypted page writeback */
+	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+
 	if (len == PAGE_CACHE_SIZE)
 		goto out_update;
 	if (PageUptodate(page))

fs/f2fs/f2fs.h

@@ -1792,6 +1792,7 @@ void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
 void allocate_data_block(struct f2fs_sb_info *, struct page *,
		block_t, block_t *, struct f2fs_summary *, int);
 void f2fs_wait_on_page_writeback(struct page *, enum page_type);
+void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
 void write_data_summaries(struct f2fs_sb_info *, block_t);
 void write_node_summaries(struct f2fs_sb_info *, block_t);
 int lookup_journal_in_cursum(struct f2fs_summary_block *,

fs/f2fs/file.c

@@ -87,6 +87,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
 mapped:
 	/* fill the page */
 	f2fs_wait_on_page_writeback(page, DATA);
+
+	/* wait for GCed encrypted page writeback */
+	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);
+
 	/* if gced page is attached, don't write to cold segment */
 	clear_cold_data(page);
 out:

fs/f2fs/gc.c

@@ -559,8 +559,16 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 	if (err)
 		goto out;
 
-	if (unlikely(dn.data_blkaddr == NULL_ADDR))
+	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
+		ClearPageUptodate(page);
 		goto put_out;
+	}
+
+	/*
+	 * don't cache encrypted data into meta inode until previous dirty
+	 * data were writebacked to avoid racing between GC and flush.
+	 */
+	f2fs_wait_on_page_writeback(page, DATA);
 
 	get_node_info(fio.sbi, dn.nid, &ni);
 	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
@@ -589,7 +597,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
 		goto put_page_out;
 
 	set_page_dirty(fio.encrypted_page);
-	f2fs_wait_on_page_writeback(fio.encrypted_page, META);
+	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
 	if (clear_page_dirty_for_io(fio.encrypted_page))
 		dec_page_count(fio.sbi, F2FS_DIRTY_META);
 

fs/f2fs/segment.c

@@ -1484,6 +1484,23 @@ void f2fs_wait_on_page_writeback(struct page *page,
 	}
 }
 
+void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
+						block_t blkaddr)
+{
+	struct page *cpage;
+
+	if (blkaddr == NEW_ADDR)
+		return;
+
+	f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
+
+	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
+	if (cpage) {
+		f2fs_wait_on_page_writeback(cpage, DATA);
+		f2fs_put_page(cpage, 1);
+	}
+}
+
 static int read_compacted_summaries(struct f2fs_sb_info *sbi)
 {
 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);