fs/buffer.c: support fscrypt in block_read_full_page()

After each filesystem block (as represented by a buffer_head) has been
read from disk by block_read_full_page(), decrypt it if needed.  The
decryption is done on the fscrypt_read_workqueue.
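
For context, block_read_full_page() is the generic ->readpage
implementation for filesystems that map a page with buffer_heads.  A
filesystem opts in roughly as sketched below; myfs_readpage() and
myfs_get_block() are hypothetical names, but the shape matches e.g.
blkdev_readpage() in fs/block_dev.c:

    /* Hypothetical wiring; compare blkdev_readpage(). */
    static int myfs_readpage(struct file *file, struct page *page)
    {
            return block_read_full_page(page, myfs_get_block);
    }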

This is the final change needed to support ext4 encryption with
blocksize != PAGE_SIZE, and it's a fairly small change now that
CONFIG_FS_ENCRYPTION is a bool and fs/crypto/ exposes functions to
decrypt individual blocks and to enqueue work on the fscrypt workqueue.
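
The two interfaces relied on here are declared in <linux/fscrypt.h>;
their signatures around this series are approximately:

    int fscrypt_decrypt_pagecache_blocks(struct page *page, unsigned int len,
                                         unsigned int offs);
    void fscrypt_enqueue_decrypt_work(struct work_struct *work);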

Don't try to add fs-verity support yet, as the fs/verity/ support layer
isn't ready for sub-page blocks.  Just add fscrypt support for now.

Almost all the new code is compiled away when CONFIG_FS_ENCRYPTION=n.
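
That works because the new code is guarded by
IS_ENABLED(CONFIG_FS_ENCRYPTION), which expands to the compile-time
constant 0 or 1: the branch must still parse, but the compiler discards
it (and the then-unreferenced helpers) as dead code.  A minimal sketch
of the pattern, using a hypothetical option and helper:

    if (IS_ENABLED(CONFIG_MY_FEATURE)) {
            /* Constant-false when CONFIG_MY_FEATURE=n: still
             * type-checked, unlike #ifdef'ed-out code, but
             * eliminated from the object file. */
            do_my_feature(inode);
    }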

Cc: Chandan Rajendra <chandan@linux.ibm.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20191023033312.361355-2-ebiggers@kernel.org
Signed-off-by: Theodore Ts'o <tytso@mit.edu>

@@ -47,6 +47,7 @@
 #include <linux/pagevec.h>
 #include <linux/sched/mm.h>
 #include <trace/events/block.h>
+#include <linux/fscrypt.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
@@ -246,10 +247,6 @@ out:
 	return ret;
 }
 
-/*
- * I/O completion handler for block_read_full_page() - pages
- * which come unlocked at the end of I/O.
- */
 static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 {
 	unsigned long flags;
@@ -307,6 +304,47 @@ still_busy:
 	return;
 }
 
+struct decrypt_bh_ctx {
+	struct work_struct work;
+	struct buffer_head *bh;
+};
+
+static void decrypt_bh(struct work_struct *work)
+{
+	struct decrypt_bh_ctx *ctx =
+		container_of(work, struct decrypt_bh_ctx, work);
+	struct buffer_head *bh = ctx->bh;
+	int err;
+
+	err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
+					       bh_offset(bh));
+	end_buffer_async_read(bh, err == 0);
+	kfree(ctx);
+}
+
+/*
+ * I/O completion handler for block_read_full_page() - pages
+ * which come unlocked at the end of I/O.
+ */
+static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
+{
+	/* Decrypt if needed */
+	if (uptodate && IS_ENABLED(CONFIG_FS_ENCRYPTION) &&
+	    IS_ENCRYPTED(bh->b_page->mapping->host) &&
+	    S_ISREG(bh->b_page->mapping->host->i_mode)) {
+		struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
+
+		if (ctx) {
+			INIT_WORK(&ctx->work, decrypt_bh);
+			ctx->bh = bh;
+			fscrypt_enqueue_decrypt_work(&ctx->work);
+			return;
+		}
+		uptodate = 0;
+	}
+	end_buffer_async_read(bh, uptodate);
+}
+
 /*
  * Completion handler for block_write_full_page() - pages which are unlocked
  * during I/O, and which have PageWriteback cleared upon I/O completion.
@@ -379,7 +417,7 @@ EXPORT_SYMBOL(end_buffer_async_write);
  */
 static void mark_buffer_async_read(struct buffer_head *bh)
 {
-	bh->b_end_io = end_buffer_async_read;
+	bh->b_end_io = end_buffer_async_read_io;
 	set_buffer_async_read(bh);
 }
 