bcachefs: Split up fs-io.[ch]

fs-io.c is too big - time for some reorganization
 - fs-io-buffered.c: buffered read/write paths
 - fs-io-direct.c: direct io
 - fs-io-pagecache.c: pagecache data structures (bch_folio), utility code

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit dbbfca9f41
parent 1e81f89b02
Author: Kent Overstreet <kent.overstreet@linux.dev>
Date:   2023-08-03 18:18:21 -04:00

11 changed files with 2956 additions and 2803 deletions

fs/bcachefs/Makefile

@@ -38,6 +38,9 @@ bcachefs-y := \
fs-common.o \
fs-ioctl.o \
fs-io.o \
fs-io-buffered.o \
fs-io-direct.o \
fs-io-pagecache.o \
fsck.o \
inode.o \
io.o \

fs/bcachefs/fs-io-buffered.c (new file, 1098 lines)

File diff suppressed because it is too large

fs/bcachefs/fs-io-buffered.h (new file)

@@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FS_IO_BUFFERED_H
#define _BCACHEFS_FS_IO_BUFFERED_H
#ifndef NO_BCACHEFS_FS
int bch2_read_single_folio(struct folio *, struct address_space *);
int bch2_read_folio(struct file *, struct folio *);
int bch2_writepages(struct address_space *, struct writeback_control *);
void bch2_readahead(struct readahead_control *);
int bch2_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **);
int bch2_write_end(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page *, void *);
ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
void bch2_fs_fs_io_buffered_exit(struct bch_fs *);
int bch2_fs_fs_io_buffered_init(struct bch_fs *);
#else
static inline void bch2_fs_fs_io_buffered_exit(struct bch_fs *c) {}
static inline int bch2_fs_fs_io_buffered_init(struct bch_fs *c) { return 0; }
#endif
#endif /* _BCACHEFS_FS_IO_BUFFERED_H */
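
For orientation, these buffered-IO hooks are what the filesystem plugs into its
address_space_operations table; that table lives in fs.c and is not part of this
commit, so the struct name and exact member list below are assumed - a minimal
sketch only:

static const struct address_space_operations bch_address_space_operations = {
	.read_folio		= bch2_read_folio,
	.writepages		= bch2_writepages,
	.readahead		= bch2_readahead,
	.dirty_folio		= filemap_dirty_folio,
	.write_begin		= bch2_write_begin,
	.write_end		= bch2_write_end,
	.invalidate_folio	= bch2_invalidate_folio,
	.release_folio		= bch2_release_folio,
};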

fs/bcachefs/fs-io-direct.c (new file, 679 lines)

@@ -0,0 +1,679 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io.h"
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/task_io_accounting_ops.h>
/* O_DIRECT reads */
struct dio_read {
struct closure cl;
struct kiocb *req;
long ret;
bool should_dirty;
struct bch_read_bio rbio;
};
static void bio_check_or_release(struct bio *bio, bool check_dirty)
{
if (check_dirty) {
bio_check_pages_dirty(bio);
} else {
bio_release_pages(bio, false);
bio_put(bio);
}
}
static void bch2_dio_read_complete(struct closure *cl)
{
struct dio_read *dio = container_of(cl, struct dio_read, cl);
dio->req->ki_complete(dio->req, dio->ret);
bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
}
static void bch2_direct_IO_read_endio(struct bio *bio)
{
struct dio_read *dio = bio->bi_private;
if (bio->bi_status)
dio->ret = blk_status_to_errno(bio->bi_status);
closure_put(&dio->cl);
}
static void bch2_direct_IO_read_split_endio(struct bio *bio)
{
struct dio_read *dio = bio->bi_private;
bool should_dirty = dio->should_dirty;
bch2_direct_IO_read_endio(bio);
bio_check_or_release(bio, should_dirty);
}
static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
{
struct file *file = req->ki_filp;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_io_opts opts;
struct dio_read *dio;
struct bio *bio;
loff_t offset = req->ki_pos;
bool sync = is_sync_kiocb(req);
size_t shorten;
ssize_t ret;
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
if ((offset|iter->count) & (block_bytes(c) - 1))
return -EINVAL;
ret = min_t(loff_t, iter->count,
max_t(loff_t, 0, i_size_read(&inode->v) - offset));
if (!ret)
return ret;
shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
iter->count -= shorten;
bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
REQ_OP_READ,
GFP_KERNEL,
&c->dio_read_bioset);
bio->bi_end_io = bch2_direct_IO_read_endio;
dio = container_of(bio, struct dio_read, rbio.bio);
closure_init(&dio->cl, NULL);
/*
* this is a _really_ horrible hack just to avoid an atomic sub at the
* end:
*/
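/*
 * Roughly: in the async case bch2_dio_read_complete is installed as the
 * closure's destructor, so the final closure_put() from the last completing
 * bio runs it directly; in the sync case an extra ref is held instead, for
 * the closure_sync() further down to wait on.
 */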
if (!sync) {
set_closure_fn(&dio->cl, bch2_dio_read_complete, NULL);
atomic_set(&dio->cl.remaining,
CLOSURE_REMAINING_INITIALIZER -
CLOSURE_RUNNING +
CLOSURE_DESTRUCTOR);
} else {
atomic_set(&dio->cl.remaining,
CLOSURE_REMAINING_INITIALIZER + 1);
}
dio->req = req;
dio->ret = ret;
/*
* This is one of the sketchier things I've encountered: we have to skip
* the dirtying of requests that are internal from the kernel (i.e. from
* loopback), because we'll deadlock on page_lock.
*/
dio->should_dirty = iter_is_iovec(iter);
goto start;
while (iter->count) {
bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
REQ_OP_READ,
GFP_KERNEL,
&c->bio_read);
bio->bi_end_io = bch2_direct_IO_read_split_endio;
start:
bio->bi_opf = REQ_OP_READ|REQ_SYNC;
bio->bi_iter.bi_sector = offset >> 9;
bio->bi_private = dio;
ret = bio_iov_iter_get_pages(bio, iter);
if (ret < 0) {
/* XXX: fault inject this path */
bio->bi_status = BLK_STS_RESOURCE;
bio_endio(bio);
break;
}
offset += bio->bi_iter.bi_size;
if (dio->should_dirty)
bio_set_pages_dirty(bio);
if (iter->count)
closure_get(&dio->cl);
bch2_read(c, rbio_init(bio, opts), inode_inum(inode));
}
iter->count += shorten;
if (sync) {
closure_sync(&dio->cl);
closure_debug_destroy(&dio->cl);
ret = dio->ret;
bio_check_or_release(&dio->rbio.bio, dio->should_dirty);
return ret;
} else {
return -EIOCBQUEUED;
}
}
ssize_t bch2_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct bch_inode_info *inode = file_bch_inode(file);
struct address_space *mapping = file->f_mapping;
size_t count = iov_iter_count(iter);
ssize_t ret;
if (!count)
return 0; /* skip atime */
if (iocb->ki_flags & IOCB_DIRECT) {
struct blk_plug plug;
if (unlikely(mapping->nrpages)) {
ret = filemap_write_and_wait_range(mapping,
iocb->ki_pos,
iocb->ki_pos + count - 1);
if (ret < 0)
goto out;
}
file_accessed(file);
blk_start_plug(&plug);
ret = bch2_direct_IO_read(iocb, iter);
blk_finish_plug(&plug);
if (ret >= 0)
iocb->ki_pos += ret;
} else {
bch2_pagecache_add_get(inode);
ret = generic_file_read_iter(iocb, iter);
bch2_pagecache_add_put(inode);
}
out:
return bch2_err_class(ret);
}
/* O_DIRECT writes */
struct dio_write {
struct kiocb *req;
struct address_space *mapping;
struct bch_inode_info *inode;
struct mm_struct *mm;
unsigned loop:1,
extending:1,
sync:1,
flush:1,
free_iov:1;
struct quota_res quota_res;
u64 written;
struct iov_iter iter;
struct iovec inline_vecs[2];
/* must be last: */
struct bch_write_op op;
};
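/*
 * If a dio write can't get a disk reservation, it may still proceed when it is
 * purely overwriting existing allocated (and, unless the write itself is
 * compressed, uncompressed) extents with enough replicas; this checks for that
 * case - see the bch2_disk_reservation_get() failure path in
 * bch2_dio_write_loop():
 */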
static bool bch2_check_range_allocated(struct bch_fs *c, subvol_inum inum,
u64 offset, u64 size,
unsigned nr_replicas, bool compressed)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
u64 end = offset + size;
u32 snapshot;
bool ret = true;
int err;
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
err = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
if (err)
goto err;
for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, err) {
if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
break;
if (k.k->p.snapshot != snapshot ||
nr_replicas > bch2_bkey_replicas(c, k) ||
(!compressed && bch2_bkey_sectors_compressed(k))) {
ret = false;
break;
}
}
offset = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
if (bch2_err_matches(err, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
return err ? false : ret;
}
static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
{
struct bch_fs *c = dio->op.c;
struct bch_inode_info *inode = dio->inode;
struct bio *bio = &dio->op.wbio.bio;
return bch2_check_range_allocated(c, inode_inum(inode),
dio->op.pos.offset, bio_sectors(bio),
dio->op.opts.data_replicas,
dio->op.opts.compression != 0);
}
static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);
/*
* We're going to return -EIOCBQUEUED, but we haven't finished consuming the
* iov_iter yet, so we need to stash a copy of the iovec: it might be on the
* caller's stack, we're not guaranteed that it will live for the duration of
* the IO:
*/
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
struct iovec *iov = dio->inline_vecs;
/*
* iov_iter has a single embedded iovec - nothing to do:
*/
if (iter_is_ubuf(&dio->iter))
return 0;
/*
* We don't currently handle non-iovec iov_iters here - return an error,
* and we'll fall back to doing the IO synchronously:
*/
if (!iter_is_iovec(&dio->iter))
return -1;
if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
GFP_KERNEL);
if (unlikely(!iov))
return -ENOMEM;
dio->free_iov = true;
}
memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
dio->iter.__iov = iov;
return 0;
}
static void bch2_dio_write_flush_done(struct closure *cl)
{
struct dio_write *dio = container_of(cl, struct dio_write, op.cl);
struct bch_fs *c = dio->op.c;
closure_debug_destroy(cl);
dio->op.error = bch2_journal_error(&c->journal);
bch2_dio_write_done(dio);
}
static noinline void bch2_dio_write_flush(struct dio_write *dio)
{
struct bch_fs *c = dio->op.c;
struct bch_inode_unpacked inode;
int ret;
dio->flush = 0;
closure_init(&dio->op.cl, NULL);
if (!dio->op.error) {
ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
if (ret) {
dio->op.error = ret;
} else {
bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
&dio->op.cl);
bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
}
}
if (dio->sync) {
closure_sync(&dio->op.cl);
closure_debug_destroy(&dio->op.cl);
} else {
continue_at(&dio->op.cl, bch2_dio_write_flush_done, NULL);
}
}
static __always_inline long bch2_dio_write_done(struct dio_write *dio)
{
struct kiocb *req = dio->req;
struct bch_inode_info *inode = dio->inode;
bool sync = dio->sync;
long ret;
if (unlikely(dio->flush)) {
bch2_dio_write_flush(dio);
if (!sync)
return -EIOCBQUEUED;
}
bch2_pagecache_block_put(inode);
if (dio->free_iov)
kfree(dio->iter.__iov);
ret = dio->op.error ?: ((long) dio->written << 9);
bio_put(&dio->op.wbio.bio);
/* inode->i_dio_count is our ref on inode and thus bch_fs */
inode_dio_end(&inode->v);
if (ret < 0)
ret = bch2_err_class(ret);
if (!sync) {
req->ki_complete(req, ret);
ret = -EIOCBQUEUED;
}
return ret;
}
static __always_inline void bch2_dio_write_end(struct dio_write *dio)
{
struct bch_fs *c = dio->op.c;
struct kiocb *req = dio->req;
struct bch_inode_info *inode = dio->inode;
struct bio *bio = &dio->op.wbio.bio;
req->ki_pos += (u64) dio->op.written << 9;
dio->written += dio->op.written;
if (dio->extending) {
spin_lock(&inode->v.i_lock);
if (req->ki_pos > inode->v.i_size)
i_size_write(&inode->v, req->ki_pos);
spin_unlock(&inode->v.i_lock);
}
if (dio->op.i_sectors_delta || dio->quota_res.sectors) {
mutex_lock(&inode->ei_quota_lock);
__bch2_i_sectors_acct(c, inode, &dio->quota_res, dio->op.i_sectors_delta);
__bch2_quota_reservation_put(c, inode, &dio->quota_res);
mutex_unlock(&inode->ei_quota_lock);
}
bio_release_pages(bio, false);
if (unlikely(dio->op.error))
set_bit(EI_INODE_ERROR, &inode->ei_flags);
}
static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
{
struct bch_fs *c = dio->op.c;
struct kiocb *req = dio->req;
struct address_space *mapping = dio->mapping;
struct bch_inode_info *inode = dio->inode;
struct bch_io_opts opts;
struct bio *bio = &dio->op.wbio.bio;
unsigned unaligned, iter_count;
bool sync = dio->sync, dropped_locks;
long ret;
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
while (1) {
iter_count = dio->iter.count;
EBUG_ON(current->faults_disabled_mapping);
current->faults_disabled_mapping = mapping;
ret = bio_iov_iter_get_pages(bio, &dio->iter);
dropped_locks = fdm_dropped_locks();
current->faults_disabled_mapping = NULL;
/*
* If the fault handler returned an error but also signalled
* that it dropped & retook ei_pagecache_lock, we just need to
* re-shoot down the page cache and retry:
*/
if (dropped_locks && ret)
ret = 0;
if (unlikely(ret < 0))
goto err;
if (unlikely(dropped_locks)) {
ret = bch2_write_invalidate_inode_pages_range(mapping,
req->ki_pos,
req->ki_pos + iter_count - 1);
if (unlikely(ret))
goto err;
if (!bio->bi_iter.bi_size)
continue;
}
unaligned = bio->bi_iter.bi_size & (block_bytes(c) - 1);
bio->bi_iter.bi_size -= unaligned;
iov_iter_revert(&dio->iter, unaligned);
if (!bio->bi_iter.bi_size) {
/*
* bio_iov_iter_get_pages was only able to get <
* blocksize worth of pages:
*/
ret = -EFAULT;
goto err;
}
bch2_write_op_init(&dio->op, c, opts);
dio->op.end_io = sync
? NULL
: bch2_dio_write_loop_async;
dio->op.target = dio->op.opts.foreground_target;
dio->op.write_point = writepoint_hashed((unsigned long) current);
dio->op.nr_replicas = dio->op.opts.data_replicas;
dio->op.subvol = inode->ei_subvol;
dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
dio->op.devs_need_flush = &inode->ei_devs_need_flush;
if (sync)
dio->op.flags |= BCH_WRITE_SYNC;
dio->op.flags |= BCH_WRITE_CHECK_ENOSPC;
ret = bch2_quota_reservation_add(c, inode, &dio->quota_res,
bio_sectors(bio), true);
if (unlikely(ret))
goto err;
ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),
dio->op.opts.data_replicas, 0);
if (unlikely(ret) &&
!bch2_dio_write_check_allocated(dio))
goto err;
task_io_account_write(bio->bi_iter.bi_size);
if (unlikely(dio->iter.count) &&
!dio->sync &&
!dio->loop &&
bch2_dio_write_copy_iov(dio))
dio->sync = sync = true;
dio->loop = true;
closure_call(&dio->op.cl, bch2_write, NULL, NULL);
if (!sync)
return -EIOCBQUEUED;
bch2_dio_write_end(dio);
if (likely(!dio->iter.count) || dio->op.error)
break;
bio_reset(bio, NULL, REQ_OP_WRITE);
}
out:
return bch2_dio_write_done(dio);
err:
dio->op.error = ret;
bio_release_pages(bio, false);
bch2_quota_reservation_put(c, inode, &dio->quota_res);
goto out;
}
static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
{
struct mm_struct *mm = dio->mm;
bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
if (mm)
kthread_use_mm(mm);
bch2_dio_write_loop(dio);
if (mm)
kthread_unuse_mm(mm);
}
static void bch2_dio_write_loop_async(struct bch_write_op *op)
{
struct dio_write *dio = container_of(op, struct dio_write, op);
bch2_dio_write_end(dio);
if (likely(!dio->iter.count) || dio->op.error)
bch2_dio_write_done(dio);
else
bch2_dio_write_continue(dio);
}
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
{
struct file *file = req->ki_filp;
struct address_space *mapping = file->f_mapping;
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct dio_write *dio;
struct bio *bio;
bool locked = true, extending;
ssize_t ret;
prefetch(&c->opts);
prefetch((void *) &c->opts + 64);
prefetch(&inode->ei_inode);
prefetch((void *) &inode->ei_inode + 64);
inode_lock(&inode->v);
ret = generic_write_checks(req, iter);
if (unlikely(ret <= 0))
goto err;
ret = file_remove_privs(file);
if (unlikely(ret))
goto err;
ret = file_update_time(file);
if (unlikely(ret))
goto err;
if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
goto err;
inode_dio_begin(&inode->v);
bch2_pagecache_block_get(inode);
extending = req->ki_pos + iter->count > inode->v.i_size;
if (!extending) {
inode_unlock(&inode->v);
locked = false;
}
bio = bio_alloc_bioset(NULL,
bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
REQ_OP_WRITE,
GFP_KERNEL,
&c->dio_write_bioset);
dio = container_of(bio, struct dio_write, op.wbio.bio);
dio->req = req;
dio->mapping = mapping;
dio->inode = inode;
dio->mm = current->mm;
dio->loop = false;
dio->extending = extending;
dio->sync = is_sync_kiocb(req) || extending;
dio->flush = iocb_is_dsync(req) && !c->opts.journal_flush_disabled;
dio->free_iov = false;
dio->quota_res.sectors = 0;
dio->written = 0;
dio->iter = *iter;
dio->op.c = c;
if (unlikely(mapping->nrpages)) {
ret = bch2_write_invalidate_inode_pages_range(mapping,
req->ki_pos,
req->ki_pos + iter->count - 1);
if (unlikely(ret))
goto err_put_bio;
}
ret = bch2_dio_write_loop(dio);
err:
if (locked)
inode_unlock(&inode->v);
return ret;
err_put_bio:
bch2_pagecache_block_put(inode);
bio_put(bio);
inode_dio_end(&inode->v);
goto err;
}
void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
{
bioset_exit(&c->dio_write_bioset);
bioset_exit(&c->dio_read_bioset);
}
int bch2_fs_fs_io_direct_init(struct bch_fs *c)
{
if (bioset_init(&c->dio_read_bioset,
4, offsetof(struct dio_read, rbio.bio),
BIOSET_NEED_BVECS))
return -BCH_ERR_ENOMEM_dio_read_bioset_init;
if (bioset_init(&c->dio_write_bioset,
4, offsetof(struct dio_write, op.wbio.bio),
BIOSET_NEED_BVECS))
return -BCH_ERR_ENOMEM_dio_write_bioset_init;
return 0;
}
#endif /* NO_BCACHEFS_FS */

fs/bcachefs/fs-io-direct.h (new file)

@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FS_IO_DIRECT_H
#define _BCACHEFS_FS_IO_DIRECT_H
#ifndef NO_BCACHEFS_FS
ssize_t bch2_direct_write(struct kiocb *, struct iov_iter *);
ssize_t bch2_read_iter(struct kiocb *, struct iov_iter *);
void bch2_fs_fs_io_direct_exit(struct bch_fs *);
int bch2_fs_fs_io_direct_init(struct bch_fs *);
#else
static inline void bch2_fs_fs_io_direct_exit(struct bch_fs *c) {}
static inline int bch2_fs_fs_io_direct_init(struct bch_fs *c) { return 0; }
#endif
#endif /* _BCACHEFS_FS_IO_DIRECT_H */
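
Likewise, the top-level read/write entry points (bch2_read_iter here,
bch2_write_iter from fs-io-buffered.h) are reached through the file_operations
table in fs.c, which this commit doesn't touch; a rough sketch with the member
list assumed:

static const struct file_operations bch_file_operations = {
	.llseek		= bch2_llseek,
	.read_iter	= bch2_read_iter,
	.write_iter	= bch2_write_iter,
	.fsync		= bch2_fsync,
	.open		= generic_file_open,
};

bch2_direct_write() itself is not called through this table; it is presumably
invoked from bch2_write_iter() for IOCB_DIRECT writes, mirroring how
bch2_read_iter() above dispatches direct reads.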

fs/bcachefs/fs-io-pagecache.c (new file)

@@ -0,0 +1,780 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS
#include "bcachefs.h"
#include "btree_iter.h"
#include "extents.h"
#include "fs-io.h"
#include "fs-io-pagecache.h"
#include "subvolume.h"
#include <linux/pagevec.h>
#include <linux/writeback.h>
int bch2_filemap_get_contig_folios_d(struct address_space *mapping,
loff_t start, u64 end,
int fgp_flags, gfp_t gfp,
folios *folios)
{
struct folio *f;
u64 pos = start;
int ret = 0;
while (pos < end) {
if ((u64) pos >= (u64) start + (1ULL << 20))
fgp_flags &= ~FGP_CREAT;
ret = darray_make_room_gfp(folios, 1, gfp & GFP_KERNEL);
if (ret)
break;
f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
if (IS_ERR_OR_NULL(f))
break;
BUG_ON(folios->nr && folio_pos(f) != pos);
pos = folio_end_pos(f);
darray_push(folios, f);
}
if (!folios->nr && !ret && (fgp_flags & FGP_CREAT))
ret = -ENOMEM;
return folios->nr ? 0 : ret;
}
/* pagecache_block must be held */
int bch2_write_invalidate_inode_pages_range(struct address_space *mapping,
loff_t start, loff_t end)
{
int ret;
/*
* XXX: the way this is currently implemented, we can spin if a process
* is continually redirtying a specific page
*/
do {
if (!mapping->nrpages)
return 0;
ret = filemap_write_and_wait_range(mapping, start, end);
if (ret)
break;
if (!mapping->nrpages)
return 0;
ret = invalidate_inode_pages2_range(mapping,
start >> PAGE_SHIFT,
end >> PAGE_SHIFT);
} while (ret == -EBUSY);
return ret;
}
static const char * const bch2_folio_sector_states[] = {
#define x(n) #n,
BCH_FOLIO_SECTOR_STATE()
#undef x
NULL
};
static inline enum bch_folio_sector_state
folio_sector_dirty(enum bch_folio_sector_state state)
{
switch (state) {
case SECTOR_unallocated:
return SECTOR_dirty;
case SECTOR_reserved:
return SECTOR_dirty_reserved;
default:
return state;
}
}
static inline enum bch_folio_sector_state
folio_sector_undirty(enum bch_folio_sector_state state)
{
switch (state) {
case SECTOR_dirty:
return SECTOR_unallocated;
case SECTOR_dirty_reserved:
return SECTOR_reserved;
default:
return state;
}
}
static inline enum bch_folio_sector_state
folio_sector_reserve(enum bch_folio_sector_state state)
{
switch (state) {
case SECTOR_unallocated:
return SECTOR_reserved;
case SECTOR_dirty:
return SECTOR_dirty_reserved;
default:
return state;
}
}
/* for newly allocated folios: */
struct bch_folio *__bch2_folio_create(struct folio *folio, gfp_t gfp)
{
struct bch_folio *s;
s = kzalloc(sizeof(*s) +
sizeof(struct bch_folio_sector) *
folio_sectors(folio), gfp);
if (!s)
return NULL;
spin_lock_init(&s->lock);
folio_attach_private(folio, s);
return s;
}
struct bch_folio *bch2_folio_create(struct folio *folio, gfp_t gfp)
{
return bch2_folio(folio) ?: __bch2_folio_create(folio, gfp);
}
static unsigned bkey_to_sector_state(struct bkey_s_c k)
{
if (bkey_extent_is_reservation(k))
return SECTOR_reserved;
if (bkey_extent_is_allocation(k.k))
return SECTOR_allocated;
return SECTOR_unallocated;
}
static void __bch2_folio_set(struct folio *folio,
unsigned pg_offset, unsigned pg_len,
unsigned nr_ptrs, unsigned state)
{
struct bch_folio *s = bch2_folio(folio);
unsigned i, sectors = folio_sectors(folio);
BUG_ON(pg_offset >= sectors);
BUG_ON(pg_offset + pg_len > sectors);
spin_lock(&s->lock);
for (i = pg_offset; i < pg_offset + pg_len; i++) {
s->s[i].nr_replicas = nr_ptrs;
bch2_folio_sector_set(folio, s, i, state);
}
if (i == sectors)
s->uptodate = true;
spin_unlock(&s->lock);
}
/*
* Initialize bch_folio state (allocated/unallocated, nr_replicas) from the
* extents btree:
*/
int bch2_folio_set(struct bch_fs *c, subvol_inum inum,
struct folio **folios, unsigned nr_folios)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bch_folio *s;
u64 offset = folio_sector(folios[0]);
unsigned folio_idx;
u32 snapshot;
bool need_set = false;
int ret;
for (folio_idx = 0; folio_idx < nr_folios; folio_idx++) {
s = bch2_folio_create(folios[folio_idx], GFP_KERNEL);
if (!s)
return -ENOMEM;
need_set |= !s->uptodate;
}
if (!need_set)
return 0;
folio_idx = 0;
bch2_trans_init(&trans, c, 0, 0);
retry:
bch2_trans_begin(&trans);
ret = bch2_subvolume_get_snapshot(&trans, inum.subvol, &snapshot);
if (ret)
goto err;
for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, ret) {
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = bkey_to_sector_state(k);
while (folio_idx < nr_folios) {
struct folio *folio = folios[folio_idx];
u64 folio_start = folio_sector(folio);
u64 folio_end = folio_end_sector(folio);
unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
folio_start;
unsigned folio_len = min(k.k->p.offset, folio_end) -
folio_offset - folio_start;
BUG_ON(k.k->p.offset < folio_start);
BUG_ON(bkey_start_offset(k.k) > folio_end);
if (!bch2_folio(folio)->uptodate)
__bch2_folio_set(folio, folio_offset, folio_len, nr_ptrs, state);
if (k.k->p.offset < folio_end)
break;
folio_idx++;
}
if (folio_idx == nr_folios)
break;
}
offset = iter.pos.offset;
bch2_trans_iter_exit(&trans, &iter);
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
bch2_trans_exit(&trans);
return ret;
}
void bch2_bio_page_state_set(struct bio *bio, struct bkey_s_c k)
{
struct bvec_iter iter;
struct folio_vec fv;
unsigned nr_ptrs = k.k->type == KEY_TYPE_reflink_v
? 0 : bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = bkey_to_sector_state(k);
bio_for_each_folio(fv, bio, iter)
__bch2_folio_set(fv.fv_folio,
fv.fv_offset >> 9,
fv.fv_len >> 9,
nr_ptrs, state);
}
void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
u64 start, u64 end)
{
pgoff_t index = start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
struct folio_batch fbatch;
unsigned i, j;
if (end <= start)
return;
folio_batch_init(&fbatch);
while (filemap_get_folios(inode->v.i_mapping,
&index, end_index, &fbatch)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
u64 folio_start = folio_sector(folio);
u64 folio_end = folio_end_sector(folio);
unsigned folio_offset = max(start, folio_start) - folio_start;
unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
struct bch_folio *s;
BUG_ON(end <= folio_start);
folio_lock(folio);
s = bch2_folio(folio);
if (s) {
spin_lock(&s->lock);
for (j = folio_offset; j < folio_offset + folio_len; j++)
s->s[j].nr_replicas = 0;
spin_unlock(&s->lock);
}
folio_unlock(folio);
}
folio_batch_release(&fbatch);
cond_resched();
}
}
void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
u64 start, u64 end)
{
struct bch_fs *c = inode->v.i_sb->s_fs_info;
pgoff_t index = start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
struct folio_batch fbatch;
s64 i_sectors_delta = 0;
unsigned i, j;
if (end <= start)
return;
folio_batch_init(&fbatch);
while (filemap_get_folios(inode->v.i_mapping,
&index, end_index, &fbatch)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
u64 folio_start = folio_sector(folio);
u64 folio_end = folio_end_sector(folio);
unsigned folio_offset = max(start, folio_start) - folio_start;
unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
struct bch_folio *s;
BUG_ON(end <= folio_start);
folio_lock(folio);
s = bch2_folio(folio);
if (s) {
spin_lock(&s->lock);
for (j = folio_offset; j < folio_offset + folio_len; j++) {
i_sectors_delta -= s->s[j].state == SECTOR_dirty;
bch2_folio_sector_set(folio, s, j,
folio_sector_reserve(s->s[j].state));
}
spin_unlock(&s->lock);
}
folio_unlock(folio);
}
folio_batch_release(&fbatch);
cond_resched();
}
bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
}
static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
unsigned nr_replicas)
{
return max(0, (int) nr_replicas -
s->nr_replicas -
s->replicas_reserved);
}
int bch2_get_folio_disk_reservation(struct bch_fs *c,
struct bch_inode_info *inode,
struct folio *folio, bool check_enospc)
{
struct bch_folio *s = bch2_folio_create(folio, 0);
unsigned nr_replicas = inode_nr_replicas(c, inode);
struct disk_reservation disk_res = { 0 };
unsigned i, sectors = folio_sectors(folio), disk_res_sectors = 0;
int ret;
if (!s)
return -ENOMEM;
for (i = 0; i < sectors; i++)
disk_res_sectors += sectors_to_reserve(&s->s[i], nr_replicas);
if (!disk_res_sectors)
return 0;
ret = bch2_disk_reservation_get(c, &disk_res,
disk_res_sectors, 1,
!check_enospc
? BCH_DISK_RESERVATION_NOFAIL
: 0);
if (unlikely(ret))
return ret;
for (i = 0; i < sectors; i++)
s->s[i].replicas_reserved +=
sectors_to_reserve(&s->s[i], nr_replicas);
return 0;
}
void bch2_folio_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
struct bch2_folio_reservation *res)
{
bch2_disk_reservation_put(c, &res->disk);
bch2_quota_reservation_put(c, inode, &res->quota);
}
int bch2_folio_reservation_get(struct bch_fs *c,
struct bch_inode_info *inode,
struct folio *folio,
struct bch2_folio_reservation *res,
unsigned offset, unsigned len)
{
struct bch_folio *s = bch2_folio_create(folio, 0);
unsigned i, disk_sectors = 0, quota_sectors = 0;
int ret;
if (!s)
return -ENOMEM;
BUG_ON(!s->uptodate);
for (i = round_down(offset, block_bytes(c)) >> 9;
i < round_up(offset + len, block_bytes(c)) >> 9;
i++) {
disk_sectors += sectors_to_reserve(&s->s[i],
res->disk.nr_replicas);
quota_sectors += s->s[i].state == SECTOR_unallocated;
}
if (disk_sectors) {
ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
if (unlikely(ret))
return ret;
}
if (quota_sectors) {
ret = bch2_quota_reservation_add(c, inode, &res->quota,
quota_sectors, true);
if (unlikely(ret)) {
struct disk_reservation tmp = {
.sectors = disk_sectors
};
bch2_disk_reservation_put(c, &tmp);
res->disk.sectors -= disk_sectors;
return ret;
}
}
return 0;
}
static void bch2_clear_folio_bits(struct folio *folio)
{
struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_folio *s = bch2_folio(folio);
struct disk_reservation disk_res = { 0 };
int i, sectors = folio_sectors(folio), dirty_sectors = 0;
if (!s)
return;
EBUG_ON(!folio_test_locked(folio));
EBUG_ON(folio_test_writeback(folio));
for (i = 0; i < sectors; i++) {
disk_res.sectors += s->s[i].replicas_reserved;
s->s[i].replicas_reserved = 0;
dirty_sectors -= s->s[i].state == SECTOR_dirty;
bch2_folio_sector_set(folio, s, i, folio_sector_undirty(s->s[i].state));
}
bch2_disk_reservation_put(c, &disk_res);
bch2_i_sectors_acct(c, inode, NULL, dirty_sectors);
bch2_folio_release(folio);
}
void bch2_set_folio_dirty(struct bch_fs *c,
struct bch_inode_info *inode,
struct folio *folio,
struct bch2_folio_reservation *res,
unsigned offset, unsigned len)
{
struct bch_folio *s = bch2_folio(folio);
unsigned i, dirty_sectors = 0;
WARN_ON((u64) folio_pos(folio) + offset + len >
round_up((u64) i_size_read(&inode->v), block_bytes(c)));
BUG_ON(!s->uptodate);
spin_lock(&s->lock);
for (i = round_down(offset, block_bytes(c)) >> 9;
i < round_up(offset + len, block_bytes(c)) >> 9;
i++) {
unsigned sectors = sectors_to_reserve(&s->s[i],
res->disk.nr_replicas);
/*
* This can happen if we race with the error path in
* bch2_writepage_io_done():
*/
sectors = min_t(unsigned, sectors, res->disk.sectors);
s->s[i].replicas_reserved += sectors;
res->disk.sectors -= sectors;
dirty_sectors += s->s[i].state == SECTOR_unallocated;
bch2_folio_sector_set(folio, s, i, folio_sector_dirty(s->s[i].state));
}
spin_unlock(&s->lock);
bch2_i_sectors_acct(c, inode, &res->quota, dirty_sectors);
if (!folio_test_dirty(folio))
filemap_dirty_folio(inode->v.i_mapping, folio);
}
vm_fault_t bch2_page_fault(struct vm_fault *vmf)
{
struct file *file = vmf->vma->vm_file;
struct address_space *mapping = file->f_mapping;
struct address_space *fdm = faults_disabled_mapping();
struct bch_inode_info *inode = file_bch_inode(file);
vm_fault_t ret;
if (fdm == mapping)
return VM_FAULT_SIGBUS;
/* Lock ordering: */
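/*
 * The dio write path holds fdm's pagecache_block lock while faulting in user
 * pages that may belong to this mapping; these per-inode pagecache locks are
 * taken in address order. If fdm sorts after the mapping being faulted we may
 * only trylock; on failure, drop fdm's lock, wait our turn on this mapping's
 * lock, retake fdm's lock, and tell the dio path via set_fdm_dropped_locks()
 * that it needs to invalidate and retry.
 */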
if (fdm > mapping) {
struct bch_inode_info *fdm_host = to_bch_ei(fdm->host);
if (bch2_pagecache_add_tryget(inode))
goto got_lock;
bch2_pagecache_block_put(fdm_host);
bch2_pagecache_add_get(inode);
bch2_pagecache_add_put(inode);
bch2_pagecache_block_get(fdm_host);
/* Signal that lock has been dropped: */
set_fdm_dropped_locks();
return VM_FAULT_SIGBUS;
}
bch2_pagecache_add_get(inode);
got_lock:
ret = filemap_fault(vmf);
bch2_pagecache_add_put(inode);
return ret;
}
vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
{
struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
struct bch_inode_info *inode = file_bch_inode(file);
struct address_space *mapping = file->f_mapping;
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch2_folio_reservation res;
unsigned len;
loff_t isize;
vm_fault_t ret;
bch2_folio_reservation_init(c, inode, &res);
sb_start_pagefault(inode->v.i_sb);
file_update_time(file);
/*
* Not strictly necessary, but helps avoid dio writes livelocking in
* bch2_write_invalidate_inode_pages_range() - can drop this if/when we get
* a bch2_write_invalidate_inode_pages_range() that works without dropping
* page lock before invalidating page
*/
bch2_pagecache_add_get(inode);
folio_lock(folio);
isize = i_size_read(&inode->v);
if (folio->mapping != mapping || folio_pos(folio) >= isize) {
folio_unlock(folio);
ret = VM_FAULT_NOPAGE;
goto out;
}
len = min_t(loff_t, folio_size(folio), isize - folio_pos(folio));
if (bch2_folio_set(c, inode_inum(inode), &folio, 1) ?:
bch2_folio_reservation_get(c, inode, folio, &res, 0, len)) {
folio_unlock(folio);
ret = VM_FAULT_SIGBUS;
goto out;
}
bch2_set_folio_dirty(c, inode, folio, &res, 0, len);
bch2_folio_reservation_put(c, inode, &res);
folio_wait_stable(folio);
ret = VM_FAULT_LOCKED;
out:
bch2_pagecache_add_put(inode);
sb_end_pagefault(inode->v.i_sb);
return ret;
}
void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
{
if (offset || length < folio_size(folio))
return;
bch2_clear_folio_bits(folio);
}
bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return false;
bch2_clear_folio_bits(folio);
return true;
}
/* fseek: */
static int folio_data_offset(struct folio *folio, loff_t pos,
unsigned min_replicas)
{
struct bch_folio *s = bch2_folio(folio);
unsigned i, sectors = folio_sectors(folio);
if (s)
for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
if (s->s[i].state >= SECTOR_dirty &&
s->s[i].nr_replicas + s->s[i].replicas_reserved >= min_replicas)
return i << SECTOR_SHIFT;
return -1;
}
loff_t bch2_seek_pagecache_data(struct inode *vinode,
loff_t start_offset,
loff_t end_offset,
unsigned min_replicas,
bool nonblock)
{
struct folio_batch fbatch;
pgoff_t start_index = start_offset >> PAGE_SHIFT;
pgoff_t end_index = end_offset >> PAGE_SHIFT;
pgoff_t index = start_index;
unsigned i;
loff_t ret;
int offset;
folio_batch_init(&fbatch);
while (filemap_get_folios(vinode->i_mapping,
&index, end_index, &fbatch)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i];
if (!nonblock) {
folio_lock(folio);
} else if (!folio_trylock(folio)) {
folio_batch_release(&fbatch);
return -EAGAIN;
}
offset = folio_data_offset(folio,
max(folio_pos(folio), start_offset),
min_replicas);
if (offset >= 0) {
ret = clamp(folio_pos(folio) + offset,
start_offset, end_offset);
folio_unlock(folio);
folio_batch_release(&fbatch);
return ret;
}
folio_unlock(folio);
}
folio_batch_release(&fbatch);
cond_resched();
}
return end_offset;
}
static int folio_hole_offset(struct address_space *mapping, loff_t *offset,
unsigned min_replicas, bool nonblock)
{
struct folio *folio;
struct bch_folio *s;
unsigned i, sectors;
bool ret = true;
folio = __filemap_get_folio(mapping, *offset >> PAGE_SHIFT,
FGP_LOCK|(nonblock ? FGP_NOWAIT : 0), 0);
if (folio == ERR_PTR(-EAGAIN))
return -EAGAIN;
if (IS_ERR_OR_NULL(folio))
return true;
s = bch2_folio(folio);
if (!s)
goto unlock;
sectors = folio_sectors(folio);
for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
if (s->s[i].state < SECTOR_dirty ||
s->s[i].nr_replicas + s->s[i].replicas_reserved < min_replicas) {
*offset = max(*offset,
folio_pos(folio) + (i << SECTOR_SHIFT));
goto unlock;
}
*offset = folio_end_pos(folio);
ret = false;
unlock:
folio_unlock(folio);
folio_put(folio);
return ret;
}
loff_t bch2_seek_pagecache_hole(struct inode *vinode,
loff_t start_offset,
loff_t end_offset,
unsigned min_replicas,
bool nonblock)
{
struct address_space *mapping = vinode->i_mapping;
loff_t offset = start_offset;
while (offset < end_offset &&
!folio_hole_offset(mapping, &offset, min_replicas, nonblock))
;
return min(offset, end_offset);
}
int bch2_clamp_data_hole(struct inode *inode,
u64 *hole_start,
u64 *hole_end,
unsigned min_replicas,
bool nonblock)
{
loff_t ret;
ret = bch2_seek_pagecache_hole(inode,
*hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
if (ret < 0)
return ret;
*hole_start = ret;
if (*hole_start == *hole_end)
return 0;
ret = bch2_seek_pagecache_data(inode,
*hole_start << 9, *hole_end << 9, min_replicas, nonblock) >> 9;
if (ret < 0)
return ret;
*hole_end = ret;
return 0;
}
#endif /* NO_BCACHEFS_FS */

fs/bcachefs/fs-io-pagecache.h (new file)

@@ -0,0 +1,176 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FS_IO_PAGECACHE_H
#define _BCACHEFS_FS_IO_PAGECACHE_H
#include <linux/pagemap.h>
typedef DARRAY(struct folio *) folios;
int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t,
u64, int, gfp_t, folios *);
int bch2_write_invalidate_inode_pages_range(struct address_space *, loff_t, loff_t);
/*
* Use u64 for the end pos and sector helpers because if the folio covers the
* max supported range of the mapping, the start offset of the next folio
* overflows loff_t. This breaks much of the range based processing in the
* buffered write path.
*/
static inline u64 folio_end_pos(struct folio *folio)
{
return folio_pos(folio) + folio_size(folio);
}
static inline size_t folio_sectors(struct folio *folio)
{
return PAGE_SECTORS << folio_order(folio);
}
static inline loff_t folio_sector(struct folio *folio)
{
return folio_pos(folio) >> 9;
}
static inline u64 folio_end_sector(struct folio *folio)
{
return folio_end_pos(folio) >> 9;
}
#define BCH_FOLIO_SECTOR_STATE() \
x(unallocated) \
x(reserved) \
x(dirty) \
x(dirty_reserved) \
x(allocated)
enum bch_folio_sector_state {
#define x(n) SECTOR_##n,
BCH_FOLIO_SECTOR_STATE()
#undef x
};
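/*
 * The x-macro expands to SECTOR_unallocated, SECTOR_reserved, SECTOR_dirty,
 * SECTOR_dirty_reserved, SECTOR_allocated. The transition helpers in
 * fs-io-pagecache.c move between them as follows:
 *
 *	dirty:    unallocated -> dirty          reserved       -> dirty_reserved
 *	reserve:  unallocated -> reserved       dirty          -> dirty_reserved
 *	undirty:  dirty       -> unallocated    dirty_reserved -> reserved
 */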
struct bch_folio_sector {
/* Uncompressed, fully allocated replicas (or on disk reservation): */
unsigned nr_replicas:4;
/* Owns PAGE_SECTORS * replicas_reserved sized in memory reservation: */
unsigned replicas_reserved:4;
/* i_sectors: */
enum bch_folio_sector_state state:8;
};
struct bch_folio {
spinlock_t lock;
atomic_t write_count;
/*
* Is the sector state up to date with the btree?
* (Not the data itself)
*/
bool uptodate;
struct bch_folio_sector s[];
};
/* Helper for when we need to add debug instrumentation: */
static inline void bch2_folio_sector_set(struct folio *folio,
struct bch_folio *s,
unsigned i, unsigned n)
{
s->s[i].state = n;
}
/* file offset (to folio offset) to bch_folio_sector index */
static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
{
u64 f_offset = pos - folio_pos(folio);
BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
return f_offset >> SECTOR_SHIFT;
}
/* for newly allocated folios: */
static inline void __bch2_folio_release(struct folio *folio)
{
kfree(folio_detach_private(folio));
}
static inline void bch2_folio_release(struct folio *folio)
{
EBUG_ON(!folio_test_locked(folio));
__bch2_folio_release(folio);
}
static inline struct bch_folio *__bch2_folio(struct folio *folio)
{
return folio_has_private(folio)
? (struct bch_folio *) folio_get_private(folio)
: NULL;
}
static inline struct bch_folio *bch2_folio(struct folio *folio)
{
EBUG_ON(!folio_test_locked(folio));
return __bch2_folio(folio);
}
struct bch_folio *__bch2_folio_create(struct folio *, gfp_t);
struct bch_folio *bch2_folio_create(struct folio *, gfp_t);
struct bch2_folio_reservation {
struct disk_reservation disk;
struct quota_res quota;
};
static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
/* XXX: this should not be open coded */
return inode->ei_inode.bi_data_replicas
? inode->ei_inode.bi_data_replicas - 1
: c->opts.data_replicas;
}
static inline void bch2_folio_reservation_init(struct bch_fs *c,
struct bch_inode_info *inode,
struct bch2_folio_reservation *res)
{
memset(res, 0, sizeof(*res));
res->disk.nr_replicas = inode_nr_replicas(c, inode);
}
int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
void bch2_mark_pagecache_reserved(struct bch_inode_info *, u64, u64);
int bch2_get_folio_disk_reservation(struct bch_fs *,
struct bch_inode_info *,
struct folio *, bool);
void bch2_folio_reservation_put(struct bch_fs *,
struct bch_inode_info *,
struct bch2_folio_reservation *);
int bch2_folio_reservation_get(struct bch_fs *,
struct bch_inode_info *,
struct folio *,
struct bch2_folio_reservation *,
unsigned, unsigned);
void bch2_set_folio_dirty(struct bch_fs *,
struct bch_inode_info *,
struct folio *,
struct bch2_folio_reservation *,
unsigned, unsigned);
vm_fault_t bch2_page_fault(struct vm_fault *);
vm_fault_t bch2_page_mkwrite(struct vm_fault *);
void bch2_invalidate_folio(struct folio *, size_t, size_t);
bool bch2_release_folio(struct folio *, gfp_t);
loff_t bch2_seek_pagecache_data(struct inode *, loff_t, loff_t, unsigned, bool);
loff_t bch2_seek_pagecache_hole(struct inode *, loff_t, loff_t, unsigned, bool);
int bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned, bool);
#endif /* _BCACHEFS_FS_IO_PAGECACHE_H */

fs/bcachefs/fs-io.c

File diff suppressed because it is too large

fs/bcachefs/fs-io.h

@@ -5,29 +5,164 @@
#ifndef NO_BCACHEFS_FS
#include "buckets.h"
#include "fs.h"
#include "io_types.h"
#include "quota.h"
#include <linux/uio.h>
struct quota_res;
struct folio_vec {
struct folio *fv_folio;
size_t fv_offset;
size_t fv_len;
};
static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
{
struct folio *folio = page_folio(bv.bv_page);
size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
bv.bv_offset;
size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);
return (struct folio_vec) {
.fv_folio = folio,
.fv_offset = offset,
.fv_len = len,
};
}
static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
struct bvec_iter iter)
{
return biovec_to_foliovec(bio_iter_iovec(bio, iter));
}
#define __bio_for_each_folio(bvl, bio, iter, start) \
for (iter = (start); \
(iter).bi_size && \
((bvl = bio_iter_iovec_folio((bio), (iter))), 1); \
bio_advance_iter_single((bio), &(iter), (bvl).fv_len))
/**
* bio_for_each_folio - iterate over folios within a bio
*
* Like other non-_all versions, this iterates over what bio->bi_iter currently
* points to. This version is for drivers, where the bio may have previously
* been split or cloned.
*/
#define bio_for_each_folio(bvl, bio, iter) \
__bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)
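/*
 * Usage, as in bch2_bio_page_state_set() in fs-io-pagecache.c:
 *
 *	struct bvec_iter iter;
 *	struct folio_vec fv;
 *
 *	bio_for_each_folio(fv, bio, iter)
 *		__bch2_folio_set(fv.fv_folio,
 *				 fv.fv_offset >> 9, fv.fv_len >> 9,
 *				 nr_ptrs, state);
 */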
struct quota_res {
u64 sectors;
};
#ifdef CONFIG_BCACHEFS_QUOTA
static inline void __bch2_quota_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res)
{
BUG_ON(res->sectors > inode->ei_quota_reserved);
bch2_quota_acct(c, inode->ei_qid, Q_SPC,
-((s64) res->sectors), KEY_TYPE_QUOTA_PREALLOC);
inode->ei_quota_reserved -= res->sectors;
res->sectors = 0;
}
static inline void bch2_quota_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res)
{
if (res->sectors) {
mutex_lock(&inode->ei_quota_lock);
__bch2_quota_reservation_put(c, inode, res);
mutex_unlock(&inode->ei_quota_lock);
}
}
static inline int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res,
u64 sectors,
bool check_enospc)
{
int ret;
if (test_bit(EI_INODE_SNAPSHOT, &inode->ei_flags))
return 0;
mutex_lock(&inode->ei_quota_lock);
ret = bch2_quota_acct(c, inode->ei_qid, Q_SPC, sectors,
check_enospc ? KEY_TYPE_QUOTA_PREALLOC : KEY_TYPE_QUOTA_NOCHECK);
if (likely(!ret)) {
inode->ei_quota_reserved += sectors;
res->sectors += sectors;
}
mutex_unlock(&inode->ei_quota_lock);
return ret;
}
#else
static inline void __bch2_quota_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res) {}
static inline void bch2_quota_reservation_put(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res) {}
static inline int bch2_quota_reservation_add(struct bch_fs *c,
struct bch_inode_info *inode,
struct quota_res *res,
unsigned sectors,
bool check_enospc)
{
return 0;
}
#endif
void __bch2_i_sectors_acct(struct bch_fs *, struct bch_inode_info *,
struct quota_res *, s64);
static inline void bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
struct quota_res *quota_res, s64 sectors)
{
if (sectors) {
mutex_lock(&inode->ei_quota_lock);
__bch2_i_sectors_acct(c, inode, quota_res, sectors);
mutex_unlock(&inode->ei_quota_lock);
}
}
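/*
 * Typical usage, as in the O_DIRECT write path: reserve quota up front, then
 * account the sectors actually added against the reservation and release
 * whatever is left:
 *
 *	struct quota_res quota_res = { 0 };
 *
 *	ret = bch2_quota_reservation_add(c, inode, &quota_res, sectors, true);
 *	...
 *	bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
 *	bch2_quota_reservation_put(c, inode, &quota_res);
 */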
static inline struct address_space *faults_disabled_mapping(void)
{
return (void *) (((unsigned long) current->faults_disabled_mapping) & ~1UL);
}
static inline void set_fdm_dropped_locks(void)
{
current->faults_disabled_mapping =
(void *) (((unsigned long) current->faults_disabled_mapping)|1);
}
static inline bool fdm_dropped_locks(void)
{
return ((unsigned long) current->faults_disabled_mapping) & 1;
}
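/*
 * current->faults_disabled_mapping (used by the three helpers above) holds the
 * mapping a dio write is currently faulting user pages in for, with bit 0 of
 * the pointer doubling as a "locks were dropped" flag set by bch2_page_fault();
 * the dio write loop checks fdm_dropped_locks() after bio_iov_iter_get_pages()
 * and re-shoots down the page cache before retrying if it's set.
 */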
void bch2_inode_flush_nocow_writes_async(struct bch_fs *,
struct bch_inode_info *, struct closure *);
int __must_check bch2_write_inode_size(struct bch_fs *,
struct bch_inode_info *,
loff_t, unsigned);
int bch2_read_folio(struct file *, struct folio *);
int bch2_writepages(struct address_space *, struct writeback_control *);
void bch2_readahead(struct readahead_control *);
int bch2_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **);
int bch2_write_end(struct file *, struct address_space *, loff_t,
unsigned, unsigned, struct page *, void *);
ssize_t bch2_read_iter(struct kiocb *, struct iov_iter *);
ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
int bch2_fsync(struct file *, loff_t, loff_t, int);
int bch2_truncate(struct mnt_idmap *,
@@ -39,11 +174,6 @@ loff_t bch2_remap_file_range(struct file *, loff_t, struct file *,
loff_t bch2_llseek(struct file *, loff_t, int);
vm_fault_t bch2_page_fault(struct vm_fault *);
vm_fault_t bch2_page_mkwrite(struct vm_fault *);
void bch2_invalidate_folio(struct folio *, size_t, size_t);
bool bch2_release_folio(struct folio *, gfp_t);
void bch2_fs_fsio_exit(struct bch_fs *);
int bch2_fs_fsio_init(struct bch_fs *);
#else

fs/bcachefs/fs.c

@@ -14,6 +14,9 @@
#include "fs-common.h"
#include "fs-io.h"
#include "fs-ioctl.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"

fs/bcachefs/super.c

@@ -30,6 +30,8 @@
#include "error.h"
#include "fs.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fsck.h"
#include "inode.h"
#include "io.h"
@@ -469,6 +471,8 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_fs_counters_exit(c);
bch2_fs_snapshots_exit(c);
bch2_fs_quota_exit(c);
bch2_fs_fs_io_direct_exit(c);
bch2_fs_fs_io_buffered_exit(c);
bch2_fs_fsio_exit(c);
bch2_fs_ec_exit(c);
bch2_fs_encryption_exit(c);
@@ -842,7 +846,9 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_fs_encryption_init(c) ?:
bch2_fs_compress_init(c) ?:
bch2_fs_ec_init(c) ?:
bch2_fs_fsio_init(c);
bch2_fs_fsio_init(c) ?:
bch2_fs_fs_io_buffered_init(c) ?:
bch2_fs_fs_io_direct_init(c);
if (ret)
goto err;