bcachefs: Subvolumes, snapshots

This patch adds subvolume.c: support for the subvolumes and snapshots
btrees, their associated data types, and the new on-disk data structures.
The next patches will start hooking this new code up to the existing code.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Kent Overstreet 2021-03-16 00:42:25 -04:00 committed by Kent Overstreet
parent 8948fc8f15
commit 14b393ee76
17 changed files with 1314 additions and 24 deletions


@ -50,6 +50,7 @@ bcachefs-y := \
replicas.o \
siphash.o \
six.o \
subvolume.o \
super.o \
super-io.o \
sysfs.o \


@ -381,6 +381,8 @@ enum gc_phase {
GC_PHASE_BTREE_alloc,
GC_PHASE_BTREE_quotas,
GC_PHASE_BTREE_reflink,
GC_PHASE_BTREE_subvolumes,
GC_PHASE_BTREE_snapshots,
GC_PHASE_PENDING_DELETE,
};
@ -564,6 +566,21 @@ struct btree_path_buf {
#define REPLICAS_DELTA_LIST_MAX (1U << 16)
struct snapshot_t {
u32 parent;
u32 children[2];
u32 subvol; /* Nonzero only if a subvolume points to this node: */
u32 equiv;
};
typedef struct {
u32 subvol;
u64 inum;
} subvol_inum;
#define BCACHEFS_ROOT_SUBVOL_INUM \
((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO })
struct bch_fs {
struct closure cl;
@ -635,6 +652,12 @@ struct bch_fs {
struct closure sb_write;
struct mutex sb_lock;
/* snapshot.c: */
GENRADIX(struct snapshot_t) snapshots;
struct bch_snapshot_table __rcu *snapshot_table;
struct mutex snapshot_table_lock;
struct work_struct snapshot_delete_work;
/* BTREE CACHE */
struct bio_set btree_bio;
struct workqueue_struct *io_complete_wq;


@ -346,7 +346,9 @@ static inline void bkey_init(struct bkey *k)
x(inline_data, 17) \
x(btree_ptr_v2, 18) \
x(indirect_inline_data, 19) \
x(alloc_v2, 20)
x(alloc_v2, 20) \
x(subvolume, 21) \
x(snapshot, 22)
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
@ -690,6 +692,10 @@ struct bch_inode_generation {
__le32 pad;
} __attribute__((packed, aligned(8)));
/*
* bi_subvol and bi_parent_subvol are only set for subvolume roots:
*/
#define BCH_INODE_FIELDS() \
x(bi_atime, 96) \
x(bi_ctime, 96) \
@ -713,7 +719,9 @@ struct bch_inode_generation {
x(bi_erasure_code, 16) \
x(bi_fields_set, 16) \
x(bi_dir, 64) \
x(bi_dir_offset, 64)
x(bi_dir_offset, 64) \
x(bi_subvol, 32) \
x(bi_parent_subvol, 32)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS() \
@ -796,6 +804,9 @@ struct bch_dirent {
__u8 d_name[];
} __attribute__((packed, aligned(8)));
#define DT_SUBVOL 16
#define BCH_DT_MAX 17
#define BCH_NAME_MAX (U8_MAX * sizeof(u64) - \
sizeof(struct bkey) - \
offsetof(struct bch_dirent, d_name))
@ -932,6 +943,42 @@ struct bch_inline_data {
u8 data[0];
};
/* Subvolumes: */
#define SUBVOL_POS_MIN POS(0, 1)
#define SUBVOL_POS_MAX POS(0, S32_MAX)
#define BCACHEFS_ROOT_SUBVOL 1
struct bch_subvolume {
struct bch_val v;
__le32 flags;
__le32 snapshot;
__le64 inode;
};
LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1)
/*
* We need to know whether a subvolume is a snapshot so we can know whether we
* can delete it (or whether it should just be rm -rf'd)
*/
LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2)
/* Snapshots */
struct bch_snapshot {
struct bch_val v;
__le32 flags;
__le32 parent;
__le32 children[2];
__le32 subvol;
__le32 pad;
};
LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
/* Optional/variable size superblock sections: */
struct bch_sb_field {
@ -1702,7 +1749,9 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
x(alloc, 4) \
x(quotas, 5) \
x(stripes, 6) \
x(reflink, 7)
x(reflink, 7) \
x(subvolumes, 8) \
x(snapshots, 9)
enum btree_id {
#define x(kwd, val) BTREE_ID_##kwd = val,


@ -78,6 +78,9 @@ struct bch_ioctl_incremental {
#define BCH_IOCTL_DISK_RESIZE _IOW(0xbc, 14, struct bch_ioctl_disk_resize)
#define BCH_IOCTL_DISK_RESIZE_JOURNAL _IOW(0xbc,15, struct bch_ioctl_disk_resize_journal)
#define BCH_IOCTL_SUBVOLUME_CREATE _IOW(0xbc, 16, struct bch_ioctl_subvolume)
#define BCH_IOCTL_SUBVOLUME_DESTROY _IOW(0xbc, 17, struct bch_ioctl_subvolume)
/* ioctl below act on a particular file, not the filesystem as a whole: */
#define BCHFS_IOC_REINHERIT_ATTRS _IOR(0xbc, 64, const char __user *)
@ -349,4 +352,16 @@ struct bch_ioctl_disk_resize_journal {
__u64 nbuckets;
};
struct bch_ioctl_subvolume {
__u32 flags;
__u32 dirfd;
__u16 mode;
__u16 pad[3];
__u64 dst_ptr;
__u64 src_ptr;
};
#define BCH_SUBVOL_SNAPSHOT_CREATE (1U << 0)
#define BCH_SUBVOL_SNAPSHOT_RO (1U << 1)
#endif /* _BCACHEFS_IOCTL_H */
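The two subvolume ioctl numbers and struct bch_ioctl_subvolume above are only declared by this patch; the handlers arrive in later patches. As a minimal userspace sketch, and assuming those later patches treat dst_ptr and src_ptr as user pointers to NUL-terminated paths resolved relative to dirfd (not shown in this diff), creating a new empty subvolume might look like:

/* Hypothetical usage, not part of this patch: */
#include <fcntl.h>      /* AT_FDCWD */
#include <string.h>
#include <sys/ioctl.h>
/* plus the header above, for struct bch_ioctl_subvolume and the ioctl numbers */

static int create_subvolume(int fs_fd, const char *dst_path)
{
	struct bch_ioctl_subvolume arg;

	memset(&arg, 0, sizeof(arg));
	arg.flags   = 0;        /* BCH_SUBVOL_SNAPSHOT_CREATE|BCH_SUBVOL_SNAPSHOT_RO for a read-only snapshot */
	arg.dirfd   = AT_FDCWD; /* assumed: paths are resolved relative to this fd */
	arg.mode    = 0777;
	arg.dst_ptr = (__u64)(unsigned long)dst_path; /* assumed: destination path */
	arg.src_ptr = 0;        /* assumed: source subvolume path, used only when snapshotting */

	return ioctl(fs_fd, BCH_IOCTL_SUBVOLUME_CREATE, &arg);
}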


@ -11,6 +11,7 @@
#include "inode.h"
#include "quota.h"
#include "reflink.h"
#include "subvolume.h"
#include "xattr.h"
const char * const bch2_bkey_types[] = {
@ -126,6 +127,10 @@ static unsigned bch2_key_types_allowed[] = {
[BKEY_TYPE_reflink] =
(1U << KEY_TYPE_reflink_v)|
(1U << KEY_TYPE_indirect_inline_data),
[BKEY_TYPE_subvolumes] =
(1U << KEY_TYPE_subvolume),
[BKEY_TYPE_snapshots] =
(1U << KEY_TYPE_snapshot),
[BKEY_TYPE_btree] =
(1U << KEY_TYPE_btree_ptr)|
(1U << KEY_TYPE_btree_ptr_v2),


@ -164,6 +164,11 @@ btree_key_cache_create(struct btree_key_cache *c,
was_new = false;
}
if (btree_id == BTREE_ID_subvolumes)
six_lock_pcpu_alloc(&ck->c.lock);
else
six_lock_pcpu_free(&ck->c.lock);
ck->c.level = 0;
ck->c.btree_id = btree_id;
ck->key.btree_id = btree_id;


@ -606,7 +606,8 @@ static inline bool btree_node_is_extents(struct btree *b)
#define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS \
((1U << BKEY_TYPE_alloc)| \
(1U << BKEY_TYPE_stripes))
(1U << BKEY_TYPE_stripes)| \
(1U << BKEY_TYPE_snapshots))
#define BTREE_NODE_TYPE_HAS_TRIGGERS \
(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS| \
@ -653,7 +654,8 @@ enum btree_update_flags {
#define BTREE_TRIGGER_WANTS_OLD_AND_NEW \
((1U << KEY_TYPE_stripe)| \
(1U << KEY_TYPE_inode))
(1U << KEY_TYPE_inode)| \
(1U << KEY_TYPE_snapshot))
static inline bool btree_node_type_needs_gc(enum btree_node_type type)
{
@ -670,11 +672,6 @@ struct btree_root {
s8 error;
};
/*
* Optional hook that will be called just prior to a btree node update, when
* we're holding the write lock and we know what key is about to be overwritten:
*/
enum btree_insert_ret {
BTREE_INSERT_OK,
/* leaf node needs to be split */
@ -695,8 +692,4 @@ enum btree_node_sibling {
btree_next_sib,
};
typedef struct btree_nr_keys (*sort_fix_overlapping_fn)(struct bset *,
struct btree *,
struct btree_node_iter *);
#endif /* _BCACHEFS_BTREE_TYPES_H */


@ -15,6 +15,7 @@
#include "journal.h"
#include "journal_reclaim.h"
#include "keylist.h"
#include "subvolume.h"
#include "replicas.h"
#include "trace.h"
@ -245,6 +246,11 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
BUG_ON(i->cached != i->path->cached);
BUG_ON(i->level != i->path->level);
BUG_ON(i->btree_id != i->path->btree_id);
EBUG_ON(!i->level &&
!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
i->k->k.p.snapshot &&
bch2_snapshot_internal_node(trans->c, i->k->k.p.snapshot));
}
static noinline int


@ -16,6 +16,7 @@
#include "movinggc.h"
#include "reflink.h"
#include "replicas.h"
#include "subvolume.h"
#include "trace.h"
#include <linux/preempt.h>
@ -1204,6 +1205,8 @@ static int bch2_mark_key_locked(struct bch_fs *c,
return bch2_mark_reservation(c, old, new, journal_seq, flags);
case KEY_TYPE_reflink_p:
return bch2_mark_reflink_p(c, old, new, journal_seq, flags);
case KEY_TYPE_snapshot:
return bch2_mark_snapshot(c, old, new, journal_seq, flags);
default:
return 0;
}


@ -99,7 +99,8 @@ const char *bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k)
if (memchr(d.v->d_name, '/', len))
return "invalid name";
if (le64_to_cpu(d.v->d_inum) == d.k->p.inode)
if (d.v->d_type != DT_SUBVOL &&
le64_to_cpu(d.v->d_inum) == d.k->p.inode)
return "dirent points to own directory";
return NULL;
@ -113,7 +114,7 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,
bch_scnmemcpy(out, d.v->d_name,
bch2_dirent_name_bytes(d));
pr_buf(out, " -> %llu type %s", d.v->d_inum,
d.v->d_type < DT_MAX
d.v->d_type < BCH_DT_MAX
? bch2_d_types[d.v->d_type]
: "(bad d_type)");
}


@ -9,6 +9,7 @@
#include "fsck.h"
#include "inode.h"
#include "keylist.h"
#include "subvolume.h"
#include "super.h"
#include "xattr.h"
@ -1410,7 +1411,8 @@ int bch2_fsck_full(struct bch_fs *c)
{
struct bch_inode_unpacked root_inode;
return check_inodes(c, true) ?:
return bch2_fs_snapshots_check(c) ?:
check_inodes(c, true) ?:
check_extents(c) ?:
check_dirents(c) ?:
check_xattrs(c) ?:


@ -8,6 +8,7 @@
#include "extents.h"
#include "inode.h"
#include "str_hash.h"
#include "subvolume.h"
#include "varint.h"
#include <linux/random.h>
@ -340,8 +341,8 @@ int bch2_inode_write(struct btree_trans *trans,
const char *bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
struct bch_inode_unpacked unpacked;
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
struct bch_inode_unpacked unpacked;
if (k.k->p.inode)
return "nonzero k.p.inode";
@ -368,6 +369,9 @@ const char *bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k)
unpacked.bi_nlink != 0)
return "flagged as unlinked but bi_nlink != 0";
if (unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode))
return "subvolume root but not a directory";
return NULL;
}
@ -635,6 +639,13 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u);
/* Subvolume root? */
if (inode_u.bi_subvol) {
ret = bch2_subvolume_delete(&trans, inode_u.bi_subvol, -1);
if (ret)
goto err;
}
bkey_inode_generation_init(&delete.k_i);
delete.k.p = iter.pos;
delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);


@ -63,7 +63,7 @@ const char * const bch2_member_states[] = {
#undef x
const char * const bch2_d_types[DT_MAX] = {
const char * const bch2_d_types[BCH_DT_MAX] = {
[DT_UNKNOWN] = "unknown",
[DT_FIFO] = "fifo",
[DT_CHR] = "chr",
@ -73,6 +73,7 @@ const char * const bch2_d_types[DT_MAX] = {
[DT_LNK] = "lnk",
[DT_SOCK] = "sock",
[DT_WHT] = "whiteout",
[DT_SUBVOL] = "subvol",
};
void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)


@ -20,6 +20,7 @@
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"
#include <linux/sort.h>
@ -961,6 +962,81 @@ static int read_btree_roots(struct bch_fs *c)
return ret;
}
static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
{
struct bkey_i_snapshot root_snapshot;
struct bkey_i_subvolume root_volume;
int ret;
bkey_snapshot_init(&root_snapshot.k_i);
root_snapshot.k.p.offset = U32_MAX;
root_snapshot.v.flags = 0;
root_snapshot.v.parent = 0;
root_snapshot.v.subvol = BCACHEFS_ROOT_SUBVOL;
root_snapshot.v.pad = 0;
SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
ret = bch2_btree_insert(c, BTREE_ID_snapshots,
&root_snapshot.k_i,
NULL, NULL, 0);
if (ret)
return ret;
bkey_subvolume_init(&root_volume.k_i);
root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
root_volume.v.flags = 0;
root_volume.v.snapshot = cpu_to_le32(U32_MAX);
root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
&root_volume.k_i,
NULL, NULL, 0);
if (ret)
return ret;
return 0;
}
static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bch_inode_unpacked inode;
struct bkey_inode_buf *packed;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
POS(0, BCACHEFS_ROOT_INO), 0);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_inode) {
bch_err(c, "root inode not found");
ret = -ENOENT;
goto err;
}
ret = bch2_inode_unpack(bkey_s_c_to_inode(k), &inode);
BUG_ON(ret);
inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
packed = bch2_trans_kmalloc(trans, sizeof(*packed));
ret = PTR_ERR_OR_ZERO(packed);
if (ret)
goto err;
bch2_inode_pack(c, packed, &inode);
ret = bch2_trans_update(trans, &iter, &packed->inode.k_i, 0);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
int bch2_fs_recovery(struct bch_fs *c)
{
const char *err = "cannot allocate memory";
@ -1017,11 +1093,12 @@ int bch2_fs_recovery(struct bch_fs *c)
c->opts.version_upgrade = true;
c->opts.fsck = true;
c->opts.fix_errors = FSCK_OPT_YES;
}
if (c->sb.version < bcachefs_metadata_version_btree_ptr_sectors_written) {
} else if (c->sb.version < bcachefs_metadata_version_btree_ptr_sectors_written) {
bch_info(c, "version prior to btree_ptr_sectors_written, upgrade required");
c->opts.version_upgrade = true;
} else if (c->sb.version < bcachefs_metadata_version_snapshot) {
bch_info(c, "filesystem version is prior to snapshot field - upgrading");
c->opts.version_upgrade = true;
}
ret = bch2_blacklist_table_initialize(c);
@ -1190,6 +1267,29 @@ int bch2_fs_recovery(struct bch_fs *c)
bch_verbose(c, "alloc write done");
}
if (c->sb.version < bcachefs_metadata_version_snapshot) {
err = "error creating root snapshot node";
ret = bch2_fs_initialize_subvolumes(c);
if (ret)
goto err;
}
bch_verbose(c, "reading snapshots table");
err = "error reading snapshots table";
ret = bch2_fs_snapshots_start(c);
if (ret)
goto err;
bch_verbose(c, "reading snapshots done");
if (c->sb.version < bcachefs_metadata_version_snapshot) {
/* set bi_subvol on root inode */
err = "error upgrade root inode for subvolumes";
ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
bch2_fs_upgrade_for_subvolumes(&trans));
if (ret)
goto err;
}
if (c->opts.fsck) {
bch_info(c, "starting fsck");
err = "error in fsck";
@ -1350,9 +1450,22 @@ int bch2_fs_initialize(struct bch_fs *c)
}
}
err = "error creating root snapshot node";
ret = bch2_fs_initialize_subvolumes(c);
if (ret)
goto err;
bch_verbose(c, "reading snapshots table");
err = "error reading snapshots table";
ret = bch2_fs_snapshots_start(c);
if (ret)
goto err;
bch_verbose(c, "reading snapshots done");
bch2_inode_init(c, &root_inode, 0, 0,
S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
root_inode.bi_inum = BCACHEFS_ROOT_INO;
root_inode.bi_inum = BCACHEFS_ROOT_INO;
root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
bch2_inode_pack(c, &packed_inode, &root_inode);
packed_inode.inode.k.p.snapshot = U32_MAX;

fs/bcachefs/subvolume.c (new file, 981 lines)

@ -0,0 +1,981 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "error.h"
#include "subvolume.h"
/* Snapshot tree: */
static void bch2_delete_dead_snapshots_work(struct work_struct *);
static void bch2_delete_dead_snapshots(struct bch_fs *);
void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
pr_buf(out, "is_subvol %llu deleted %llu parent %u children %u %u subvol %u",
BCH_SNAPSHOT_SUBVOL(s.v),
BCH_SNAPSHOT_DELETED(s.v),
le32_to_cpu(s.v->parent),
le32_to_cpu(s.v->children[0]),
le32_to_cpu(s.v->children[1]),
le32_to_cpu(s.v->subvol));
}
const char *bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_s_c_snapshot s;
u32 i, id;
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
bkey_cmp(k.k->p, POS(0, 1)) < 0)
return "bad pos";
if (bkey_val_bytes(k.k) != sizeof(struct bch_snapshot))
return "bad val size";
s = bkey_s_c_to_snapshot(k);
id = le32_to_cpu(s.v->parent);
if (id && id <= k.k->p.offset)
return "bad parent node";
if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]))
return "children not normalized";
if (s.v->children[0] &&
s.v->children[0] == s.v->children[1])
return "duplicate child nodes";
for (i = 0; i < 2; i++) {
id = le32_to_cpu(s.v->children[i]);
if (id >= k.k->p.offset)
return "bad child node";
}
return NULL;
}
int bch2_mark_snapshot(struct bch_fs *c,
struct bkey_s_c old, struct bkey_s_c new,
u64 journal_seq, unsigned flags)
{
struct snapshot_t *t;
t = genradix_ptr_alloc(&c->snapshots,
U32_MAX - new.k->p.offset,
GFP_KERNEL);
if (!t)
return -ENOMEM;
if (new.k->type == KEY_TYPE_snapshot) {
struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
t->parent = le32_to_cpu(s.v->parent);
t->children[0] = le32_to_cpu(s.v->children[0]);
t->children[1] = le32_to_cpu(s.v->children[1]);
t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
} else {
t->parent = 0;
t->children[0] = 0;
t->children[1] = 0;
t->subvol = 0;
}
return 0;
}
static int subvol_lookup(struct btree_trans *trans, unsigned id, struct bch_subvolume *s)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, POS(0, id), 0);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;
if (!ret)
*s = *bkey_s_c_to_subvolume(k).v;
bch2_trans_iter_exit(trans, &iter);
return ret;
}
static int snapshot_lookup(struct btree_trans *trans, u32 id,
struct bch_snapshot *s)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
BTREE_ITER_WITH_UPDATES);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k) ?: k.k->type == KEY_TYPE_snapshot ? 0 : -ENOENT;
if (!ret)
*s = *bkey_s_c_to_snapshot(k).v;
bch2_trans_iter_exit(trans, &iter);
return ret;
}
static int snapshot_live(struct btree_trans *trans, u32 id)
{
struct bch_snapshot v;
int ret;
if (!id)
return 0;
ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
if (ret == -ENOENT)
bch_err(trans->c, "snapshot node %u not found", id);
if (ret)
return ret;
return !BCH_SNAPSHOT_DELETED(&v);
}
static int bch2_snapshots_set_equiv(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_snapshot snap;
unsigned i;
int ret;
for_each_btree_key(trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
u32 id = k.k->p.offset, child[2];
unsigned nr_live = 0, live_idx;
if (k.k->type != KEY_TYPE_snapshot)
continue;
snap = bkey_s_c_to_snapshot(k);
child[0] = le32_to_cpu(snap.v->children[0]);
child[1] = le32_to_cpu(snap.v->children[1]);
for (i = 0; i < 2; i++) {
ret = snapshot_live(trans, child[i]);
if (ret < 0)
break;
if (ret)
live_idx = i;
nr_live += ret;
}
snapshot_t(c, id)->equiv = nr_live == 1
? snapshot_t(c, child[live_idx])->equiv
: id;
}
bch2_trans_iter_exit(trans, &iter);
if (ret)
bch_err(c, "error walking snapshots: %i", ret);
return ret;
}
/* fsck: */
static int bch2_snapshot_check(struct btree_trans *trans,
struct bkey_s_c_snapshot s)
{
struct bch_subvolume subvol;
struct bch_snapshot v;
u32 i, id;
int ret;
id = le32_to_cpu(s.v->subvol);
ret = lockrestart_do(trans, subvol_lookup(trans, id, &subvol));
if (ret == -ENOENT)
bch_err(trans->c, "snapshot node %llu has nonexistent subvolume %u",
s.k->p.offset, id);
if (ret)
return ret;
if (BCH_SNAPSHOT_SUBVOL(s.v) != (le32_to_cpu(subvol.snapshot) == s.k->p.offset)) {
bch_err(trans->c, "snapshot node %llu has wrong BCH_SNAPSHOT_SUBVOL",
s.k->p.offset);
return -EINVAL;
}
id = le32_to_cpu(s.v->parent);
if (id) {
ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
if (ret == -ENOENT)
bch_err(trans->c, "snapshot node %llu has nonexistent parent %u",
s.k->p.offset, id);
if (ret)
return ret;
if (le32_to_cpu(v.children[0]) != s.k->p.offset &&
le32_to_cpu(v.children[1]) != s.k->p.offset) {
bch_err(trans->c, "snapshot parent %u missing pointer to child %llu",
id, s.k->p.offset);
return -EINVAL;
}
}
for (i = 0; i < 2 && s.v->children[i]; i++) {
id = le32_to_cpu(s.v->children[i]);
ret = lockrestart_do(trans, snapshot_lookup(trans, id, &v));
if (ret == -ENOENT)
bch_err(trans->c, "snapshot node %llu has nonexistent child %u",
s.k->p.offset, id);
if (ret)
return ret;
if (le32_to_cpu(v.parent) != s.k->p.offset) {
bch_err(trans->c, "snapshot child %u has wrong parent (got %u should be %llu)",
id, le32_to_cpu(v.parent), s.k->p.offset);
return -EINVAL;
}
}
return 0;
}
int bch2_fs_snapshots_check(struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bch_snapshot s;
unsigned id;
int ret;
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_snapshot)
continue;
ret = bch2_snapshot_check(&trans, bkey_s_c_to_snapshot(k));
if (ret)
break;
}
bch2_trans_iter_exit(&trans, &iter);
if (ret) {
bch_err(c, "error %i checking snapshots", ret);
goto err;
}
for_each_btree_key(&trans, iter, BTREE_ID_subvolumes,
POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_subvolume)
continue;
again_2:
id = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
ret = snapshot_lookup(&trans, id, &s);
if (ret == -EINTR) {
k = bch2_btree_iter_peek(&iter);
goto again_2;
} else if (ret == -ENOENT)
bch_err(c, "subvolume %llu points to nonexistent snapshot %u",
k.k->p.offset, id);
else if (ret)
break;
}
bch2_trans_iter_exit(&trans, &iter);
err:
bch2_trans_exit(&trans);
return ret;
}
void bch2_fs_snapshots_exit(struct bch_fs *c)
{
genradix_free(&c->snapshots);
}
int bch2_fs_snapshots_start(struct bch_fs *c)
{
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
bool have_deleted = false;
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0)
break;
if (k.k->type != KEY_TYPE_snapshot) {
bch_err(c, "found wrong key type %u in snapshot node table",
k.k->type);
continue;
}
if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
have_deleted = true;
ret = bch2_mark_snapshot(c, bkey_s_c_null, k, 0, 0);
if (ret)
break;
}
bch2_trans_iter_exit(&trans, &iter);
if (ret)
goto err;
ret = bch2_snapshots_set_equiv(&trans);
if (ret)
goto err;
err:
bch2_trans_exit(&trans);
if (!ret && have_deleted) {
bch_info(c, "restarting deletion of dead snapshots");
if (c->opts.fsck) {
bch2_delete_dead_snapshots_work(&c->snapshot_delete_work);
} else {
bch2_delete_dead_snapshots(c);
}
}
return ret;
}
/*
* Mark a snapshot as deleted, for future cleanup:
*/
static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_i_snapshot *s;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_snapshot) {
bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
ret = -ENOENT;
goto err;
}
/* already deleted? */
if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
goto err;
s = bch2_trans_kmalloc(trans, sizeof(*s));
ret = PTR_ERR_OR_ZERO(s);
if (ret)
goto err;
bkey_reassemble(&s->k_i, k);
SET_BCH_SNAPSHOT_DELETED(&s->v, true);
ret = bch2_trans_update(trans, &iter, &s->k_i, 0);
if (ret)
goto err;
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
struct bkey_s_c k;
struct bkey_s_c_snapshot s;
struct bkey_i_snapshot *parent;
u32 parent_id;
unsigned i;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_snapshot) {
bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
ret = -ENOENT;
goto err;
}
s = bkey_s_c_to_snapshot(k);
BUG_ON(!BCH_SNAPSHOT_DELETED(s.v));
parent_id = le32_to_cpu(s.v->parent);
if (parent_id) {
bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
POS(0, parent_id),
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&p_iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_snapshot) {
bch2_fs_inconsistent(trans->c, "missing snapshot %u", parent_id);
ret = -ENOENT;
goto err;
}
parent = bch2_trans_kmalloc(trans, sizeof(*parent));
ret = PTR_ERR_OR_ZERO(parent);
if (ret)
goto err;
bkey_reassemble(&parent->k_i, k);
for (i = 0; i < 2; i++)
if (le32_to_cpu(parent->v.children[i]) == id)
break;
if (i == 2)
bch_err(trans->c, "snapshot %u missing child pointer to %u",
parent_id, id);
else
parent->v.children[i] = 0;
if (le32_to_cpu(parent->v.children[0]) <
le32_to_cpu(parent->v.children[1]))
swap(parent->v.children[0],
parent->v.children[1]);
ret = bch2_trans_update(trans, &p_iter, &parent->k_i, 0);
if (ret)
goto err;
}
ret = bch2_btree_delete_at(trans, &iter, 0);
err:
bch2_trans_iter_exit(trans, &p_iter);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
static int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
u32 *new_snapids,
u32 *snapshot_subvols,
unsigned nr_snapids)
{
struct btree_iter iter;
struct bkey_i_snapshot *n;
struct bkey_s_c k;
unsigned i;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
POS_MIN, BTREE_ITER_INTENT);
k = bch2_btree_iter_peek(&iter);
ret = bkey_err(k);
if (ret)
goto err;
for (i = 0; i < nr_snapids; i++) {
k = bch2_btree_iter_prev_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (!k.k || !k.k->p.offset) {
ret = -ENOSPC;
goto err;
}
n = bch2_trans_kmalloc(trans, sizeof(*n));
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
bkey_snapshot_init(&n->k_i);
n->k.p = iter.pos;
n->v.flags = 0;
n->v.parent = cpu_to_le32(parent);
n->v.subvol = cpu_to_le32(snapshot_subvols[i]);
n->v.pad = 0;
SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
bch2_trans_update(trans, &iter, &n->k_i, 0);
ret = bch2_mark_snapshot(trans->c, bkey_s_c_null, bkey_i_to_s_c(&n->k_i), 0, 0);
if (ret)
break;
new_snapids[i] = iter.pos.offset;
}
if (parent) {
bch2_btree_iter_set_pos(&iter, POS(0, parent));
k = bch2_btree_iter_peek(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_snapshot) {
bch_err(trans->c, "snapshot %u not found", parent);
ret = -ENOENT;
goto err;
}
n = bch2_trans_kmalloc(trans, sizeof(*n));
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;
bkey_reassemble(&n->k_i, k);
if (n->v.children[0] || n->v.children[1]) {
bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
ret = -EINVAL;
goto err;
}
n->v.children[0] = cpu_to_le32(new_snapids[0]);
n->v.children[1] = cpu_to_le32(new_snapids[1]);
SET_BCH_SNAPSHOT_SUBVOL(&n->v, false);
bch2_trans_update(trans, &iter, &n->k_i, 0);
}
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
/* List of snapshot IDs that are being deleted: */
struct snapshot_id_list {
u32 nr;
u32 size;
u32 *d;
};
static bool snapshot_list_has_id(struct snapshot_id_list *s, u32 id)
{
unsigned i;
for (i = 0; i < s->nr; i++)
if (id == s->d[i])
return true;
return false;
}
static int snapshot_id_add(struct snapshot_id_list *s, u32 id)
{
BUG_ON(snapshot_list_has_id(s, id));
if (s->nr == s->size) {
size_t new_size = max(8U, s->size * 2);
void *n = krealloc(s->d,
new_size * sizeof(s->d[0]),
GFP_KERNEL);
if (!n) {
pr_err("error allocating snapshot ID list");
return -ENOMEM;
}
s->d = n;
s->size = new_size;
}
s->d[s->nr++] = id;
return 0;
}
static int bch2_snapshot_delete_keys_btree(struct btree_trans *trans,
struct snapshot_id_list *deleted,
enum btree_id btree_id)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct snapshot_id_list equiv_seen = { 0 };
struct bpos last_pos = POS_MIN;
int ret = 0;
/*
* XXX: We should also delete whiteouts that no longer overwrite
* anything
*/
bch2_trans_iter_init(trans, &iter, btree_id, POS_MIN,
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_ALL_SNAPSHOTS);
while ((bch2_trans_begin(trans),
(k = bch2_btree_iter_peek(&iter)).k) &&
!(ret = bkey_err(k))) {
u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;
if (bkey_cmp(k.k->p, last_pos))
equiv_seen.nr = 0;
last_pos = k.k->p;
if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
snapshot_list_has_id(&equiv_seen, equiv)) {
if (btree_id == BTREE_ID_inodes &&
bch2_btree_key_cache_flush(trans, btree_id, iter.pos))
continue;
ret = __bch2_trans_do(trans, NULL, NULL,
BTREE_INSERT_NOFAIL,
bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(trans, &iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
if (ret)
break;
} else {
ret = snapshot_id_add(&equiv_seen, equiv);
if (ret)
break;
}
bch2_btree_iter_advance(&iter);
}
bch2_trans_iter_exit(trans, &iter);
kfree(equiv_seen.d);
return ret;
}
static void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_snapshot snap;
struct snapshot_id_list deleted = { 0 };
u32 i, id, children[2];
int ret = 0;
bch2_trans_init(&trans, c, 0, 0);
/*
* For every snapshot node: If we have no live children and it's not
* pointed to by a subvolume, delete it:
*/
for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_snapshot)
continue;
snap = bkey_s_c_to_snapshot(k);
if (BCH_SNAPSHOT_DELETED(snap.v) ||
BCH_SNAPSHOT_SUBVOL(snap.v))
continue;
children[0] = le32_to_cpu(snap.v->children[0]);
children[1] = le32_to_cpu(snap.v->children[1]);
ret = snapshot_live(&trans, children[0]) ?:
snapshot_live(&trans, children[1]);
if (ret < 0)
break;
if (ret)
continue;
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
bch2_snapshot_node_set_deleted(&trans, iter.pos.offset));
if (ret) {
bch_err(c, "error deleting snapshot %llu: %i", iter.pos.offset, ret);
break;
}
}
bch2_trans_iter_exit(&trans, &iter);
if (ret) {
bch_err(c, "error walking snapshots: %i", ret);
goto err;
}
ret = bch2_snapshots_set_equiv(&trans);
if (ret)
goto err;
for_each_btree_key(&trans, iter, BTREE_ID_snapshots,
POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_snapshot)
continue;
snap = bkey_s_c_to_snapshot(k);
if (BCH_SNAPSHOT_DELETED(snap.v)) {
ret = snapshot_id_add(&deleted, k.k->p.offset);
if (ret)
break;
}
}
bch2_trans_iter_exit(&trans, &iter);
if (ret) {
bch_err(c, "error walking snapshots: %i", ret);
goto err;
}
for (id = 0; id < BTREE_ID_NR; id++) {
if (!btree_type_has_snapshots(id))
continue;
ret = bch2_snapshot_delete_keys_btree(&trans, &deleted, id);
if (ret) {
bch_err(c, "error deleting snapshot keys: %i", ret);
goto err;
}
}
for (i = 0; i < deleted.nr; i++) {
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
bch2_snapshot_node_delete(&trans, deleted.d[i]));
if (ret) {
bch_err(c, "error deleting snapshot %u: %i",
deleted.d[i], ret);
goto err;
}
}
err:
kfree(deleted.d);
bch2_trans_exit(&trans);
percpu_ref_put(&c->writes);
}
static void bch2_delete_dead_snapshots(struct bch_fs *c)
{
if (unlikely(!percpu_ref_tryget(&c->writes)))
return;
if (!queue_work(system_long_wq, &c->snapshot_delete_work))
percpu_ref_put(&c->writes);
}
static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
struct btree_trans_commit_hook *h)
{
bch2_delete_dead_snapshots(trans->c);
return 0;
}
/* Subvolumes: */
const char *bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0)
return "invalid pos";
if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
return "invalid pos";
if (bkey_val_bytes(k.k) != sizeof(struct bch_subvolume))
return "bad val size";
return NULL;
}
void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
pr_buf(out, "root %llu snapshot id %u",
le64_to_cpu(s.v->inode),
le32_to_cpu(s.v->snapshot));
}
int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
u32 *snapid)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
POS(0, subvol),
BTREE_ITER_CACHED|
BTREE_ITER_WITH_UPDATES);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_subvolume) {
bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
ret = -EIO;
goto err;
}
*snapid = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
/* XXX: mark snapshot id for deletion, walk btree and delete: */
int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid,
int deleting_snapshot)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_subvolume subvol;
struct btree_trans_commit_hook *h;
struct bkey_i *delete;
u32 snapid;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
POS(0, subvolid),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_subvolume) {
bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
ret = -EIO;
goto err;
}
subvol = bkey_s_c_to_subvolume(k);
snapid = le32_to_cpu(subvol.v->snapshot);
if (deleting_snapshot >= 0 &&
deleting_snapshot != BCH_SUBVOLUME_SNAP(subvol.v)) {
ret = -ENOENT;
goto err;
}
delete = bch2_trans_kmalloc(trans, sizeof(*delete));
ret = PTR_ERR_OR_ZERO(delete);
if (ret)
goto err;
bkey_init(&delete->k);
delete->k.p = iter.pos;
ret = bch2_trans_update(trans, &iter, delete, 0);
if (ret)
goto err;
ret = bch2_snapshot_node_set_deleted(trans, snapid);
h = bch2_trans_kmalloc(trans, sizeof(*h));
ret = PTR_ERR_OR_ZERO(h);
if (ret)
goto err;
h->fn = bch2_delete_dead_snapshots_hook;
bch2_trans_commit_hook(trans, h);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
}
int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
u32 src_subvolid,
u32 *new_subvolid,
u32 *new_snapshotid,
bool ro)
{
struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
struct bkey_i_subvolume *new_subvol = NULL;
struct bkey_i_subvolume *src_subvol = NULL;
struct bkey_s_c k;
u32 parent = 0, new_nodes[2], snapshot_subvols[2];
int ret = 0;
for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
break;
if (bkey_deleted(k.k))
goto found_slot;
}
if (!ret)
ret = -ENOSPC;
goto err;
found_slot:
snapshot_subvols[0] = dst_iter.pos.offset;
snapshot_subvols[1] = src_subvolid;
if (src_subvolid) {
/* Creating a snapshot: */
src_subvol = bch2_trans_kmalloc(trans, sizeof(*src_subvol));
ret = PTR_ERR_OR_ZERO(src_subvol);
if (ret)
goto err;
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
POS(0, src_subvolid),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&src_iter);
ret = bkey_err(k);
if (ret)
goto err;
if (k.k->type != KEY_TYPE_subvolume) {
bch_err(trans->c, "subvolume %u not found", src_subvolid);
ret = -ENOENT;
goto err;
}
bkey_reassemble(&src_subvol->k_i, k);
parent = le32_to_cpu(src_subvol->v.snapshot);
}
ret = bch2_snapshot_node_create(trans, parent, new_nodes,
snapshot_subvols,
src_subvolid ? 2 : 1);
if (ret)
goto err;
if (src_subvolid) {
src_subvol->v.snapshot = cpu_to_le32(new_nodes[1]);
bch2_trans_update(trans, &src_iter, &src_subvol->k_i, 0);
}
new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
ret = PTR_ERR_OR_ZERO(new_subvol);
if (ret)
goto err;
bkey_subvolume_init(&new_subvol->k_i);
new_subvol->v.flags = 0;
new_subvol->v.snapshot = cpu_to_le32(new_nodes[0]);
new_subvol->v.inode = cpu_to_le64(inode);
SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
new_subvol->k.p = dst_iter.pos;
bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
*new_subvolid = new_subvol->k.p.offset;
*new_snapshotid = new_nodes[0];
err:
bch2_trans_iter_exit(trans, &src_iter);
bch2_trans_iter_exit(trans, &dst_iter);
return ret;
}
int bch2_fs_subvolumes_init(struct bch_fs *c)
{
INIT_WORK(&c->snapshot_delete_work, bch2_delete_dead_snapshots_work);
return 0;
}

fs/bcachefs/subvolume.h (new file, 77 lines)

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SUBVOLUME_H
#define _BCACHEFS_SUBVOLUME_H
void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
const char *bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_snapshot (struct bkey_ops) { \
.key_invalid = bch2_snapshot_invalid, \
.val_to_text = bch2_snapshot_to_text, \
}
int bch2_mark_snapshot(struct bch_fs *, struct bkey_s_c,
struct bkey_s_c, u64, unsigned);
static inline struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
{
return genradix_ptr(&c->snapshots, U32_MAX - id);
}
static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
return snapshot_t(c, id)->parent;
}
static inline u32 bch2_snapshot_internal_node(struct bch_fs *c, u32 id)
{
struct snapshot_t *s = snapshot_t(c, id);
return s->children[0] || s->children[1];
}
static inline u32 bch2_snapshot_sibling(struct bch_fs *c, u32 id)
{
struct snapshot_t *s;
u32 parent = bch2_snapshot_parent(c, id);
if (!parent)
return 0;
s = snapshot_t(c, bch2_snapshot_parent(c, id));
if (id == s->children[0])
return s->children[1];
if (id == s->children[1])
return s->children[0];
return 0;
}
static inline bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
while (id && id < ancestor)
id = bch2_snapshot_parent(c, id);
return id == ancestor;
}
int bch2_fs_snapshots_check(struct bch_fs *);
void bch2_fs_snapshots_exit(struct bch_fs *);
int bch2_fs_snapshots_start(struct bch_fs *);
const char *bch2_subvolume_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_subvolume (struct bkey_ops) { \
.key_invalid = bch2_subvolume_invalid, \
.val_to_text = bch2_subvolume_to_text, \
}
int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);
int bch2_subvolume_delete(struct btree_trans *, u32, int);
int bch2_subvolume_create(struct btree_trans *, u64, u32,
u32 *, u32 *, bool);
int bch2_fs_subvolumes_init(struct bch_fs *);
#endif /* _BCACHEFS_SUBVOLUME_H */
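Nothing calls these helpers yet; per the commit message, the hookup to existing code comes in the next patches. As a minimal sketch of the intended usage (the wrapper function here is hypothetical; only bch2_subvolume_get_snapshot, lockrestart_do and the btree_trans helpers exist in the tree as of this patch), resolving a subvol_inum to the snapshot ID its btree lookups should use could look like:

/* Hypothetical caller, not part of this patch: */
static int example_subvol_snapshot(struct bch_fs *c, subvol_inum inum, u32 *snapshot)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	/* lockrestart_do() retries the lookup on transaction restart (-EINTR): */
	ret = lockrestart_do(&trans,
		bch2_subvolume_get_snapshot(&trans, inum.subvol, snapshot));

	bch2_trans_exit(&trans);
	return ret;
}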


@ -39,6 +39,7 @@
#include "rebalance.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super.h"
#include "super-io.h"
#include "sysfs.h"
@ -475,6 +476,7 @@ static void __bch2_fs_free(struct bch_fs *c)
for (i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
bch2_fs_snapshots_exit(c);
bch2_fs_quota_exit(c);
bch2_fs_fsio_exit(c);
bch2_fs_ec_exit(c);
@ -694,6 +696,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
mutex_init(&c->usage_scratch_lock);
mutex_init(&c->bio_bounce_pages_lock);
mutex_init(&c->snapshot_table_lock);
spin_lock_init(&c->btree_write_error_lock);
@ -797,6 +800,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
bch2_fs_btree_key_cache_init(&c->btree_key_cache) ||
bch2_fs_btree_iter_init(c) ||
bch2_fs_btree_interior_update_init(c) ||
bch2_fs_subvolumes_init(c) ||
bch2_fs_io_init(c) ||
bch2_fs_encryption_init(c) ||
bch2_fs_compress_init(c) ||