bcachefs: rcu protect trans->paths

Upcoming patches are going to change trans->paths to a reallocatable
buffer. We need to guard against use-after-free when it's accessed by
other threads; this patch introduces RCU protection in those code paths
and changes them to check for trans->paths == NULL.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
author Kent Overstreet 2023-12-12 20:08:29 -05:00
parent 6474b70610
commit fea153a845
4 changed files with 68 additions and 11 deletions
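
A minimal sketch of the reader-side pattern this commit introduces, assuming kernel context (the foo_* names are hypothetical stand-ins, not bcachefs code): enter an RCU read-side section, rcu_dereference() the paths pointer, and bail out if it is NULL. In the diff the readers additionally pre-grow the printbuf and bump out->atomic / cycle->atomic before rcu_read_lock(), since no allocation may happen inside the read-side section, and the path count lives next to the buffer itself (trans_paths_nr()), so a reader always pairs a buffer with its matching count; this sketch simply passes the count in.

/* Hypothetical reader-side sketch; foo_* types stand in for btree_trans/btree_path. */
#include <linux/rcupdate.h>

struct foo_path {
        unsigned                nodes_locked;
};

struct foo_trans {
        struct foo_path __rcu   *paths; /* reallocatable; old buffers freed via RCU */
};

static void foo_walk_paths(struct foo_trans *trans, unsigned nr)
{
        rcu_read_lock();

        struct foo_path *paths = rcu_dereference(trans->paths);
        if (!paths)                     /* the buffer may be gone entirely */
                goto out;

        for (unsigned i = 0; i < nr; i++) {
                struct foo_path *path = &paths[i];

                if (!path->nodes_locked)
                        continue;
                /* inspect/report the path here; no sleeping, no allocation */
        }
out:
        rcu_read_unlock();
}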


@@ -2920,6 +2920,8 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
trans->sorted = trans->_sorted;
trans->paths = trans->_paths;
*trans_paths_nr(trans->paths) = BTREE_ITER_MAX;
trans->paths_allocated[0] = 1;
s = btree_trans_stats(trans);
@@ -3074,12 +3076,14 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
{
struct btree_path *path;
struct btree_bkey_cached_common *b;
static char lock_types[] = { 'r', 'i', 'w' };
struct task_struct *task = READ_ONCE(trans->locking_wait.task);
unsigned l, idx;
/* before rcu_read_lock(): */
bch2_printbuf_make_room(out, 4096);
if (!out->nr_tabstops) {
printbuf_tabstop_push(out, 16);
printbuf_tabstop_push(out, 32);
@@ -3087,7 +3091,18 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
trans_for_each_path(trans, path, idx) {
/* trans->paths is rcu protected vs. freeing */
rcu_read_lock();
out->atomic++;
struct btree_path *paths = rcu_dereference(trans->paths);
if (!paths)
goto out;
unsigned long *paths_allocated = trans_paths_allocated(paths);
trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
struct btree_path *path = paths + idx;
if (!path->nodes_locked)
continue;
@@ -3120,6 +3135,9 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
bch2_btree_bkey_cached_common_to_text(out, b);
prt_newline(out);
}
out:
--out->atomic;
rcu_read_unlock();
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)


@@ -63,6 +63,22 @@ static inline void btree_trans_sort_paths(struct btree_trans *trans)
__bch2_btree_trans_sort_paths(trans);
}
static inline unsigned long *trans_paths_nr(struct btree_path *paths)
{
return &container_of(paths, struct btree_trans_paths, paths[0])->nr_paths;
}
static inline unsigned long *trans_paths_allocated(struct btree_path *paths)
{
unsigned long *v = trans_paths_nr(paths);
return v - BITS_TO_LONGS(*v);
}
#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
for (_idx = _start; \
(_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr; \
_idx++)
static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned *idx)
{

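The trans_for_each_path_idx_from() macro added above just walks the set bits of the allocation bitmap starting from a given slot. A small userspace sketch of the same iteration, using a naive stand-in for the kernel's find_next_bit():

#include <stdio.h>

#define BITS_PER_LONG   (8 * sizeof(unsigned long))

/* Naive substitute for the kernel's find_next_bit(): first set bit >= start, or nr. */
static unsigned long find_next_bit(const unsigned long *bitmap,
                                   unsigned long nr, unsigned long start)
{
        for (unsigned long i = start; i < nr; i++)
                if (bitmap[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                        return i;
        return nr;
}

#define trans_for_each_path_idx_from(_paths_allocated, _nr, _idx, _start)\
        for (_idx = _start;                                             \
             (_idx = find_next_bit(_paths_allocated, _nr, _idx)) < _nr; \
             _idx++)

int main(void)
{
        /*
         * Bits 0, 2, 3 and 5 set.  Iteration starts at slot 1, mirroring
         * the diff, which marks slot 0 allocated at trans init
         * (paths_allocated[0] = 1) and walks from index 1.
         */
        unsigned long allocated[1] = { 0x2d };
        unsigned long idx;

        trans_for_each_path_idx_from(allocated, 6, idx, 1)
                printf("path slot %lu is allocated\n", idx);

        return 0;
}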

@@ -281,9 +281,8 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
struct lock_graph g;
struct trans_waiting_for_lock *top;
struct btree_bkey_cached_common *b;
struct btree_path *path;
unsigned path_idx;
int ret;
btree_path_idx_t path_idx;
int ret = 0;
g.nr = 0;
@@ -296,13 +295,26 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
}
lock_graph_down(&g, trans);
/* trans->paths is rcu protected vs. freeing */
rcu_read_lock();
if (cycle)
cycle->atomic++;
next:
if (!g.nr)
return 0;
goto out;
top = &g.g[g.nr - 1];
trans_for_each_path_from(top->trans, path, path_idx, top->path_idx) {
struct btree_path *paths = rcu_dereference(top->trans->paths);
if (!paths)
goto up;
unsigned long *paths_allocated = trans_paths_allocated(paths);
trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
path_idx, top->path_idx) {
struct btree_path *path = paths + path_idx;
if (!path->nodes_locked)
continue;
@@ -368,18 +380,23 @@ next:
ret = lock_graph_descend(&g, trans, cycle);
if (ret)
return ret;
goto out;
goto next;
}
raw_spin_unlock(&b->lock.wait_lock);
}
}
up:
if (g.nr > 1 && cycle)
print_chain(cycle, &g);
lock_graph_up(&g);
goto next;
out:
if (cycle)
--cycle->atomic;
rcu_read_unlock();
return ret;
}
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)

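The commit only converts readers; for context, a hedged, generic sketch of the writer side such a scheme implies once trans->paths becomes reallocatable (ordinary RCU practice, not taken from the upcoming bcachefs patches; foo_* names are hypothetical): publish the resized buffer with rcu_assign_pointer() and free the old one only after a grace period. A real implementation might defer the free with call_rcu()/kfree_rcu() rather than block in synchronize_rcu().

/* Hypothetical writer-side sketch, not the bcachefs implementation. */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo_path {
        unsigned                nodes_locked;
};

struct foo_trans {
        struct foo_path __rcu   *paths;
        unsigned                nr_paths;
};

static int foo_grow_paths(struct foo_trans *trans, unsigned new_nr)
{
        /* Owner-side access, no concurrent updater: hence the 'true' condition */
        struct foo_path *old = rcu_dereference_protected(trans->paths, true);
        struct foo_path *new = kcalloc(new_nr, sizeof(*new), GFP_KERNEL);

        if (!new)
                return -ENOMEM;

        if (old)
                memcpy(new, old, trans->nr_paths * sizeof(*old));

        rcu_assign_pointer(trans->paths, new);  /* readers now see the new buffer */
        trans->nr_paths = new_nr;

        synchronize_rcu();                      /* wait out existing readers... */
        kfree(old);                             /* ...then the old buffer can go */
        return 0;
}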

@@ -372,12 +372,17 @@ struct btree_trans_commit_hook {
#define BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS 10000
struct btree_trans_paths {
unsigned long nr_paths;
struct btree_path paths[];
};
struct btree_trans {
struct bch_fs *c;
unsigned long *paths_allocated;
u8 *sorted;
struct btree_path *paths;
u8 *sorted;
void *mem;
unsigned mem_top;
@@ -431,8 +436,9 @@ struct btree_trans {
struct replicas_delta_list *fs_usage_deltas;
unsigned long _paths_allocated[BITS_TO_LONGS(BTREE_ITER_MAX)];
u8 _sorted[BTREE_ITER_MAX + 8];
struct btree_trans_paths trans_paths;
struct btree_path _paths[BTREE_ITER_MAX];
u8 _sorted[BTREE_ITER_MAX + 8];
};
static inline struct btree_path *btree_iter_path(struct btree_trans *trans, struct btree_iter *iter)
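
For reference, a self-contained userspace sketch of the layout trick behind struct btree_trans_paths and the trans_paths_nr()/trans_paths_allocated() helpers: the allocation bitmap, the path count and the path array sit back to back, so both can be recovered from the paths pointer alone. Types, names and sizes here are illustrative stand-ins, not the kernel's.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PATHS       64
#define BITS_PER_LONG   (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct foo_path { int dummy; };

struct trans_paths {
        unsigned long   nr_paths;
        struct foo_path paths[];
};

/*
 * Mirrors the tail of struct btree_trans after this patch: bitmap, then
 * nr_paths, then the array itself (in the kernel, struct btree_trans_paths
 * is embedded so that its flexible array lines up with _paths[]).
 */
struct trans_tail {
        unsigned long   paths_allocated[BITS_TO_LONGS(MAX_PATHS)];
        unsigned long   nr_paths;
        struct foo_path paths[MAX_PATHS];
};

static unsigned long *trans_paths_nr(struct foo_path *paths)
{
        return &container_of(paths, struct trans_paths, paths[0])->nr_paths;
}

static unsigned long *trans_paths_allocated(struct foo_path *paths)
{
        unsigned long *v = trans_paths_nr(paths);

        return v - BITS_TO_LONGS(*v);
}

int main(void)
{
        struct trans_tail t = { .nr_paths = MAX_PATHS };

        /* Both helpers land back on the fields that precede the array. */
        assert(trans_paths_nr(t.paths) == &t.nr_paths);
        assert(trans_paths_allocated(t.paths) == t.paths_allocated);
        printf("nr_paths = %lu\n", *trans_paths_nr(t.paths));
        return 0;
}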