bcachefs: don't block reads if we're promoting
The promote path calls data_update_init(), and now that we take locks there, there is potential for promote to block our read path; just return an error when we can't take the lock, instead of blocking.

Signed-off-by: Daniel Hill <daniel@gluo.nz>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent
0093b9e970
commit
3482dd6a25
|
@ -411,6 +411,7 @@ int bch2_data_update_init(struct btree_trans *trans,
|
||||||
const union bch_extent_entry *entry;
|
const union bch_extent_entry *entry;
|
||||||
struct extent_ptr_decoded p;
|
struct extent_ptr_decoded p;
|
||||||
unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
|
unsigned i, reserve_sectors = k.k->size * data_opts.extra_replicas;
|
||||||
|
unsigned int ptrs_locked = 0;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
bch2_bkey_buf_init(&m->k);
|
bch2_bkey_buf_init(&m->k);
|
||||||
|
@ -436,6 +437,8 @@ int bch2_data_update_init(struct btree_trans *trans,
|
||||||
|
|
||||||
i = 0;
|
i = 0;
|
||||||
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
|
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
|
||||||
|
bool locked;
|
||||||
|
|
||||||
if (((1U << i) & m->data_opts.rewrite_ptrs) &&
|
if (((1U << i) & m->data_opts.rewrite_ptrs) &&
|
||||||
p.ptr.cached)
|
p.ptr.cached)
|
||||||
BUG();
|
BUG();
|
||||||
|
@ -461,11 +464,7 @@ int bch2_data_update_init(struct btree_trans *trans,
|
||||||
if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
|
if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
|
||||||
m->op.incompressible = true;
|
m->op.incompressible = true;
|
||||||
|
|
||||||
i++;
|
|
||||||
|
|
||||||
if (ctxt) {
|
if (ctxt) {
|
||||||
bool locked;
|
|
||||||
|
|
||||||
move_ctxt_wait_event(ctxt, trans,
|
move_ctxt_wait_event(ctxt, trans,
|
||||||
(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
|
(locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
|
||||||
PTR_BUCKET_POS(c, &p.ptr), 0)) ||
|
PTR_BUCKET_POS(c, &p.ptr), 0)) ||
|
||||||
|
@ -475,9 +474,14 @@ int bch2_data_update_init(struct btree_trans *trans,
|
||||||
bch2_bucket_nocow_lock(&c->nocow_locks,
|
bch2_bucket_nocow_lock(&c->nocow_locks,
|
||||||
PTR_BUCKET_POS(c, &p.ptr), 0);
|
PTR_BUCKET_POS(c, &p.ptr), 0);
|
||||||
} else {
|
} else {
|
||||||
bch2_bucket_nocow_lock(&c->nocow_locks,
|
if (!bch2_bucket_nocow_trylock(&c->nocow_locks,
|
||||||
PTR_BUCKET_POS(c, &p.ptr), 0);
|
PTR_BUCKET_POS(c, &p.ptr), 0)) {
|
||||||
|
ret = -BCH_ERR_nocow_lock_blocked;
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
ptrs_locked |= (1U << i);
|
||||||
|
i++;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (reserve_sectors) {
|
if (reserve_sectors) {
|
||||||
|
@ -499,9 +503,13 @@ int bch2_data_update_init(struct btree_trans *trans,
|
||||||
return -BCH_ERR_unwritten_extent_update;
|
return -BCH_ERR_unwritten_extent_update;
|
||||||
return 0;
|
return 0;
|
||||||
err:
|
err:
|
||||||
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
|
i = 0;
|
||||||
bch2_bucket_nocow_unlock(&c->nocow_locks,
|
bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
|
||||||
PTR_BUCKET_POS(c, &p.ptr), 0);
|
if ((1U << i) & ptrs_locked)
|
||||||
|
bch2_bucket_nocow_unlock(&c->nocow_locks,
|
||||||
|
PTR_BUCKET_POS(c, &p.ptr), 0);
|
||||||
|
i++;
|
||||||
|
}
|
||||||
|
|
||||||
bch2_bkey_buf_exit(&m->k, c);
|
bch2_bkey_buf_exit(&m->k, c);
|
||||||
bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
|
bch2_bio_free_pages_pool(c, &m->op.wbio.bio);
|
||||||
|
|
|
@ -120,6 +120,7 @@
|
||||||
x(BCH_ERR_invalid_sb, invalid_sb_clean) \
|
x(BCH_ERR_invalid_sb, invalid_sb_clean) \
|
||||||
x(BCH_ERR_invalid_sb, invalid_sb_quota) \
|
x(BCH_ERR_invalid_sb, invalid_sb_quota) \
|
||||||
x(BCH_ERR_invalid, invalid_bkey) \
|
x(BCH_ERR_invalid, invalid_bkey) \
|
||||||
|
x(BCH_ERR_operation_blocked, nocow_lock_blocked) \
|
||||||
|
|
||||||
enum bch_errcode {
|
enum bch_errcode {
|
||||||
BCH_ERR_START = 2048,
|
BCH_ERR_START = 2048,
|
||||||
|
|
|
@ -2023,6 +2023,13 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
|
||||||
.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
|
.write_flags = BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
|
||||||
},
|
},
|
||||||
btree_id, k);
|
btree_id, k);
|
||||||
|
if (ret == -BCH_ERR_nocow_lock_blocked) {
|
||||||
|
ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
|
||||||
|
bch_promote_params);
|
||||||
|
BUG_ON(ret);
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
|
|
||||||
BUG_ON(ret);
|
BUG_ON(ret);
|
||||||
op->write.op.end_io = promote_done;
|
op->write.op.end_io = promote_done;
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue