drbd: Protect accesses to the uuid set with a spinlock

There are at least four contexts that access the uuids concurrently: the
worker, the receiver, the code receiving netlink packets, and processes
reading a sysfs attribute.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 9f2247bb9b (parent 35fd3dc58d)
Authored by Philipp Reisner on 2012-08-16 14:25:58 +02:00; committed by Jens Axboe
4 changed files with 43 additions and 10 deletions
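In one picture, the scheme this patch introduces: every context that touches
md.uuid[] must first take the new md.uuid_lock, readers included, so all of
them see a consistent UUID set. A rough userspace sketch of the idea, with a
pthread spinlock standing in for the kernel's spinlock_t and its irq-saving
variants (all names below are illustrative, not DRBD code):

/* Userspace sketch only: pthread_spinlock_t stands in for spinlock_t,
 * and the UI_SIZE value here is illustrative. */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define UI_SIZE 4

struct md {
        pthread_spinlock_t uuid_lock;   /* guards uuid[] below */
        uint64_t uuid[UI_SIZE];
};

static void md_init(struct md *md)
{
        /* kernel counterpart: the spin_lock_init() added in drbd_nl_disk_conf() */
        pthread_spin_init(&md->uuid_lock, PTHREAD_PROCESS_PRIVATE);
        memset(md->uuid, 0, sizeof(md->uuid));
}

/* Writer: every update of a slot happens under the lock. */
static void md_set_uuid(struct md *md, int idx, uint64_t val)
{
        pthread_spin_lock(&md->uuid_lock);
        md->uuid[idx] = val;
        pthread_spin_unlock(&md->uuid_lock);
}

/* Reader: take a consistent snapshot under the lock, then work on the copy. */
static void md_snapshot_uuids(struct md *md, uint64_t out[UI_SIZE])
{
        pthread_spin_lock(&md->uuid_lock);
        memcpy(out, md->uuid, sizeof(md->uuid));
        pthread_spin_unlock(&md->uuid_lock);
}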

--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h

@@ -912,6 +912,7 @@ struct drbd_md {
 	u64 md_offset;		/* sector offset to 'super' block */

 	u64 la_size_sect;	/* last agreed size, unit sectors */
+	spinlock_t uuid_lock;
 	u64 uuid[UI_SIZE];
 	u64 device_uuid;
 	u32 flags;
@@ -1283,8 +1284,9 @@ extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
 extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
 extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
 extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
 extern int drbd_md_test_flag(struct drbd_backing_dev *, int);

--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c

@@ -2125,8 +2125,10 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
 	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
 		return 1;

+	spin_lock_irq(&mdev->ldev->md.uuid_lock);
 	for (i = UI_CURRENT; i < UI_SIZE; i++)
 		p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
+	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

 	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
 	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
@@ -4011,7 +4013,7 @@ void drbd_md_mark_dirty(struct drbd_conf *mdev)
 }
 #endif

-static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
 {
 	int i;
@@ -4019,7 +4021,7 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
 		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
 }

-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 {
 	if (idx == UI_CURRENT) {
 		if (mdev->state.role == R_PRIMARY)
@@ -4034,14 +4036,24 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 	drbd_md_mark_dirty(mdev);
 }

+void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+	__drbd_uuid_set(mdev, idx, val);
+	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+}
+
 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
 	if (mdev->ldev->md.uuid[idx]) {
 		drbd_uuid_move_history(mdev);
 		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
 	}
-	_drbd_uuid_set(mdev, idx, val);
+	__drbd_uuid_set(mdev, idx, val);
+	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
 }

 /**
@@ -4054,15 +4066,20 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 {
 	u64 val;
-	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+	unsigned long long bm_uuid;
+
+	get_random_bytes(&val, sizeof(u64));
+
+	spin_lock_irq(&mdev->ldev->md.uuid_lock);
+	bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

 	if (bm_uuid)
 		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

 	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
-
-	get_random_bytes(&val, sizeof(u64));
-	_drbd_uuid_set(mdev, UI_CURRENT, val);
+	__drbd_uuid_set(mdev, UI_CURRENT, val);
+	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

 	drbd_print_uuids(mdev, "new current UUID");
 	/* get it to stable storage _now_ */
 	drbd_md_sync(mdev);
@@ -4070,9 +4087,11 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)

 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
 {
+	unsigned long flags;
 	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
 		return;

+	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
 	if (val == 0) {
 		drbd_uuid_move_history(mdev);
 		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
@@ -4084,6 +4103,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)

 		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
 	}
+	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+
 	drbd_md_mark_dirty(mdev);
 }
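The drbd_main.c changes above follow the usual kernel convention for lock
pairing: __drbd_uuid_set() must be called with uuid_lock already held,
_drbd_uuid_set() is the wrapper that takes the lock itself, and a compound
update like drbd_uuid_new_current() takes the lock once and performs every
step through the __ variant, so no other context can observe a half-rotated
UUID set. A minimal userspace sketch of that convention (illustrative names,
pthread spinlock in place of spinlock_t):

#include <pthread.h>
#include <stdint.h>

struct md {
        pthread_spinlock_t uuid_lock;
        uint64_t uuid[4];       /* slot 0 = current, 1 = bitmap (illustrative) */
};

/* __set_uuid: caller must already hold md->uuid_lock ("__" convention). */
static void __set_uuid(struct md *md, int idx, uint64_t val)
{
        md->uuid[idx] = val;
}

/* set_uuid: locked wrapper for callers that do not hold the lock. */
static void set_uuid(struct md *md, int idx, uint64_t val)
{
        pthread_spin_lock(&md->uuid_lock);
        __set_uuid(md, idx, val);
        pthread_spin_unlock(&md->uuid_lock);
}

/* Compound update, analogous to drbd_uuid_new_current(): one lock
 * acquisition covers both steps, so the rotation appears atomic. */
static void new_current_uuid(struct md *md, uint64_t fresh)
{
        pthread_spin_lock(&md->uuid_lock);
        __set_uuid(md, 1, md->uuid[0]); /* rotate current into the bitmap slot */
        __set_uuid(md, 0, fresh);       /* install the new current UUID */
        pthread_spin_unlock(&md->uuid_lock);
}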

--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c

@@ -977,6 +977,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	nbc->dc.fencing = DRBD_FENCING_DEF;
 	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

+	spin_lock_init(&nbc->md.uuid_lock);
+
 	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
 		retcode = ERR_MANDATORY_TAG;
 		goto fail;
@@ -2170,8 +2172,11 @@ static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	tl = reply->tag_list;

 	if (get_ldev(mdev)) {
+		unsigned long flags;
+		spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
 		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
 		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
+		spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
 		put_ldev(mdev);
 	}

 	put_unaligned(TT_END, tl++); /* Close the tag list */
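The netlink reader serializes the UUID blob under the lock, which is cheap;
where more work is needed, the same pattern can copy under the lock and
format afterwards, keeping the critical section short. That motive also shows
up above, where drbd_uuid_new_current() now calls get_random_bytes() before
taking the lock. A hedged userspace sketch of that snapshot-then-format shape
(names and buffer handling are illustrative, not the DRBD netlink API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define UI_SIZE 4

struct md {
        pthread_spinlock_t uuid_lock;
        uint64_t uuid[UI_SIZE];
        uint32_t flags;
};

/* Copy everything the report needs while holding the lock, then do the
 * slower string formatting after dropping it. */
static void report_uuids(struct md *md, char *buf, size_t len)
{
        uint64_t snap[UI_SIZE];
        uint32_t flags;
        size_t off = 0;
        int i;

        pthread_spin_lock(&md->uuid_lock);
        memcpy(snap, md->uuid, sizeof(snap));
        flags = md->flags;
        pthread_spin_unlock(&md->uuid_lock);

        for (i = 0; i < UI_SIZE && off < len; i++)
                off += snprintf(buf + off, len - off, "%016llX ",
                                (unsigned long long)snap[i]);
        if (off < len)
                snprintf(buf + off, len - off, "flags=%08X", (unsigned)flags);
}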

--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c

@@ -2392,7 +2392,9 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
 		if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
 		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
 			dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
-			drbd_uuid_set_bm(mdev, 0UL);
+			drbd_uuid_move_history(mdev);
+			mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
+			mdev->ldev->md.uuid[UI_BITMAP] = 0;

 			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
 				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2500,8 +2502,8 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
 			if (mdev->agreed_pro_version < 91)
 				return -1091;

-			_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
-			_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+			__drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
+			__drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

 			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
 			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
@@ -2554,11 +2556,14 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
 		mydisk = mdev->new_state_tmp.disk;

 	dev_info(DEV, "drbd_sync_handshake:\n");
+
+	spin_lock_irq(&mdev->ldev->md.uuid_lock);
 	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
 	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
 		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

 	hg = drbd_uuid_compare(mdev, &rule_nr);
+	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

 	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);