drbd: Protect accesses to the uuid set with a spinlock

There are at least the worker context, the receiver context, and the
context of receiving netlink packets.
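
For illustration only (not part of the patch), the locking pattern introduced
here, reduced to a minimal standalone sketch; the example_* names are
hypothetical stand-ins for md.uuid_lock and md.uuid[]:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	static DEFINE_SPINLOCK(example_uuid_lock);	/* stand-in for md.uuid_lock */
	static u64 example_uuid[4];			/* stand-in for md.uuid[UI_SIZE] */

	/* double-underscore variant: caller must already hold the lock */
	static void __example_uuid_set(int idx, u64 val)
	{
		example_uuid[idx] = val;
	}

	/* locking variant: irqsave/irqrestore make it safe from any context */
	static void example_uuid_set(int idx, u64 val)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_uuid_lock, flags);
		__example_uuid_set(idx, val);
		spin_unlock_irqrestore(&example_uuid_lock, flags);
	}

The patch follows the same convention: __drbd_uuid_set() requires md.uuid_lock
to be held by the caller, while _drbd_uuid_set() and drbd_uuid_set() take the
lock themselves.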

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
commit 39a1aa7f49
parent fef45d297e
Philipp Reisner, 2012-08-08 21:19:09 +02:00
4 changed files with 49 additions and 11 deletions

drbd_int.h

@@ -747,6 +747,7 @@ struct drbd_md {
 	u64 md_offset;		/* sector offset to 'super' block */
 	u64 la_size_sect;	/* last agreed size, unit sectors */
+	spinlock_t uuid_lock;
 	u64 uuid[UI_SIZE];
 	u64 device_uuid;
 	u32 flags;
@@ -1119,8 +1120,9 @@ extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
 extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
 extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
 extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
 extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
 extern int drbd_md_test_flag(struct drbd_backing_dev *, int);

drbd_main.c

@@ -838,8 +838,10 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
 		put_ldev(mdev);
 		return -EIO;
 	}
+	spin_lock_irq(&mdev->ldev->md.uuid_lock);
 	for (i = UI_CURRENT; i < UI_SIZE; i++)
 		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
+	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

 	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
 	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
@@ -3015,7 +3017,7 @@ void drbd_md_mark_dirty(struct drbd_conf *mdev)
 }
 #endif

-static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
 {
 	int i;
@@ -3023,7 +3025,7 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
 		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
 }

-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 {
 	if (idx == UI_CURRENT) {
 		if (mdev->state.role == R_PRIMARY)
@@ -3038,14 +3040,24 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 	drbd_md_mark_dirty(mdev);
 }

+void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+	__drbd_uuid_set(mdev, idx, val);
+	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+}
+
 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 {
+	unsigned long flags;
+	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
 	if (mdev->ldev->md.uuid[idx]) {
 		drbd_uuid_move_history(mdev);
 		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
 	}
-	_drbd_uuid_set(mdev, idx, val);
+	__drbd_uuid_set(mdev, idx, val);
+	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
 }

 /**
@@ -3058,15 +3070,20 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 {
 	u64 val;
-	unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+	unsigned long long bm_uuid;
+
+	get_random_bytes(&val, sizeof(u64));
+
+	spin_lock_irq(&mdev->ldev->md.uuid_lock);
+	bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

 	if (bm_uuid)
 		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

 	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
+	__drbd_uuid_set(mdev, UI_CURRENT, val);
+	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

-	get_random_bytes(&val, sizeof(u64));
-	_drbd_uuid_set(mdev, UI_CURRENT, val);
 	drbd_print_uuids(mdev, "new current UUID");

 	/* get it to stable storage _now_ */
 	drbd_md_sync(mdev);
@@ -3074,9 +3091,11 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
 {
+	unsigned long flags;
+
 	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
 		return;

+	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
 	if (val == 0) {
 		drbd_uuid_move_history(mdev);
 		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
@@ -3088,6 +3107,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
 		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
 	}
+	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);

 	drbd_md_mark_dirty(mdev);
 }

drbd_nl.c

@@ -1320,6 +1320,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 		retcode = ERR_NOMEM;
 		goto fail;
 	}
+	spin_lock_init(&nbc->md.uuid_lock);

 	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
 	if (!new_disk_conf) {
 		retcode = ERR_NOMEM;
@@ -2679,8 +2681,16 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
 		goto nla_put_failure;

 	if (got_ldev) {
+		int err;
+
+		spin_lock_irq(&mdev->ldev->md.uuid_lock);
+		err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+		spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+
+		if (err)
+			goto nla_put_failure;
+
 		if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
-		    nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid) ||
 		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
 		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
 			goto nla_put_failure;

drbd_receiver.c

@@ -2796,7 +2796,9 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
 		if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
 		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
 			dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
-			drbd_uuid_set_bm(mdev, 0UL);
+			drbd_uuid_move_history(mdev);
+			mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
+			mdev->ldev->md.uuid[UI_BITMAP] = 0;

 			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
 				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2904,8 +2906,8 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
 		if (mdev->tconn->agreed_pro_version < 91)
 			return -1091;

-		_drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
-		_drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+		__drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
+		__drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);

 		dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
 		drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
@@ -2959,11 +2961,14 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role
 		mydisk = mdev->new_state_tmp.disk;

 	dev_info(DEV, "drbd_sync_handshake:\n");
+
+	spin_lock_irq(&mdev->ldev->md.uuid_lock);
 	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
 	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
 		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);

 	hg = drbd_uuid_compare(mdev, &rule_nr);
+	spin_unlock_irq(&mdev->ldev->md.uuid_lock);

 	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);