md/raid: only permit hot-add of compatible integrity profiles

It is not safe for an integrity profile to be changed while i/o is
in-flight in the queue.  Prevent adding new disks or otherwise onlining
spares to an array if the device has an incompatible integrity profile.

The original change to the blk_integrity_unregister implementation in
md, commit c7bfced9a6 ("md: suspend i/o during runtime
blk_integrity_unregister"), introduced an immediate hang regression.

This policy of disallowing changes to the integrity profile once one has
been established is shared with DM.
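As a sketch of the resulting contract (the wrapping function below is
hypothetical; the check itself mirrors the raid1/raid10/multipath call
sites in this change), a personality's hot-add path now asks
md_integrity_add_rdev() whether the incoming device's profile matches
and refuses the hot-add instead of downgrading the array's profile:

	/* Illustrative only: how a hot-add implementation is expected to
	 * use the new md_integrity_add_rdev() return value.
	 */
	static int example_hot_add_disk(struct mddev *mddev, struct md_rdev *rdev)
	{
		/* Reject the device up front if its integrity profile does
		 * not match the profile already established for the array.
		 */
		if (md_integrity_add_rdev(rdev, mddev))
			return -ENXIO;

		/* ... attach rdev to a free slot and start recovery ... */
		return 0;
	}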

Here is an abbreviated log from a test run that:
1/ Creates a degraded raid1 with an integrity-enabled device (pmem0s) [   59.076127]
2/ Tries to add an integrity-disabled device (pmem1m) [   90.489209]
3/ Retries with an integrity-enabled device (pmem1s) [  205.671277]

[   59.076127] md/raid1:md0: active with 1 out of 2 mirrors
[   59.078302] md: data integrity enabled on md0
[..]
[   90.489209] md0: incompatible integrity profile for pmem1m
[..]
[  205.671277] md: super_written gets error=-5
[  205.677386] md/raid1:md0: Disk failure on pmem1m, disabling device.
[  205.677386] md/raid1:md0: Operation continuing on 1 devices.
[  205.683037] RAID1 conf printout:
[  205.684699]  --- wd:1 rd:2
[  205.685972]  disk 0, wo:0, o:1, dev:pmem0s
[  205.687562]  disk 1, wo:1, o:1, dev:pmem1s
[  205.691717] md: recovery of RAID array md0

Fixes: c7bfced9a6 ("md: suspend i/o during runtime blk_integrity_unregister")
Cc: <stable@vger.kernel.org>
Cc: Mike Snitzer <snitzer@redhat.com>
Reported-by: NeilBrown <neilb@suse.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: NeilBrown <neilb@suse.com>
commit 1501efadc5
parent 7aafc405ce
Author: Dan Williams <dan.j.williams@intel.com>, 2016-01-13 16:00:07 -08:00
Committer: NeilBrown <neilb@suse.com>
5 changed files with 26 additions and 22 deletions

drivers/md/md.c

@@ -2010,28 +2010,32 @@ int md_integrity_register(struct mddev *mddev)
 }
 EXPORT_SYMBOL(md_integrity_register);
 
-/* Disable data integrity if non-capable/non-matching disk is being added */
-void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
+/*
+ * Attempt to add an rdev, but only if it is consistent with the current
+ * integrity profile
+ */
+int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
 {
 	struct blk_integrity *bi_rdev;
 	struct blk_integrity *bi_mddev;
+	char name[BDEVNAME_SIZE];
 
 	if (!mddev->gendisk)
-		return;
+		return 0;
 
 	bi_rdev = bdev_get_integrity(rdev->bdev);
 	bi_mddev = blk_get_integrity(mddev->gendisk);
 
 	if (!bi_mddev) /* nothing to do */
-		return;
-	if (rdev->raid_disk < 0) /* skip spares */
-		return;
-	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
-					     rdev->bdev->bd_disk) >= 0)
-		return;
-	WARN_ON_ONCE(!mddev->suspended);
-	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
-	blk_integrity_unregister(mddev->gendisk);
+		return 0;
+
+	if (blk_integrity_compare(mddev->gendisk, rdev->bdev->bd_disk) != 0) {
+		printk(KERN_NOTICE "%s: incompatible integrity profile for %s\n",
+				mdname(mddev), bdevname(rdev->bdev, name));
+		return -ENXIO;
+	}
+
+	return 0;
 }
 EXPORT_SYMBOL(md_integrity_add_rdev);

drivers/md/md.h

@@ -660,7 +660,7 @@ extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
 extern int md_check_no_bitmap(struct mddev *mddev);
 extern int md_integrity_register(struct mddev *mddev);
-extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
+extern int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
 extern void mddev_init(struct mddev *mddev);

drivers/md/multipath.c

@@ -257,6 +257,9 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
 
+			err = md_integrity_add_rdev(rdev, mddev);
+			if (err)
+				break;
 			spin_lock_irq(&conf->device_lock);
 			mddev->degraded--;
 			rdev->raid_disk = path;
@@ -264,9 +267,6 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			spin_unlock_irq(&conf->device_lock);
 			rcu_assign_pointer(p->rdev, rdev);
 			err = 0;
-			mddev_suspend(mddev);
-			md_integrity_add_rdev(rdev, mddev);
-			mddev_resume(mddev);
 			break;
 		}

drivers/md/raid1.c

@@ -1589,6 +1589,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	if (mddev->recovery_disabled == conf->recovery_disabled)
 		return -EBUSY;
 
+	if (md_integrity_add_rdev(rdev, mddev))
+		return -ENXIO;
+
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
@@ -1632,9 +1635,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 			break;
 		}
 	}
-	mddev_suspend(mddev);
-	md_integrity_add_rdev(rdev, mddev);
-	mddev_resume(mddev);
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);

drivers/md/raid10.c

@@ -1698,6 +1698,9 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 	if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
 		return -EINVAL;
 
+	if (md_integrity_add_rdev(rdev, mddev))
+		return -ENXIO;
+
 	if (rdev->raid_disk >= 0)
 		first = last = rdev->raid_disk;
@@ -1739,9 +1742,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		rcu_assign_pointer(p->rdev, rdev);
 		break;
 	}
-	mddev_suspend(mddev);
-	md_integrity_add_rdev(rdev, mddev);
-	mddev_resume(mddev);
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);