Merge tag 'dm-3.16-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:
 "Fix dm bufio shrinker to properly zero-fill all fields.

  Fix race in dm cache that caused improper reporting of the number of
  dirty blocks in the cache"

* tag 'dm-3.16-fixes-3' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache: fix race affecting dirty block count
  dm bufio: fully initialize shrinker
Author: Linus Torvalds
Date:   2014-08-01 12:50:05 -07:00
Commit: 818be5894e

2 changed files with 7 additions and 8 deletions

drivers/md/dm-bufio.c

@@ -1541,7 +1541,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
        BUG_ON(block_size < 1 << SECTOR_SHIFT ||
               (block_size & (block_size - 1)));

-       c = kmalloc(sizeof(*c), GFP_KERNEL);
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                r = -ENOMEM;
                goto bad_client;
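The one-word change above is the whole fix: dm_bufio_client_create() assigns only some fields of the embedded struct shrinker before registering it, and kmalloc() returns uninitialized memory, so any shrinker field the code never touches held heap garbage. kzalloc() zero-fills the allocation, so every unassigned field reads as 0. A minimal userspace sketch of the same hazard, with malloc()/calloc() standing in for kmalloc()/kzalloc() and a deliberately simplified, hypothetical struct:

#include <stdio.h>
#include <stdlib.h>

/* Simplified, hypothetical stand-in for the kernel's struct shrinker;
 * 'batch' and 'flags' model fields the client code never assigns. */
struct shrinker {
        long batch;
        unsigned long flags;
};

struct client {
        unsigned int block_size;
        struct shrinker shrinker;
};

int main(void)
{
        /* Like kmalloc(): uninitialized, so c1->shrinker.batch and
         * c1->shrinker.flags hold whatever was left on the heap. */
        struct client *c1 = malloc(sizeof(*c1));

        /* Like kzalloc(): zero-filled, so unassigned fields read as 0. */
        struct client *c2 = calloc(1, sizeof(*c2));

        if (!c1 || !c2)
                return 1;

        c1->block_size = 512;   /* only the fields we remember to set */
        c2->block_size = 512;

        printf("zeroed shrinker: batch=%ld flags=%lu\n",
               c2->shrinker.batch, c2->shrinker.flags);  /* always 0, 0 */

        free(c1);
        free(c2);
        return 0;
}

Registering a shrinker whose unset fields contain garbage means registration and later callbacks can act on bogus values (the flags word, for instance), which is why zero-filling the whole client structure is the safe default.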

drivers/md/dm-cache-target.c

@@ -231,7 +231,7 @@ struct cache {
        /*
         * cache_size entries, dirty if set
         */
-       dm_cblock_t nr_dirty;
+       atomic_t nr_dirty;
        unsigned long *dirty_bitset;

        /*
@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
 {
        if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
-               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+               atomic_inc(&cache->nr_dirty);
                policy_set_dirty(cache->policy, oblock);
        }
 }
@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
 {
        if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
                policy_clear_dirty(cache->policy, oblock);
-               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
-               if (!from_cblock(cache->nr_dirty))
+               if (atomic_dec_return(&cache->nr_dirty) == 0)
                        dm_table_event(cache->ti->table);
        }
 }
@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        atomic_set(&cache->quiescing_ack, 0);

        r = -ENOMEM;
-       cache->nr_dirty = 0;
+       atomic_set(&cache->nr_dirty, 0);
        cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
        if (!cache->dirty_bitset) {
                *error = "could not allocate dirty bitset";
@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,

                residency = policy_residency(cache->policy);

-               DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+               DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
                       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
                       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
                       (unsigned long long)nr_blocks_metadata,
@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
                       (unsigned) atomic_read(&cache->stats.write_miss),
                       (unsigned) atomic_read(&cache->stats.demotion),
                       (unsigned) atomic_read(&cache->stats.promotion),
-                      (unsigned long long) from_cblock(cache->nr_dirty));
+                      (unsigned long) atomic_read(&cache->nr_dirty));

                if (writethrough_mode(&cache->features))
                        DMEMIT("1 writethrough ");
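The race fixed above is a classic lost update: the old cache->nr_dirty update is a separate read, add, and write, so two CPUs updating the counter concurrently can both read the same old value and one increment vanishes, which is how the reported dirty block count drifted from reality. Making the counter an atomic_t turns each update into a single indivisible operation, and atomic_dec_return(...) == 0 fuses the decrement with the zero test so the dm_table_event() notification fires exactly when the last dirty block is cleared. A userspace sketch of the same pattern using C11 atomics (illustrative only; the kernel has its own atomic_t API, and the data race on the plain counter is deliberate):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 1000000

static long racy_count;         /* plain read-modify-write: updates can be lost */
static atomic_long safe_count;  /* atomic read-modify-write: none are lost */

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < ITERS; i++) {
                racy_count = racy_count + 1;            /* like the old nr_dirty update */
                atomic_fetch_add(&safe_count, 1);       /* like atomic_inc(&cache->nr_dirty) */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* racy_count typically comes up short; safe_count is exactly 2 * ITERS. */
        printf("racy=%ld atomic=%ld expected=%d\n",
               racy_count, atomic_load(&safe_count), 2 * ITERS);
        return 0;
}

Build with -pthread and run it a few times: the plain counter usually loses increments under contention, which is exactly the misreporting the dm cache patch eliminates.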