brd: remove discard support

It's just an in-driver reimplementation of writing zeroes to the pages,
which fails if the discards aren't page aligned.
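
To illustrate the alignment restriction, here is a minimal userspace
sketch of the check that the deleted brd_make_request() hunk performed
(PAGE_SIZE = 4096 and SECTOR_SHIFT = 9 are assumed, and
discard_is_valid is a made-up name for illustration only):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define SECTOR_SHIFT	9

/*
 * Mirrors the check removed below: a discard was rejected unless it
 * started on a page boundary and covered a whole number of pages.
 */
static int discard_is_valid(unsigned long long sector, unsigned long size)
{
	if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1))
		return 0;	/* start sector not page aligned */
	if (size & ~PAGE_MASK)
		return 0;	/* length not a multiple of PAGE_SIZE */
	return 1;
}

int main(void)
{
	/* sector 8 is byte offset 4096 (aligned); sector 1 is 512 (not) */
	printf("%d\n", discard_is_valid(8, 8192));	/* 1: accepted */
	printf("%d\n", discard_is_valid(1, 4096));	/* 0: misaligned start */
	printf("%d\n", discard_is_valid(8, 512));	/* 0: sub-page length */
	return 0;
}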

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Christoph Hellwig 2017-04-05 19:21:16 +02:00 committed by Jens Axboe
parent 19372e2769
commit f09a06a193
1 changed file with 0 additions and 54 deletions

drivers/block/brd.c

@@ -134,28 +134,6 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	return page;
 }
 
-static void brd_free_page(struct brd_device *brd, sector_t sector)
-{
-	struct page *page;
-	pgoff_t idx;
-
-	spin_lock(&brd->brd_lock);
-	idx = sector >> PAGE_SECTORS_SHIFT;
-	page = radix_tree_delete(&brd->brd_pages, idx);
-	spin_unlock(&brd->brd_lock);
-	if (page)
-		__free_page(page);
-}
-
-static void brd_zero_page(struct brd_device *brd, sector_t sector)
-{
-	struct page *page;
-
-	page = brd_lookup_page(brd, sector);
-	if (page)
-		clear_highpage(page);
-}
-
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -212,24 +190,6 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
 	return 0;
 }
 
-static void discard_from_brd(struct brd_device *brd,
-			sector_t sector, size_t n)
-{
-	while (n >= PAGE_SIZE) {
-		/*
-		 * Don't want to actually discard pages here because
-		 * re-allocating the pages can result in writeback
-		 * deadlocks under heavy load.
-		 */
-		if (0)
-			brd_free_page(brd, sector);
-		else
-			brd_zero_page(brd, sector);
-		sector += PAGE_SIZE >> SECTOR_SHIFT;
-		n -= PAGE_SIZE;
-	}
-}
-
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -338,14 +298,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto io_error;
 
-	if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
-		if (sector & ((PAGE_SIZE >> SECTOR_SHIFT) - 1) ||
-		    bio->bi_iter.bi_size & ~PAGE_MASK)
-			goto io_error;
-		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
-		goto out;
-	}
-
 	bio_for_each_segment(bvec, bio, iter) {
 		unsigned int len = bvec.bv_len;
 		int err;
@@ -357,7 +309,6 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
 		sector += len >> SECTOR_SHIFT;
 	}
 
-out:
 	bio_endio(bio);
 	return BLK_QC_T_NONE;
 io_error:
@@ -464,11 +415,6 @@ static struct brd_device *brd_alloc(int i)
 	 * is harmless)
 	 */
 	blk_queue_physical_block_size(brd->brd_queue, PAGE_SIZE);
-
-	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
-	blk_queue_max_discard_sectors(brd->brd_queue, UINT_MAX);
-	brd->brd_queue->limits.discard_zeroes_data = 1;
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
 #ifdef CONFIG_BLK_DEV_RAM_DAX
 	queue_flag_set_unlocked(QUEUE_FLAG_DAX, brd->brd_queue);
 #endif