Btrfs: fix up seek_hole/seek_data handling

Whoever wrote this was braindead.  Also it doesn't work right if you have
VACANCYs, since we assumed those would only appear at the end of the file,
which won't be the case in the near future.  I tested this with generic/285
and generic/286, as well as the btrfs tests that use fssum, since fssum uses
seek_hole/seek_data to verify things are ok.  Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Josef Bacik 2013-10-18 11:44:46 -04:00 committed by Chris Mason
parent 4277a9c3b3
commit 7f4ca37c48
1 changed file with 19 additions and 75 deletions
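
For context: the code path changed below is reached from userspace through
lseek(2) with SEEK_HOLE/SEEK_DATA, which is how fssum and generic/285/286
drive it.  A minimal sketch of that interface, assuming a sparse file with a
hole in the middle (the file name, offsets, and 4K granularity here are
illustrative assumptions, not taken from the tests):

#define _GNU_SOURCE             /* for SEEK_HOLE/SEEK_DATA */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096] = { 1 };         /* one block of nonzero data */
        off_t off;
        int fd = open("testfile", O_RDWR | O_CREAT | O_TRUNC, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Data at [0,4K), a hole in the middle, data at [1M,1M+4K). */
        pwrite(fd, buf, sizeof(buf), 0);
        pwrite(fd, buf, sizeof(buf), 1024 * 1024);

        /* Expect ~4096; hole granularity depends on the filesystem. */
        off = lseek(fd, 0, SEEK_HOLE);
        printf("first hole at %lld\n", (long long)off);

        /* Expect ~1048576: seeking data from inside the mid-file hole. */
        off = lseek(fd, 4096, SEEK_DATA);
        printf("next data at %lld\n", (long long)off);

        close(fd);
        return 0;
}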


@@ -2405,14 +2405,12 @@ out_reserve_fail:
 static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 {
         struct btrfs_root *root = BTRFS_I(inode)->root;
-        struct extent_map *em;
+        struct extent_map *em = NULL;
         struct extent_state *cached_state = NULL;
         u64 lockstart = *offset;
         u64 lockend = i_size_read(inode);
         u64 start = *offset;
-        u64 orig_start = *offset;
         u64 len = i_size_read(inode);
-        u64 last_end = 0;
         int ret = 0;
 
         lockend = max_t(u64, root->sectorsize, lockend);
@@ -2429,89 +2427,35 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
         lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
                          &cached_state);
 
-        /*
-         * Delalloc is such a pain.  If we have a hole and we have pending
-         * delalloc for a portion of the hole we will get back a hole that
-         * exists for the entire range since it hasn't been actually written
-         * yet.  So to take care of this case we need to look for an extent just
-         * before the position we want in case there is outstanding delalloc
-         * going on here.
-         */
-        if (whence == SEEK_HOLE && start != 0) {
-                if (start <= root->sectorsize)
-                        em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
-                                                     root->sectorsize, 0);
-                else
-                        em = btrfs_get_extent_fiemap(inode, NULL, 0,
                                                      start - root->sectorsize,
-                                                     root->sectorsize, 0);
-                if (IS_ERR(em)) {
-                        ret = PTR_ERR(em);
-                        goto out;
-                }
-                last_end = em->start + em->len;
-                if (em->block_start == EXTENT_MAP_DELALLOC)
-                        last_end = min_t(u64, last_end, inode->i_size);
-                free_extent_map(em);
-        }
-
-        while (1) {
+        while (start < inode->i_size) {
                 em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
                 if (IS_ERR(em)) {
                         ret = PTR_ERR(em);
+                        em = NULL;
                         break;
                 }
 
-                if (em->block_start == EXTENT_MAP_HOLE) {
-                        if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
-                                if (last_end <= orig_start) {
-                                        free_extent_map(em);
-                                        ret = -ENXIO;
-                                        break;
-                                }
-                        }
-
-                        if (whence == SEEK_HOLE) {
-                                *offset = start;
-                                free_extent_map(em);
-                                break;
-                        }
-                } else {
-                        if (whence == SEEK_DATA) {
-                                if (em->block_start == EXTENT_MAP_DELALLOC) {
-                                        if (start >= inode->i_size) {
-                                                free_extent_map(em);
-                                                ret = -ENXIO;
-                                                break;
-                                        }
-                                }
-
-                                if (!test_bit(EXTENT_FLAG_PREALLOC,
-                                              &em->flags)) {
-                                        *offset = start;
-                                        free_extent_map(em);
-                                        break;
-                                }
-                        }
-                }
+                if (whence == SEEK_HOLE &&
+                    (em->block_start == EXTENT_MAP_HOLE ||
+                     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
+                        break;
+                else if (whence == SEEK_DATA &&
+                         (em->block_start != EXTENT_MAP_HOLE &&
+                          !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
+                        break;
 
                 start = em->start + em->len;
-                last_end = em->start + em->len;
-
-                if (em->block_start == EXTENT_MAP_DELALLOC)
-                        last_end = min_t(u64, last_end, inode->i_size);
-
-                if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
-                        free_extent_map(em);
-                        ret = -ENXIO;
-                        break;
-                }
                 free_extent_map(em);
+                em = NULL;
                 cond_resched();
         }
-        if (!ret)
-                *offset = min(*offset, inode->i_size);
-out:
+        free_extent_map(em);
+        if (!ret) {
+                if (whence == SEEK_DATA && start >= inode->i_size)
+                        ret = -ENXIO;
+                else
+                        *offset = min_t(loff_t, start, inode->i_size);
+        }
         unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                              &cached_state, GFP_NOFS);
         return ret;
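
The rewritten loop boils down to one symmetric test per extent map: SEEK_HOLE
stops at the first mapping that is a hole (EXTENT_MAP_HOLE) or an unwritten
prealloc extent (EXTENT_FLAG_PREALLOC), and SEEK_DATA stops at the first
mapping that is neither.  The EXTENT_FLAG_VACANCY and last_end bookkeeping are
gone: the loop condition bounds the scan by i_size, and the final check turns
a SEEK_DATA scan that runs past EOF into -ENXIO, so a VACANCY (or any hole) in
the middle of the file no longer trips the old end-of-file assumption.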