fs: Convert vfs_dedupe_file_range_compare to folios

We still only operate on a single page of data at a time due to using
kmap().  A more complex implementation would work on each page in a folio,
but it's not clear that such a complex implementation would be worthwhile.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Matthew Wilcox (Oracle), 2020-12-14 07:57:07 -05:00
commit 338f379cf7
parent 1613fac9aa
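
The heart of the conversion is the switch from kmap_atomic(page) to kmap_local_folio(). Even when a folio spans several pages, kmap_local_folio() maps only the single page containing the requested offset, which is why the compare loop still caps cmp_len at one page, as the commit message notes. A minimal sketch of the mapping pattern (the helper name is hypothetical, not from this patch):

static void *map_page_of_folio(struct folio *folio, loff_t pos)
{
	/*
	 * offset_in_folio() turns the file position into a byte offset
	 * within the folio; kmap_local_folio() then maps only the page
	 * that contains that offset.  Undo with kunmap_local().
	 */
	return kmap_local_folio(folio, offset_in_folio(folio, pos));
}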


@@ -146,41 +146,41 @@ static int generic_remap_check_len(struct inode *inode_in,
 }
 
 /* Read a page's worth of file data into the page cache. */
-static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+static struct folio *vfs_dedupe_get_folio(struct inode *inode, loff_t pos)
 {
-	struct page *page;
+	struct folio *folio;
 
-	page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
-	if (IS_ERR(page))
-		return page;
-	if (!PageUptodate(page)) {
-		put_page(page);
+	folio = read_mapping_folio(inode->i_mapping, pos >> PAGE_SHIFT, NULL);
+	if (IS_ERR(folio))
+		return folio;
+	if (!folio_test_uptodate(folio)) {
+		folio_put(folio);
 		return ERR_PTR(-EIO);
 	}
-	return page;
+	return folio;
 }
 
 /*
- * Lock two pages, ensuring that we lock in offset order if the pages are from
- * the same file.
+ * Lock two folios, ensuring that we lock in offset order if the folios
+ * are from the same file.
  */
-static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
 {
 	/* Always lock in order of increasing index. */
-	if (page1->index > page2->index)
-		swap(page1, page2);
+	if (folio1->index > folio2->index)
+		swap(folio1, folio2);
 
-	lock_page(page1);
-	if (page1 != page2)
-		lock_page(page2);
+	folio_lock(folio1);
+	if (folio1 != folio2)
+		folio_lock(folio2);
 }
 
-/* Unlock two pages, being careful not to unlock the same page twice. */
-static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+/* Unlock two folios, being careful not to unlock the same folio twice. */
+static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
 {
-	unlock_page(page1);
-	if (page1 != page2)
-		unlock_page(page2);
+	folio_unlock(folio1);
+	if (folio1 != folio2)
+		folio_unlock(folio2);
 }
 
 /*
@@ -188,77 +188,71 @@ static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
  * Caller must have locked both inodes to prevent write races.
  */
 static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
-					 struct inode *dest, loff_t destoff,
+					 struct inode *dest, loff_t dstoff,
 					 loff_t len, bool *is_same)
 {
-	loff_t src_poff;
-	loff_t dest_poff;
-	void *src_addr;
-	void *dest_addr;
-	struct page *src_page;
-	struct page *dest_page;
-	loff_t cmp_len;
-	bool same;
-	int error;
-
-	error = -EINVAL;
-	same = true;
+	bool same = true;
+	int error = -EINVAL;
+
 	while (len) {
-		src_poff = srcoff & (PAGE_SIZE - 1);
-		dest_poff = destoff & (PAGE_SIZE - 1);
-		cmp_len = min(PAGE_SIZE - src_poff,
-			      PAGE_SIZE - dest_poff);
+		struct folio *src_folio, *dst_folio;
+		void *src_addr, *dst_addr;
+		loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
+				     PAGE_SIZE - offset_in_page(dstoff));
+
 		cmp_len = min(cmp_len, len);
 		if (cmp_len <= 0)
 			goto out_error;
 
-		src_page = vfs_dedupe_get_page(src, srcoff);
-		if (IS_ERR(src_page)) {
-			error = PTR_ERR(src_page);
+		src_folio = vfs_dedupe_get_folio(src, srcoff);
+		if (IS_ERR(src_folio)) {
+			error = PTR_ERR(src_folio);
 			goto out_error;
 		}
-		dest_page = vfs_dedupe_get_page(dest, destoff);
-		if (IS_ERR(dest_page)) {
-			error = PTR_ERR(dest_page);
-			put_page(src_page);
+		dst_folio = vfs_dedupe_get_folio(dest, dstoff);
+		if (IS_ERR(dst_folio)) {
+			error = PTR_ERR(dst_folio);
+			folio_put(src_folio);
 			goto out_error;
 		}
 
-		vfs_lock_two_pages(src_page, dest_page);
+		vfs_lock_two_folios(src_folio, dst_folio);
 
 		/*
-		 * Now that we've locked both pages, make sure they're still
+		 * Now that we've locked both folios, make sure they're still
 		 * mapped to the file data we're interested in.  If not,
 		 * someone is invalidating pages on us and we lose.
 		 */
-		if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
-		    src_page->mapping != src->i_mapping ||
-		    dest_page->mapping != dest->i_mapping) {
+		if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
+		    src_folio->mapping != src->i_mapping ||
+		    dst_folio->mapping != dest->i_mapping) {
 			same = false;
 			goto unlock;
 		}
 
-		src_addr = kmap_atomic(src_page);
-		dest_addr = kmap_atomic(dest_page);
+		src_addr = kmap_local_folio(src_folio,
+					offset_in_folio(src_folio, srcoff));
+		dst_addr = kmap_local_folio(dst_folio,
+					offset_in_folio(dst_folio, dstoff));
 
-		flush_dcache_page(src_page);
-		flush_dcache_page(dest_page);
+		flush_dcache_folio(src_folio);
+		flush_dcache_folio(dst_folio);
 
-		if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
+		if (memcmp(src_addr, dst_addr, cmp_len))
 			same = false;
 
-		kunmap_atomic(dest_addr);
-		kunmap_atomic(src_addr);
+		kunmap_local(dst_addr);
+		kunmap_local(src_addr);
 
 unlock:
-		vfs_unlock_two_pages(src_page, dest_page);
-		put_page(dest_page);
-		put_page(src_page);
+		vfs_unlock_two_folios(src_folio, dst_folio);
+		folio_put(dst_folio);
+		folio_put(src_folio);
 
 		if (!same)
 			break;
 
 		srcoff += cmp_len;
-		destoff += cmp_len;
+		dstoff += cmp_len;
 		len -= cmp_len;
 	}
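
The "more complex implementation" the commit message alludes to would compare up to a whole folio before dropping the folio references, remapping one page-sized chunk at a time. A rough, hypothetical sketch of that shape, assuming both folios have already been read, locked, and revalidated as in the patch (this helper does not exist in the kernel):

static bool folio_data_matches(struct folio *src_folio, loff_t srcoff,
			       struct folio *dst_folio, loff_t dstoff,
			       loff_t len)
{
	while (len > 0) {
		/*
		 * kmap_local_folio() still maps a single page, so clamp
		 * each chunk to the smaller of the two page remainders.
		 */
		loff_t chunk = min(PAGE_SIZE - offset_in_page(srcoff),
				   PAGE_SIZE - offset_in_page(dstoff));
		void *src, *dst;
		bool same;

		chunk = min(chunk, len);
		src = kmap_local_folio(src_folio,
				       offset_in_folio(src_folio, srcoff));
		dst = kmap_local_folio(dst_folio,
				       offset_in_folio(dst_folio, dstoff));
		same = !memcmp(src, dst, chunk);
		/* Unmap in reverse order of mapping, as kmap_local requires. */
		kunmap_local(dst);
		kunmap_local(src);

		if (!same)
			return false;
		srcoff += chunk;
		dstoff += chunk;
		len -= chunk;
	}
	return true;
}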