netfs: Provide a launder_folio implementation
Provide a launder_folio implementation for netfslib.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
This commit is contained in:
parent
62c3b7481b
commit
4a79616cfb
|
@ -1111,3 +1111,77 @@ out:
|
|||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(netfs_writepages);
|
||||
|
||||
/*
|
||||
* Deal with the disposition of a laundered folio.
|
||||
*/
|
||||
static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
|
||||
{
|
||||
if (wreq->error) {
|
||||
pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
|
||||
mapping_set_error(wreq->mapping, wreq->error);
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * netfs_launder_folio - Clean up a dirty folio that's being invalidated
 * @folio: The folio to clean
 *
 * This is called to write back a folio that's being invalidated when an inode
 * is getting torn down.  Ideally, writepages would be used instead.
 *
 * Return: 0 on success or a negative error code if the write request could
 * not be allocated or the write failed.
 */
int netfs_launder_folio(struct folio *folio)
{
	struct netfs_io_request *wreq;
	struct address_space *mapping = folio->mapping;
	struct netfs_folio *finfo = netfs_folio_info(folio); /* NULL if wholly dirty */
	struct netfs_group *group = netfs_folio_group(folio);
	struct bio_vec bvec;
	unsigned long long i_size = i_size_read(mapping->host);
	unsigned long long start = folio_pos(folio);
	size_t offset = 0, len;
	int ret = 0;

	/* If only part of the folio is recorded as dirty, restrict the write
	 * to that region; otherwise write back the whole folio.
	 */
	if (finfo) {
		offset = finfo->dirty_offset;
		start += offset;
		len = finfo->dirty_len;
	} else {
		len = folio_size(folio);
	}
	/* Trim the write so it doesn't extend past EOF.  NOTE(review): assumes
	 * start < i_size for a dirty folio; if start >= i_size the unsigned
	 * subtraction wraps and len is left unclamped - TODO confirm callers
	 * cannot present such a folio here.
	 */
	len = min_t(unsigned long long, len, i_size - start);

	wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
	if (IS_ERR(wreq)) {
		ret = PTR_ERR(wreq);
		goto out;
	}

	/* If someone else cleaned the folio first, there's no write to issue,
	 * but the private data still gets discarded below.
	 */
	if (!folio_clear_dirty_for_io(folio))
		goto out_put;

	trace_netfs_folio(folio, netfs_folio_trace_launder);

	_debug("launder %llx-%llx", start, start + len - 1);

	/* Speculatively write to the cache.  We have to fix this up later if
	 * the store fails.
	 */
	wreq->cleanup = netfs_cleanup_launder_folio;

	/* Point the request's iterator at the dirty span of this single folio
	 * and mark the request as needing to go to the server, then issue the
	 * write synchronously.
	 */
	bvec_set_folio(&bvec, folio, len, offset);
	iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
	__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);

out_put:
	/* Strip the folio's writeback bookkeeping now that the write has been
	 * issued (or skipped): detach and free the per-folio info and drop
	 * the group and request references.
	 */
	folio_detach_private(folio);
	netfs_put_group(group);
	kfree(finfo);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
out:
	/* Wait for any in-progress write-to-cache on this folio to complete
	 * before the caller invalidates it.
	 */
	folio_wait_fscache(folio);
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_launder_folio);
|
||||
|
|
|
@ -30,6 +30,7 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
|
|||
[NETFS_READPAGE] = "RP",
|
||||
[NETFS_READ_FOR_WRITE] = "RW",
|
||||
[NETFS_WRITEBACK] = "WB",
|
||||
[NETFS_LAUNDER_WRITE] = "LW",
|
||||
[NETFS_UNBUFFERED_WRITE] = "UW",
|
||||
[NETFS_DIO_READ] = "DR",
|
||||
[NETFS_DIO_WRITE] = "DW",
|
||||
|
|
|
@ -227,6 +227,7 @@ enum netfs_io_origin {
|
|||
NETFS_READPAGE, /* This read is a synchronous read */
|
||||
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
|
||||
NETFS_WRITEBACK, /* This write was triggered by writepages */
|
||||
NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */
|
||||
NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */
|
||||
NETFS_DIO_READ, /* This is a direct I/O read */
|
||||
NETFS_DIO_WRITE, /* This is a direct I/O write */
|
||||
|
@ -404,6 +405,7 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
|
|||
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
|
||||
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
|
||||
bool netfs_release_folio(struct folio *folio, gfp_t gfp);
|
||||
int netfs_launder_folio(struct folio *folio);
|
||||
|
||||
/* VMA operations API. */
|
||||
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
|
||||
#define netfs_write_traces \
|
||||
EM(netfs_write_trace_dio_write, "DIO-WRITE") \
|
||||
EM(netfs_write_trace_launder, "LAUNDER ") \
|
||||
EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
|
||||
E_(netfs_write_trace_writeback, "WRITEBACK")
|
||||
|
||||
|
@ -33,6 +34,7 @@
|
|||
EM(NETFS_READPAGE, "RP") \
|
||||
EM(NETFS_READ_FOR_WRITE, "RW") \
|
||||
EM(NETFS_WRITEBACK, "WB") \
|
||||
EM(NETFS_LAUNDER_WRITE, "LW") \
|
||||
EM(NETFS_UNBUFFERED_WRITE, "UW") \
|
||||
EM(NETFS_DIO_READ, "DR") \
|
||||
E_(NETFS_DIO_WRITE, "DW")
|
||||
|
@ -127,6 +129,7 @@
|
|||
EM(netfs_folio_trace_end_copy, "end-copy") \
|
||||
EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
|
||||
EM(netfs_folio_trace_kill, "kill") \
|
||||
EM(netfs_folio_trace_launder, "launder") \
|
||||
EM(netfs_folio_trace_mkwrite, "mkwrite") \
|
||||
EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
|
||||
EM(netfs_folio_trace_read_gaps, "read-gaps") \
|
||||
|
|
Loading…
Reference in New Issue