From 03ffae909278bd773ae4ce0f15fd8fd77a7b08a4 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Mon, 10 Feb 2020 10:00:22 +0000
Subject: [PATCH 01/14] afs: Disable use of the fscache I/O routines

Disable use of the fscache I/O routines by the AFS filesystem. It's
about to transition to passing iov_iters down, and fscache is about to
have its I/O path converted to use iov_iters too, so all of that needs
to change.

Signed-off-by: David Howells
Tested-By: Marc Dionne
cc: linux-afs@lists.infradead.org
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/158861209824.340223.1864211542341758994.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/159465768717.1376105.2229314852486665807.stgit@warthog.procyon.org.uk/
Link: https://lore.kernel.org/r/160588457929.3465195.1730097418904945578.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161118143744.1232039.2727898205333669064.stgit@warthog.procyon.org.uk/ # rfc
Link: https://lore.kernel.org/r/161161039077.2537118.7986870854927176905.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/161340403323.1303470.8159439948319423431.stgit@warthog.procyon.org.uk/ # v3
Link: https://lore.kernel.org/r/161539547167.286939.3536238932531122332.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/161653802797.2770958.547311814861545911.stgit@warthog.procyon.org.uk/ # v5
Link: https://lore.kernel.org/r/161789085806.6155.2596146255056027428.stgit@warthog.procyon.org.uk/ # v6
---
 fs/afs/file.c  | 195 +++++++++----------------------------------
 fs/afs/inode.c |   2 +-
 fs/afs/write.c |  10 ---
 3 files changed, 34 insertions(+), 173 deletions(-)

diff --git a/fs/afs/file.c b/fs/afs/file.c
index 960b64268623..314f6a9517c7 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -202,24 +202,6 @@ void afs_put_read(struct afs_read *req)
 	}
 }
 
-#ifdef CONFIG_AFS_FSCACHE
-/*
- * deal with notification that a page was read from the cache
- */
-static void afs_file_readpage_read_complete(struct page *page,
-					    void *data,
-					    int error)
-{
-	_enter("%p,%p,%d", page, data, error);
-
-	/* if the read completes with an error, we just unlock the page and let
-	 * the VM reissue the readpage */
-	if (!error)
-		SetPageUptodate(page);
-	unlock_page(page);
-}
-#endif
-
 static void afs_fetch_data_success(struct afs_operation *op)
 {
 	struct afs_vnode *vnode = op->file[0].vnode;
@@ -287,89 +269,46 @@ int afs_page_filler(void *data, struct page *page)
 	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
 		goto error;
 
-	/* is it cached? */
-#ifdef CONFIG_AFS_FSCACHE
-	ret = fscache_read_or_alloc_page(vnode->cache,
-					 page,
-					 afs_file_readpage_read_complete,
-					 NULL,
-					 GFP_KERNEL);
-#else
-	ret = -ENOBUFS;
-#endif
-	switch (ret) {
-		/* read BIO submitted (page in cache) */
-	case 0:
-		break;
+	req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
+	if (!req)
+		goto enomem;
 
-		/* page not yet cached */
-	case -ENODATA:
-		_debug("cache said ENODATA");
-		goto go_on;
+	/* We request a full page. If the page is a partial one at the
+	 * end of the file, the server will return a short read and the
+	 * unmarshalling code will clear the unfilled space.
+ */ + refcount_set(&req->usage, 1); + req->pos = (loff_t)page->index << PAGE_SHIFT; + req->len = PAGE_SIZE; + req->nr_pages = 1; + req->pages = req->array; + req->pages[0] = page; + get_page(page); - /* page will not be cached */ - case -ENOBUFS: - _debug("cache said ENOBUFS"); + /* read the contents of the file from the server into the + * page */ + ret = afs_fetch_data(vnode, key, req); + afs_put_read(req); - fallthrough; - default: - go_on: - req = kzalloc(struct_size(req, array, 1), GFP_KERNEL); - if (!req) - goto enomem; - - /* We request a full page. If the page is a partial one at the - * end of the file, the server will return a short read and the - * unmarshalling code will clear the unfilled space. - */ - refcount_set(&req->usage, 1); - req->pos = (loff_t)page->index << PAGE_SHIFT; - req->len = PAGE_SIZE; - req->nr_pages = 1; - req->pages = req->array; - req->pages[0] = page; - get_page(page); - - /* read the contents of the file from the server into the - * page */ - ret = afs_fetch_data(vnode, key, req); - afs_put_read(req); - - if (ret < 0) { - if (ret == -ENOENT) { - _debug("got NOENT from server" - " - marking file deleted and stale"); - set_bit(AFS_VNODE_DELETED, &vnode->flags); - ret = -ESTALE; - } - -#ifdef CONFIG_AFS_FSCACHE - fscache_uncache_page(vnode->cache, page); -#endif - BUG_ON(PageFsCache(page)); - - if (ret == -EINTR || - ret == -ENOMEM || - ret == -ERESTARTSYS || - ret == -EAGAIN) - goto error; - goto io_error; + if (ret < 0) { + if (ret == -ENOENT) { + _debug("got NOENT from server" + " - marking file deleted and stale"); + set_bit(AFS_VNODE_DELETED, &vnode->flags); + ret = -ESTALE; } - SetPageUptodate(page); - - /* send the page to the cache */ -#ifdef CONFIG_AFS_FSCACHE - if (PageFsCache(page) && - fscache_write_page(vnode->cache, page, vnode->status.size, - GFP_KERNEL) != 0) { - fscache_uncache_page(vnode->cache, page); - BUG_ON(PageFsCache(page)); - } -#endif - unlock_page(page); + if (ret == -EINTR || + ret == -ENOMEM || + ret == -ERESTARTSYS || + ret == -EAGAIN) + goto error; + goto io_error; } + SetPageUptodate(page); + unlock_page(page); + _leave(" = 0"); return 0; @@ -415,23 +354,10 @@ static int afs_readpage(struct file *file, struct page *page) */ static void afs_readpages_page_done(struct afs_read *req) { -#ifdef CONFIG_AFS_FSCACHE - struct afs_vnode *vnode = req->vnode; -#endif struct page *page = req->pages[req->index]; req->pages[req->index] = NULL; SetPageUptodate(page); - - /* send the page to the cache */ -#ifdef CONFIG_AFS_FSCACHE - if (PageFsCache(page) && - fscache_write_page(vnode->cache, page, vnode->status.size, - GFP_KERNEL) != 0) { - fscache_uncache_page(vnode->cache, page); - BUG_ON(PageFsCache(page)); - } -#endif unlock_page(page); put_page(page); } @@ -490,9 +416,6 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, index = page->index; if (add_to_page_cache_lru(page, mapping, index, readahead_gfp_mask(mapping))) { -#ifdef CONFIG_AFS_FSCACHE - fscache_uncache_page(vnode->cache, page); -#endif put_page(page); break; } @@ -525,9 +448,6 @@ error: for (i = 0; i < req->nr_pages; i++) { page = req->pages[i]; if (page) { -#ifdef CONFIG_AFS_FSCACHE - fscache_uncache_page(vnode->cache, page); -#endif SetPageError(page); unlock_page(page); } @@ -559,37 +479,6 @@ static int afs_readpages(struct file *file, struct address_space *mapping, } /* attempt to read as many of the pages as possible */ -#ifdef CONFIG_AFS_FSCACHE - ret = fscache_read_or_alloc_pages(vnode->cache, - mapping, - pages, - &nr_pages, - 
afs_file_readpage_read_complete, - NULL, - mapping_gfp_mask(mapping)); -#else - ret = -ENOBUFS; -#endif - - switch (ret) { - /* all pages are being read from the cache */ - case 0: - BUG_ON(!list_empty(pages)); - BUG_ON(nr_pages != 0); - _leave(" = 0 [reading all]"); - return 0; - - /* there were pages that couldn't be read from the cache */ - case -ENODATA: - case -ENOBUFS: - break; - - /* other error */ - default: - _leave(" = %d", ret); - return ret; - } - while (!list_empty(pages)) { ret = afs_readpages_one(file, mapping, pages); if (ret < 0) @@ -669,17 +558,6 @@ static void afs_invalidatepage(struct page *page, unsigned int offset, BUG_ON(!PageLocked(page)); -#ifdef CONFIG_AFS_FSCACHE - /* we clean up only if the entire page is being invalidated */ - if (offset == 0 && length == PAGE_SIZE) { - if (PageFsCache(page)) { - struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); - fscache_wait_on_page_write(vnode->cache, page); - fscache_uncache_page(vnode->cache, page); - } - } -#endif - if (PagePrivate(page)) afs_invalidate_dirty(page, offset, length); @@ -701,13 +579,6 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags) /* deny if page is being written to the cache and the caller hasn't * elected to wait */ -#ifdef CONFIG_AFS_FSCACHE - if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) { - _leave(" = F [cache busy]"); - return 0; - } -#endif - if (PagePrivate(page)) { priv = (unsigned long)detach_page_private(page); trace_afs_page_dirty(vnode, tracepoint_string("rel"), diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 12be88716e4c..8de6f05987b4 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -427,7 +427,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) } __packed key; struct afs_vnode_cache_aux aux; - if (vnode->status.type == AFS_FTYPE_DIR) { + if (vnode->status.type != AFS_FTYPE_FILE) { vnode->cache = NULL; return; } diff --git a/fs/afs/write.c b/fs/afs/write.c index eb737ed63afb..901bd2ee2dd0 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -847,9 +847,6 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) /* Wait for the page to be written to the cache before we allow it to * be modified. We then assume the entire page will need writing back. */ -#ifdef CONFIG_AFS_FSCACHE - fscache_wait_on_page_write(vnode->cache, vmf->page); -#endif if (wait_on_page_writeback_killable(vmf->page)) return VM_FAULT_RETRY; @@ -935,12 +932,5 @@ int afs_launder_page(struct page *page) priv = (unsigned long)detach_page_private(page); trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page->index, priv); - -#ifdef CONFIG_AFS_FSCACHE - if (PageFsCache(page)) { - fscache_wait_on_page_write(vnode->cache, page); - fscache_uncache_page(vnode->cache, page); - } -#endif return ret; } From 67d78a6f6e7b38c1beb7d8c09c6d40f8682e60b1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 28 Oct 2020 14:23:46 +0000 Subject: [PATCH 02/14] afs: Pass page into dirty region helpers to provide THP size Pass a pointer to the page being accessed into the dirty region helpers so that the size of the page can be determined in case it's a transparent huge page. This also required the page to be passed into the afs_page_dirty trace point - so there's no need to specifically pass in the index or private data as these can be retrieved directly from the page struct. 
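
To make the effect of the resolution change concrete, here's a minimal
userspace sketch of the pack/unpack arithmetic (this assumes the 32-bit
layout shown in internal.h, i.e. a 15-bit mask and a shift of 16; the
standalone harness and names are illustrative, not kernel code):

    /* Userspace sketch of the dirty-region encoding (not kernel code). */
    #include <stdio.h>

    #define PRIV_SHIFT  16          /* cf. __AFS_PAGE_PRIV_SHIFT (32-bit layout) */
    #define PRIV_MASK   0x7fffUL    /* cf. __AFS_PAGE_PRIV_MASK */
    #define PAGE_SHIFT  12          /* 4KiB base pages */

    /* Bytes per encoding unit for a page of order @order (0 = 4KiB). */
    static unsigned int dirty_resolution(unsigned int order)
    {
            int shift = order + PAGE_SHIFT - (PRIV_SHIFT - 1);
            return shift > 0 ? shift : 0;
    }

    static unsigned long dirty_encode(unsigned int order, size_t from, size_t to)
    {
            unsigned int res = dirty_resolution(order);

            from >>= res;
            to = (to - 1) >> res;   /* upper bound stored inclusive */
            return (to << PRIV_SHIFT) | from;
    }

    static size_t dirty_from(unsigned int order, unsigned long priv)
    {
            return (priv & PRIV_MASK) << dirty_resolution(order);
    }

    static size_t dirty_to(unsigned int order, unsigned long priv)
    {
            return (((priv >> PRIV_SHIFT) & PRIV_MASK) + 1) << dirty_resolution(order);
    }

    int main(void)
    {
            unsigned int orders[] = { 0, 9 };   /* 4KiB page and 2MiB THP */

            for (int i = 0; i < 2; i++) {
                    unsigned long priv = dirty_encode(orders[i], 100, 3000);

                    printf("order %u: priv=%#lx from=%zu to=%zu\n", orders[i],
                           priv, dirty_from(orders[i], priv),
                           dirty_to(orders[i], priv));
            }
            return 0;
    }

On an order-0 page the bounds come back byte-accurate (100 and 3000); on
an order-9 THP the resolution drops to 64 bytes and the region is only
ever widened (64 and 3008), which errs on the side of writing back too
much rather than too little.
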
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/160588527183.3465195.16107942526481976308.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118144921.1232039.11377711180492625929.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161040747.2537118.11435394902674511430.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340404553.1303470.11414163641767769882.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539548385.286939.8864598314493255313.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653804285.2770958.3497360004849598038.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789087043.6155.16922142208140170528.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/file.c | 20 ++++++------- fs/afs/internal.h | 16 +++++----- fs/afs/write.c | 60 ++++++++++++++++---------------------- include/trace/events/afs.h | 23 ++++++++------- 4 files changed, 55 insertions(+), 64 deletions(-) diff --git a/fs/afs/file.c b/fs/afs/file.c index 314f6a9517c7..f1bae0b0a9c0 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -514,8 +514,8 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset, return; /* We may need to shorten the dirty region */ - f = afs_page_dirty_from(priv); - t = afs_page_dirty_to(priv); + f = afs_page_dirty_from(page, priv); + t = afs_page_dirty_to(page, priv); if (t <= offset || f >= end) return; /* Doesn't overlap */ @@ -533,17 +533,17 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset, if (f == t) goto undirty; - priv = afs_page_dirty(f, t); + priv = afs_page_dirty(page, f, t); set_page_private(page, priv); - trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page); return; undirty: - trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page); clear_page_dirty_for_io(page); full_invalidate: - priv = (unsigned long)detach_page_private(page); - trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv); + detach_page_private(page); + trace_afs_page_dirty(vnode, tracepoint_string("inval"), page); } /* @@ -571,7 +571,6 @@ static void afs_invalidatepage(struct page *page, unsigned int offset, static int afs_releasepage(struct page *page, gfp_t gfp_flags) { struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); - unsigned long priv; _enter("{{%llx:%llu}[%lu],%lx},%x", vnode->fid.vid, vnode->fid.vnode, page->index, page->flags, @@ -580,9 +579,8 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags) /* deny if page is being written to the cache and the caller hasn't * elected to wait */ if (PagePrivate(page)) { - priv = (unsigned long)detach_page_private(page); - trace_afs_page_dirty(vnode, tracepoint_string("rel"), - page->index, priv); + detach_page_private(page); + trace_afs_page_dirty(vnode, tracepoint_string("rel"), page); } /* indicate that the page can be released */ diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 1627b1872812..fd437d4722b5 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -875,31 +875,31 @@ struct afs_vnode_cache_aux { #define __AFS_PAGE_PRIV_MMAPPED 0x8000UL #endif -static inline unsigned int afs_page_dirty_resolution(void) +static inline unsigned int 
afs_page_dirty_resolution(struct page *page) { - int shift = PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1); + int shift = thp_order(page) + PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1); return (shift > 0) ? shift : 0; } -static inline size_t afs_page_dirty_from(unsigned long priv) +static inline size_t afs_page_dirty_from(struct page *page, unsigned long priv) { unsigned long x = priv & __AFS_PAGE_PRIV_MASK; /* The lower bound is inclusive */ - return x << afs_page_dirty_resolution(); + return x << afs_page_dirty_resolution(page); } -static inline size_t afs_page_dirty_to(unsigned long priv) +static inline size_t afs_page_dirty_to(struct page *page, unsigned long priv) { unsigned long x = (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK; /* The upper bound is immediately beyond the region */ - return (x + 1) << afs_page_dirty_resolution(); + return (x + 1) << afs_page_dirty_resolution(page); } -static inline unsigned long afs_page_dirty(size_t from, size_t to) +static inline unsigned long afs_page_dirty(struct page *page, size_t from, size_t to) { - unsigned int res = afs_page_dirty_resolution(); + unsigned int res = afs_page_dirty_resolution(page); from >>= res; to = (to - 1) >> res; return (to << __AFS_PAGE_PRIV_SHIFT) | from; diff --git a/fs/afs/write.c b/fs/afs/write.c index 901bd2ee2dd0..babc84dd9719 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -112,15 +112,14 @@ try_again: t = f = 0; if (PagePrivate(page)) { priv = page_private(page); - f = afs_page_dirty_from(priv); - t = afs_page_dirty_to(priv); + f = afs_page_dirty_from(page, priv); + t = afs_page_dirty_to(page, priv); ASSERTCMP(f, <=, t); } if (f != t) { if (PageWriteback(page)) { - trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), - page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page); goto flush_conflicting_write; } /* If the file is being filled locally, allow inter-write @@ -204,21 +203,19 @@ int afs_write_end(struct file *file, struct address_space *mapping, if (PagePrivate(page)) { priv = page_private(page); - f = afs_page_dirty_from(priv); - t = afs_page_dirty_to(priv); + f = afs_page_dirty_from(page, priv); + t = afs_page_dirty_to(page, priv); if (from < f) f = from; if (to > t) t = to; - priv = afs_page_dirty(f, t); + priv = afs_page_dirty(page, f, t); set_page_private(page, priv); - trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), - page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page); } else { - priv = afs_page_dirty(from, to); + priv = afs_page_dirty(page, from, to); attach_page_private(page, (void *)priv); - trace_afs_page_dirty(vnode, tracepoint_string("dirty"), - page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page); } set_page_dirty(page); @@ -321,7 +318,6 @@ static void afs_pages_written_back(struct afs_vnode *vnode, pgoff_t first, pgoff_t last) { struct pagevec pv; - unsigned long priv; unsigned count, loop; _enter("{%llx:%llu},{%lx-%lx}", @@ -340,9 +336,9 @@ static void afs_pages_written_back(struct afs_vnode *vnode, ASSERTCMP(pv.nr, ==, count); for (loop = 0; loop < count; loop++) { - priv = (unsigned long)detach_page_private(pv.pages[loop]); + detach_page_private(pv.pages[loop]); trace_afs_page_dirty(vnode, tracepoint_string("clear"), - pv.pages[loop]->index, priv); + pv.pages[loop]); end_page_writeback(pv.pages[loop]); } first += count; @@ -516,15 +512,13 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, */ start = primary_page->index; priv = page_private(primary_page); - 
offset = afs_page_dirty_from(priv); - to = afs_page_dirty_to(priv); - trace_afs_page_dirty(vnode, tracepoint_string("store"), - primary_page->index, priv); + offset = afs_page_dirty_from(primary_page, priv); + to = afs_page_dirty_to(primary_page, priv); + trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page); WARN_ON(offset == to); if (offset == to) - trace_afs_page_dirty(vnode, tracepoint_string("WARN"), - primary_page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page); if (start >= final_page || (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))) @@ -562,8 +556,8 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, } priv = page_private(page); - f = afs_page_dirty_from(priv); - t = afs_page_dirty_to(priv); + f = afs_page_dirty_from(page, priv); + t = afs_page_dirty_to(page, priv); if (f != 0 && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) { unlock_page(page); @@ -571,8 +565,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, } to = t; - trace_afs_page_dirty(vnode, tracepoint_string("store+"), - page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("store+"), page); if (!clear_page_dirty_for_io(page)) BUG(); @@ -860,14 +853,13 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) */ wait_on_page_writeback(vmf->page); - priv = afs_page_dirty(0, PAGE_SIZE); + priv = afs_page_dirty(vmf->page, 0, PAGE_SIZE); priv = afs_page_dirty_mmapped(priv); - trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), - vmf->page->index, priv); if (PagePrivate(vmf->page)) set_page_private(vmf->page, priv); else attach_page_private(vmf->page, (void *)priv); + trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), vmf->page); file_update_time(file); sb_end_pagefault(inode->i_sb); @@ -920,17 +912,15 @@ int afs_launder_page(struct page *page) f = 0; t = PAGE_SIZE; if (PagePrivate(page)) { - f = afs_page_dirty_from(priv); - t = afs_page_dirty_to(priv); + f = afs_page_dirty_from(page, priv); + t = afs_page_dirty_to(page, priv); } - trace_afs_page_dirty(vnode, tracepoint_string("launder"), - page->index, priv); + trace_afs_page_dirty(vnode, tracepoint_string("launder"), page); ret = afs_store_data(mapping, page->index, page->index, t, f, true); } - priv = (unsigned long)detach_page_private(page); - trace_afs_page_dirty(vnode, tracepoint_string("laundered"), - page->index, priv); + detach_page_private(page); + trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page); return ret; } diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 4a5cc8c64be3..9203cf6a8c53 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -969,30 +969,33 @@ TRACE_EVENT(afs_dir_check_failed, ); TRACE_EVENT(afs_page_dirty, - TP_PROTO(struct afs_vnode *vnode, const char *where, - pgoff_t page, unsigned long priv), + TP_PROTO(struct afs_vnode *vnode, const char *where, struct page *page), - TP_ARGS(vnode, where, page, priv), + TP_ARGS(vnode, where, page), TP_STRUCT__entry( __field(struct afs_vnode *, vnode ) __field(const char *, where ) __field(pgoff_t, page ) - __field(unsigned long, priv ) + __field(unsigned long, from ) + __field(unsigned long, to ) ), TP_fast_assign( __entry->vnode = vnode; __entry->where = where; - __entry->page = page; - __entry->priv = priv; + __entry->page = page->index; + __entry->from = afs_page_dirty_from(page, page->private); + __entry->to = afs_page_dirty_to(page, page->private); + __entry->to |= 
(afs_is_page_dirty_mmapped(page->private) ? + (1UL << (BITS_PER_LONG - 1)) : 0); ), - TP_printk("vn=%p %lx %s %zx-%zx%s", + TP_printk("vn=%p %lx %s %lx-%lx%s", __entry->vnode, __entry->page, __entry->where, - afs_page_dirty_from(__entry->priv), - afs_page_dirty_to(__entry->priv), - afs_is_page_dirty_mmapped(__entry->priv) ? " M" : "") + __entry->from, + __entry->to & ~(1UL << (BITS_PER_LONG - 1)), + __entry->to & (1UL << (BITS_PER_LONG - 1)) ? " M" : "") ); TRACE_EVENT(afs_call_state, From f015cf1d6b660fc5933baecab2917357e669916b Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 22 Oct 2020 14:38:15 +0100 Subject: [PATCH 03/14] afs: Print the operation debug_id when logging an unexpected data version Print the afs_operation debug_id when logging an unexpected change in the data version. This allows the logged message to be matched against tracelines. Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/160588528377.3465195.2206051235095182302.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118146111.1232039.11398082422487058312.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161042180.2537118.2471333561661033316.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340405772.1303470.3877167548944248214.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539549628.286939.15234870409714613954.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653805530.2770958.15120507632529970934.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789088290.6155.3494369629853673866.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/inode.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 8de6f05987b4..a4bb3ac762be 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -214,11 +214,12 @@ static void afs_apply_status(struct afs_operation *op, if (vp->dv_before + vp->dv_delta != status->data_version) { if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) - pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s\n", + pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s (op=%x)\n", vnode->fid.vid, vnode->fid.vnode, (unsigned long long)vp->dv_before + vp->dv_delta, (unsigned long long)status->data_version, - op->type ? op->type->name : "???"); + op->type ? op->type->name : "???", + op->debug_id); vnode->invalid_before = status->data_version; if (vnode->status.type == AFS_FTYPE_DIR) { From c69bf479baa614f5e80a1ded355e752e15a52b72 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:27 +0000 Subject: [PATCH 04/14] afs: Move key to afs_read struct Stash the key used to authenticate read operations in the afs_read struct. This will be necessary to reissue the operation against the server if a read from the cache fails in upcoming cache changes. 
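
The ownership rule, as a rough userspace sketch (illustrative types and
names, not the kernel key API): the request takes its own reference on
the key when it is set up and drops it with the last reference on the
request, so the read can be completed or retried without the
originating file context still being around:

    #include <stdio.h>
    #include <stdlib.h>

    struct key      { int usage; int serial; };
    struct read_req { int usage; struct key *key; };

    static struct key *key_get(struct key *k) { k->usage++; return k; }
    static void key_put(struct key *k) { if (--k->usage == 0) free(k); }

    static struct read_req *alloc_read(struct key *key)
    {
            struct read_req *req = calloc(1, sizeof(*req));

            if (!req)
                    return NULL;
            req->usage = 1;
            req->key = key_get(key);        /* req owns a ref for its lifetime */
            return req;
    }

    static void put_read(struct read_req *req)
    {
            if (--req->usage == 0) {
                    key_put(req->key);      /* dropped with the last req ref */
                    free(req);
            }
    }

    int main(void)
    {
            struct key *key = calloc(1, sizeof(*key));
            struct read_req *req;

            if (!key)
                    return 1;
            key->usage = 1;
            key->serial = 42;

            req = alloc_read(key);
            key_put(key);   /* the caller's ref can go away... */

            /* ...but req's ref keeps the key alive for a reissue */
            printf("reissuing read with key %d\n", req->key->serial);
            put_read(req);
            return 0;
    }
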
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/158861248336.340223.1851189950710196001.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/159465823899.1377938.11925978022348532049.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/160588529557.3465195.7303323479305254243.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118147693.1232039.13780672951838643842.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161043340.2537118.511899217704140722.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340406678.1303470.12676824086429446370.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539550819.286939.1268332875889175195.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653806683.2770958.11300984379283401542.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789089556.6155.14603302893431820997.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/dir.c | 3 ++- fs/afs/file.c | 16 +++++++++------- fs/afs/internal.h | 3 ++- fs/afs/write.c | 12 ++++++------ 4 files changed, 19 insertions(+), 15 deletions(-) diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 17548c1faf02..d8825ce63eba 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -241,6 +241,7 @@ retry: return ERR_PTR(-ENOMEM); refcount_set(&req->usage, 1); + req->key = key_get(key); req->nr_pages = nr_pages; req->actual_len = i_size; /* May change */ req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */ @@ -305,7 +306,7 @@ retry: if (!test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { trace_afs_reload_dir(dvnode); - ret = afs_fetch_data(dvnode, key, req); + ret = afs_fetch_data(dvnode, req); if (ret < 0) goto error_unlock; diff --git a/fs/afs/file.c b/fs/afs/file.c index f1bae0b0a9c0..af6471defec3 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -198,6 +198,7 @@ void afs_put_read(struct afs_read *req) if (req->pages != req->array) kfree(req->pages); } + key_put(req->key); kfree(req); } } @@ -228,7 +229,7 @@ static const struct afs_operation_ops afs_fetch_data_operation = { /* * Fetch file data from the volume. */ -int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req) +int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) { struct afs_operation *op; @@ -237,9 +238,9 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *re vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, - key_serial(key)); + key_serial(req->key)); - op = afs_alloc_operation(key, vnode->volume); + op = afs_alloc_operation(req->key, vnode->volume); if (IS_ERR(op)) return PTR_ERR(op); @@ -278,6 +279,7 @@ int afs_page_filler(void *data, struct page *page) * unmarshalling code will clear the unfilled space. 
*/ refcount_set(&req->usage, 1); + req->key = key_get(key); req->pos = (loff_t)page->index << PAGE_SHIFT; req->len = PAGE_SIZE; req->nr_pages = 1; @@ -287,7 +289,7 @@ int afs_page_filler(void *data, struct page *page) /* read the contents of the file from the server into the * page */ - ret = afs_fetch_data(vnode, key, req); + ret = afs_fetch_data(vnode, req); afs_put_read(req); if (ret < 0) { @@ -372,7 +374,6 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, struct afs_read *req; struct list_head *p; struct page *first, *page; - struct key *key = afs_file_key(file); pgoff_t index; int ret, n, i; @@ -396,6 +397,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, refcount_set(&req->usage, 1); req->vnode = vnode; + req->key = key_get(afs_file_key(file)); req->page_done = afs_readpages_page_done; req->pos = first->index; req->pos <<= PAGE_SHIFT; @@ -425,11 +427,11 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, } while (req->nr_pages < n); if (req->nr_pages == 0) { - kfree(req); + afs_put_read(req); return 0; } - ret = afs_fetch_data(vnode, key, req); + ret = afs_fetch_data(vnode, req); if (ret < 0) goto error; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index fd437d4722b5..995fef267be7 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -204,6 +204,7 @@ struct afs_read { loff_t actual_len; /* How much we're actually getting */ loff_t remain; /* Amount remaining */ loff_t file_size; /* File size returned by server */ + struct key *key; /* The key to use to reissue the read */ afs_dataversion_t data_version; /* Version number returned by server */ refcount_t usage; unsigned int index; /* Which page we're reading into */ @@ -1045,7 +1046,7 @@ extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *); extern void afs_put_wb_key(struct afs_wb_key *); extern int afs_open(struct inode *, struct file *); extern int afs_release(struct inode *, struct file *); -extern int afs_fetch_data(struct afs_vnode *, struct key *, struct afs_read *); +extern int afs_fetch_data(struct afs_vnode *, struct afs_read *); extern int afs_page_filler(void *, struct page *); extern void afs_put_read(struct afs_read *); diff --git a/fs/afs/write.c b/fs/afs/write.c index babc84dd9719..a91da2e680da 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -25,9 +25,10 @@ int afs_set_page_dirty(struct page *page) /* * partly or wholly fill a page that's under preparation for writing */ -static int afs_fill_page(struct afs_vnode *vnode, struct key *key, +static int afs_fill_page(struct file *file, loff_t pos, unsigned int len, struct page *page) { + struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct afs_read *req; size_t p; void *data; @@ -49,6 +50,7 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, return -ENOMEM; refcount_set(&req->usage, 1); + req->key = key_get(afs_file_key(file)); req->pos = pos; req->len = len; req->nr_pages = 1; @@ -56,7 +58,7 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, req->pages[0] = page; get_page(page); - ret = afs_fetch_data(vnode, key, req); + ret = afs_fetch_data(vnode, req); afs_put_read(req); if (ret < 0) { if (ret == -ENOENT) { @@ -80,7 +82,6 @@ int afs_write_begin(struct file *file, struct address_space *mapping, { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct page *page; - struct key *key = afs_file_key(file); unsigned long priv; unsigned f, from = pos & (PAGE_SIZE - 1); unsigned t, to = from + len; @@ -95,7 
+96,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, return -ENOMEM; if (!PageUptodate(page) && len != PAGE_SIZE) { - ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page); + ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page); if (ret < 0) { unlock_page(page); put_page(page); @@ -163,7 +164,6 @@ int afs_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); - struct key *key = afs_file_key(file); unsigned long priv; unsigned int f, from = pos & (PAGE_SIZE - 1); unsigned int t, to = from + copied; @@ -193,7 +193,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, * unmarshalling routine will take care of clearing any * bits that are beyond the EOF. */ - ret = afs_fill_page(vnode, key, pos + copied, + ret = afs_fill_page(file, pos + copied, len - copied, page); if (ret < 0) goto out; From f105da1a798f23f386ac5c4c2d776d57088bec32 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:28 +0000 Subject: [PATCH 05/14] afs: Don't truncate iter during data fetch Don't truncate the iterator to correspond to the actual data size when fetching the data from the server - rather, pass the length we want to read to rxrpc. This will allow the clear-after-read code in future to simply clear the remaining iterator capacity rather than having to reinitialise the iterator. Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/158861249201.340223.13035445866976590375.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/159465825061.1377938.14403904452300909320.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/160588531418.3465195.10712005940763063144.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118148567.1232039.13380313332292947956.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161044610.2537118.17908520793806837792.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340407907.1303470.6501394859511712746.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539551721.286939.14655713136572200716.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653807790.2770958.14034599989374173734.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789090823.6155.15673999934535049102.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/fsclient.c | 6 ++++-- fs/afs/internal.h | 6 ++++++ fs/afs/rxrpc.c | 13 +++++++++---- fs/afs/yfsclient.c | 6 ++++-- include/net/af_rxrpc.h | 2 +- net/rxrpc/recvmsg.c | 9 +++++---- 6 files changed, 29 insertions(+), 13 deletions(-) diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 1d95ed9dd86e..4a57c6c6f12b 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -305,8 +305,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) unsigned int size; int ret; - _enter("{%u,%zu/%llu}", - call->unmarshall, iov_iter_count(call->iter), req->actual_len); + _enter("{%u,%zu,%zu/%llu}", + call->unmarshall, call->iov_len, iov_iter_count(call->iter), + req->actual_len); switch (call->unmarshall) { case 0: @@ -343,6 +344,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) size = PAGE_SIZE - req->offset; else size = req->remain; + call->iov_len = size; call->bvec[0].bv_len = size; call->bvec[0].bv_offset = req->offset; 
call->bvec[0].bv_page = req->pages[req->index]; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 995fef267be7..7b8306d8e81e 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -104,6 +104,7 @@ struct afs_call { struct afs_server *server; /* The fileserver record if fs op (pins ref) */ struct afs_vlserver *vlserver; /* The vlserver record if vl op */ void *request; /* request data (first part) */ + size_t iov_len; /* Size of *iter to be used */ struct iov_iter def_iter; /* Default buffer/data iterator */ struct iov_iter *iter; /* Iterator currently in use */ union { /* Convenience for ->def_iter */ @@ -1271,6 +1272,7 @@ static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *c static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size) { + call->iov_len = size; call->kvec[0].iov_base = buf; call->kvec[0].iov_len = size; iov_iter_kvec(&call->def_iter, READ, call->kvec, 1, size); @@ -1278,21 +1280,25 @@ static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t si static inline void afs_extract_to_tmp(struct afs_call *call) { + call->iov_len = sizeof(call->tmp); afs_extract_begin(call, &call->tmp, sizeof(call->tmp)); } static inline void afs_extract_to_tmp64(struct afs_call *call) { + call->iov_len = sizeof(call->tmp64); afs_extract_begin(call, &call->tmp64, sizeof(call->tmp64)); } static inline void afs_extract_discard(struct afs_call *call, size_t size) { + call->iov_len = size; iov_iter_discard(&call->def_iter, READ, size); } static inline void afs_extract_to_buf(struct afs_call *call, size_t size) { + call->iov_len = size; afs_extract_begin(call, call->buffer, size); } diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 8be709cb8542..0ec38b758f29 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -363,6 +363,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) struct rxrpc_call *rxcall; struct msghdr msg; struct kvec iov[1]; + size_t len; s64 tx_total_len; int ret; @@ -466,9 +467,10 @@ error_do_abort: rxrpc_kernel_abort_call(call->net->socket, rxcall, RX_USER_ABORT, ret, "KSD"); } else { + len = 0; iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0); rxrpc_kernel_recv_data(call->net->socket, rxcall, - &msg.msg_iter, false, + &msg.msg_iter, &len, false, &call->abort_code, &call->service_id); ac->abort_code = call->abort_code; ac->responded = true; @@ -504,6 +506,7 @@ error_kill_call: static void afs_deliver_to_call(struct afs_call *call) { enum afs_call_state state; + size_t len; u32 abort_code, remote_abort = 0; int ret; @@ -516,10 +519,11 @@ static void afs_deliver_to_call(struct afs_call *call) state == AFS_CALL_SV_AWAIT_ACK ) { if (state == AFS_CALL_SV_AWAIT_ACK) { + len = 0; iov_iter_kvec(&call->def_iter, READ, NULL, 0, 0); ret = rxrpc_kernel_recv_data(call->net->socket, call->rxcall, &call->def_iter, - false, &remote_abort, + &len, false, &remote_abort, &call->service_id); trace_afs_receive_data(call, &call->def_iter, false, ret); @@ -929,10 +933,11 @@ int afs_extract_data(struct afs_call *call, bool want_more) u32 remote_abort = 0; int ret; - _enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more); + _enter("{%s,%zu,%zu},%d", + call->type->name, call->iov_len, iov_iter_count(iter), want_more); ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter, - want_more, &remote_abort, + &call->iov_len, want_more, &remote_abort, &call->service_id); if (ret == 0 || ret == -EAGAIN) return ret; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 
bd787e71a657..6c45d32da13c 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -363,8 +363,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) unsigned int size; int ret; - _enter("{%u,%zu/%llu}", - call->unmarshall, iov_iter_count(call->iter), req->actual_len); + _enter("{%u,%zu, %zu/%llu}", + call->unmarshall, call->iov_len, iov_iter_count(call->iter), + req->actual_len); switch (call->unmarshall) { case 0: @@ -396,6 +397,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) size = PAGE_SIZE - req->offset; else size = req->remain; + call->iov_len = size; call->bvec[0].bv_len = size; call->bvec[0].bv_offset = req->offset; call->bvec[0].bv_page = req->pages[req->index]; diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h index f6abcc0bbd6e..cee5f83c0f11 100644 --- a/include/net/af_rxrpc.h +++ b/include/net/af_rxrpc.h @@ -53,7 +53,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *, struct msghdr *, size_t, rxrpc_notify_end_tx_t); int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *, - struct iov_iter *, bool, u32 *, u16 *); + struct iov_iter *, size_t *, bool, u32 *, u16 *); bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, u32, int, const char *); void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index fef3573fdc8b..eca6dda26c77 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -669,6 +669,7 @@ wait_error: * @sock: The socket that the call exists on * @call: The call to send data through * @iter: The buffer to receive into + * @_len: The amount of data we want to receive (decreased on return) * @want_more: True if more data is expected to be read * @_abort: Where the abort code is stored if -ECONNABORTED is returned * @_service: Where to store the actual service ID (may be upgraded) @@ -684,7 +685,7 @@ wait_error: * *_abort should also be initialised to 0. */ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, - struct iov_iter *iter, + struct iov_iter *iter, size_t *_len, bool want_more, u32 *_abort, u16 *_service) { size_t offset = 0; @@ -692,7 +693,7 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, _enter("{%d,%s},%zu,%d", call->debug_id, rxrpc_call_states[call->state], - iov_iter_count(iter), want_more); + *_len, want_more); ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING); @@ -703,8 +704,8 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call, case RXRPC_CALL_SERVER_RECV_REQUEST: case RXRPC_CALL_SERVER_ACK_REQUEST: ret = rxrpc_recvmsg_data(sock, call, NULL, iter, - iov_iter_count(iter), 0, - &offset); + *_len, 0, &offset); + *_len -= offset; if (ret < 0) goto out; From 05092755aab4b7f5ec7541144c32b0744eb8d136 Mon Sep 17 00:00:00 2001 From: David Howells Date: Sun, 7 Jun 2020 21:50:29 +0100 Subject: [PATCH 06/14] afs: Log remote unmarshalling errors Log unmarshalling errors reported by the peer (ie. it can't parse what we sent it). Limit the maximum number of messages to 3. 
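
The shape of the pattern, as a small userspace sketch (the
code-to-message table here is abridged and illustrative): translate the
remote abort code into something readable and emit at most three
notices over the lifetime of the process:

    #include <stdio.h>

    static const char *abort_msg(int abort_code)
    {
            switch (abort_code) {
            case 1:   return "client marshalling";     /* e.g. RXGEN_CC_MARSHAL */
            case 2:   return "server unmarshalling";   /* e.g. RXGEN_SS_UNMARSHAL */
            case -32: return "insufficient data";
            default:  return NULL;  /* not a protocol disagreement */
            }
    }

    static void log_remote_error(const char *op, int abort_code)
    {
            static int emitted;     /* cf. the static counter in afs_log_error() */
            const char *msg = abort_msg(abort_code);

            if (!msg || emitted >= 3)
                    return;
            emitted++;
            fprintf(stderr, "peer reported %s failure on %s\n", msg, op);
    }

    int main(void)
    {
            for (int i = 0; i < 5; i++)
                    log_remote_error("FS.FetchData", 2);    /* prints 3 lines */
            return 0;
    }

The counter is read and updated without any locking; under contention
that may let an extra line or two slip out, which is fine for a
diagnostic cap like this.
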
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/159465826250.1377938.16372395422217583913.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/160588532584.3465195.15618385466614028590.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118149739.1232039.208060911149801695.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161046033.2537118.7779717661044373273.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340409118.1303470.17812607349396199116.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539552964.286939.16503232687974398308.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653808989.2770958.11530765353025697860.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789092349.6155.8581594259882708631.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/rxrpc.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 0ec38b758f29..ae68576f822f 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -500,6 +500,39 @@ error_kill_call: _leave(" = %d", ret); } +/* + * Log remote abort codes that indicate that we have a protocol disagreement + * with the server. + */ +static void afs_log_error(struct afs_call *call, s32 remote_abort) +{ + static int max = 0; + const char *msg; + int m; + + switch (remote_abort) { + case RX_EOF: msg = "unexpected EOF"; break; + case RXGEN_CC_MARSHAL: msg = "client marshalling"; break; + case RXGEN_CC_UNMARSHAL: msg = "client unmarshalling"; break; + case RXGEN_SS_MARSHAL: msg = "server marshalling"; break; + case RXGEN_SS_UNMARSHAL: msg = "server unmarshalling"; break; + case RXGEN_DECODE: msg = "opcode decode"; break; + case RXGEN_SS_XDRFREE: msg = "server XDR cleanup"; break; + case RXGEN_CC_XDRFREE: msg = "client XDR cleanup"; break; + case -32: msg = "insufficient data"; break; + default: + return; + } + + m = max; + if (m < 3) { + max = m + 1; + pr_notice("kAFS: Peer reported %s failure on %s [%pISp]\n", + msg, call->type->name, + &call->alist->addrs[call->addr_ix].transport); + } +} + /* * deliver messages to a call */ @@ -563,6 +596,7 @@ static void afs_deliver_to_call(struct afs_call *call) goto out; case -ECONNABORTED: ASSERTCMP(state, ==, AFS_CALL_COMPLETE); + afs_log_error(call, call->abort_code); goto done; case -ENOTSUPP: abort_code = RXGEN_OPCODE; From c450846461f88b8888d6f5c2a2aa63ab64864978 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:28 +0000 Subject: [PATCH 07/14] afs: Set up the iov_iter before calling afs_extract_data() afs_extract_data() sets up a temporary iov_iter and passes it to AF_RXRPC each time it is called to describe the remaining buffer to be filled. Instead: (1) Put an iterator in the afs_call struct. (2) Set the iterator for each marshalling stage to load data into the appropriate places. A number of convenience functions are provided to this end (eg. afs_extract_to_buf()). This iterator is then passed to afs_extract_data(). (3) Use the new ITER_XARRAY iterator when reading data to load directly into the inode's pages without needing to create a list of them. This will allow O_DIRECT calls to be supported in future patches. 
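
As a rough userspace analogue of the staged-iterator pattern described
above (illustrative structures, not the kernel iov_iter API): each
unmarshalling stage points the call's iterator and length at the right
destination, and one extraction routine then fills whatever the
iterator currently describes:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct call {
            const unsigned char *rx;    /* incoming "wire" data */
            size_t rx_len;
            unsigned char *it_buf;      /* stand-in for call->iter */
            size_t it_len;              /* stand-in for call->iov_len */
            uint32_t tmp;
    };

    /* Stage setup, cf. afs_extract_to_buf()/afs_extract_to_tmp(). */
    static void extract_to_buf(struct call *c, void *buf, size_t len)
    {
            c->it_buf = buf;
            c->it_len = len;
    }

    static void extract_to_tmp(struct call *c)
    {
            extract_to_buf(c, &c->tmp, sizeof(c->tmp));
    }

    /* One routine serves every stage: it just drains the iterator. */
    static int extract_data(struct call *c)
    {
            size_t n = c->it_len < c->rx_len ? c->it_len : c->rx_len;

            memcpy(c->it_buf, c->rx, n);
            c->rx += n;
            c->rx_len -= n;
            c->it_len -= n;
            return c->it_len ? -1 : 0;  /* -1: need more data, like -EAGAIN */
    }

    int main(void)
    {
            unsigned char wire[] = { 0, 0, 0, 5, 'h', 'e', 'l', 'l', 'o' };
            struct call c = { .rx = wire, .rx_len = sizeof(wire) };
            char payload[16] = "";
            size_t len;

            extract_to_tmp(&c);                 /* stage 1: payload length */
            if (extract_data(&c) < 0)
                    return 1;
            len = ntohl(c.tmp);

            extract_to_buf(&c, payload, len);   /* stage 2: the payload */
            if (extract_data(&c) < 0)
                    return 1;
            printf("len=%zu payload=%.*s\n", len, (int)len, payload);
            return 0;
    }

The win is the same as in the kernel version: the per-stage code only
says where the next piece of the reply should land, and the buffer
handling lives in exactly one place.
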
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/152898380012.11616.12094591785228251717.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/153685394431.14766.3178466345696987059.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/153999787395.866.11218209749223643998.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/154033911195.12041.3882700371848894587.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/158861250059.340223.1248231474865140653.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/159465827399.1377938.11181327349704960046.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/160588533776.3465195.3612752083351956948.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118151238.1232039.17015723405750601161.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161047240.2537118.14721975104810564022.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340410333.1303470.16260122230371140878.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539554187.286939.15305559004905459852.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653810525.2770958.4630666029125411789.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789093719.6155.7877160739235087723.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/dir.c | 228 ++++++++++++++++++++++++++++++--------------- fs/afs/file.c | 190 +++++++++++++++++++++---------------- fs/afs/fsclient.c | 54 +++-------- fs/afs/internal.h | 16 ++-- fs/afs/write.c | 27 ++++-- fs/afs/yfsclient.c | 54 +++-------- 6 files changed, 317 insertions(+), 252 deletions(-) diff --git a/fs/afs/dir.c b/fs/afs/dir.c index d8825ce63eba..8c093bfff8b6 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -102,6 +102,35 @@ struct afs_lookup_cookie { struct afs_fid fids[50]; }; +/* + * Drop the refs that we're holding on the pages we were reading into. We've + * got refs on the first nr_pages pages. + */ +static void afs_dir_read_cleanup(struct afs_read *req) +{ + struct address_space *mapping = req->vnode->vfs_inode.i_mapping; + struct page *page; + pgoff_t last = req->nr_pages - 1; + + XA_STATE(xas, &mapping->i_pages, 0); + + if (unlikely(!req->nr_pages)) + return; + + rcu_read_lock(); + xas_for_each(&xas, page, last) { + if (xas_retry(&xas, page)) + continue; + BUG_ON(xa_is_value(page)); + BUG_ON(PageCompound(page)); + ASSERTCMP(page->mapping, ==, mapping); + + put_page(page); + } + + rcu_read_unlock(); +} + /* * check that a directory page is valid */ @@ -127,7 +156,7 @@ static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page, qty /= sizeof(union afs_xdr_dir_block); /* check them */ - dbuf = kmap(page); + dbuf = kmap_atomic(page); for (tmp = 0; tmp < qty; tmp++) { if (dbuf->blocks[tmp].hdr.magic != AFS_DIR_MAGIC) { printk("kAFS: %s(%lx): bad magic %d/%d is %04hx\n", @@ -146,7 +175,7 @@ static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page, ((u8 *)&dbuf->blocks[tmp])[AFS_DIR_BLOCK_SIZE - 1] = 0; } - kunmap(page); + kunmap_atomic(dbuf); checked: afs_stat_v(dvnode, n_read_dir); @@ -157,35 +186,74 @@ error: } /* - * Check the contents of a directory that we've just read. + * Dump the contents of a directory. 
*/ -static bool afs_dir_check_pages(struct afs_vnode *dvnode, struct afs_read *req) +static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req) { struct afs_xdr_dir_page *dbuf; - unsigned int i, j, qty = PAGE_SIZE / sizeof(union afs_xdr_dir_block); + struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct page *page; + unsigned int i, qty = PAGE_SIZE / sizeof(union afs_xdr_dir_block); + pgoff_t last = req->nr_pages - 1; - for (i = 0; i < req->nr_pages; i++) - if (!afs_dir_check_page(dvnode, req->pages[i], req->actual_len)) - goto bad; - return true; + XA_STATE(xas, &mapping->i_pages, 0); -bad: - pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx r=%llx\n", + pr_warn("DIR %llx:%llx f=%llx l=%llx al=%llx\n", dvnode->fid.vid, dvnode->fid.vnode, - req->file_size, req->len, req->actual_len, req->remain); - pr_warn("DIR %llx %x %x %x\n", - req->pos, req->index, req->nr_pages, req->offset); + req->file_size, req->len, req->actual_len); + pr_warn("DIR %llx %x %zx %zx\n", + req->pos, req->nr_pages, + req->iter->iov_offset, iov_iter_count(req->iter)); - for (i = 0; i < req->nr_pages; i++) { - dbuf = kmap(req->pages[i]); - for (j = 0; j < qty; j++) { - union afs_xdr_dir_block *block = &dbuf->blocks[j]; + xas_for_each(&xas, page, last) { + if (xas_retry(&xas, page)) + continue; - pr_warn("[%02x] %32phN\n", i * qty + j, block); + BUG_ON(PageCompound(page)); + BUG_ON(page->mapping != mapping); + + dbuf = kmap_atomic(page); + for (i = 0; i < qty; i++) { + union afs_xdr_dir_block *block = &dbuf->blocks[i]; + + pr_warn("[%02lx] %32phN\n", page->index * qty + i, block); } - kunmap(req->pages[i]); + kunmap_atomic(dbuf); } - return false; +} + +/* + * Check all the pages in a directory. All the pages are held pinned. + */ +static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req) +{ + struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct page *page; + pgoff_t last = req->nr_pages - 1; + int ret = 0; + + XA_STATE(xas, &mapping->i_pages, 0); + + if (unlikely(!req->nr_pages)) + return 0; + + rcu_read_lock(); + xas_for_each(&xas, page, last) { + if (xas_retry(&xas, page)) + continue; + + BUG_ON(PageCompound(page)); + BUG_ON(page->mapping != mapping); + + if (!afs_dir_check_page(dvnode, page, req->file_size)) { + afs_dir_dump(dvnode, req); + ret = -EIO; + break; + } + } + + rcu_read_unlock(); + return ret; } /* @@ -214,58 +282,57 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) { struct afs_read *req; loff_t i_size; - int nr_pages, nr_inline, i, n; - int ret = -ENOMEM; + int nr_pages, i, n; + int ret; -retry: - i_size = i_size_read(&dvnode->vfs_inode); - if (i_size < 2048) - return ERR_PTR(afs_bad(dvnode, afs_file_error_dir_small)); - if (i_size > 2048 * 1024) { - trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big); - return ERR_PTR(-EFBIG); - } + _enter(""); - _enter("%llu", i_size); - - /* Get a request record to hold the page list. We want to hold it - * inline if we can, but we don't want to make an order 1 allocation. 
- */ - nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE; - nr_inline = nr_pages; - if (nr_inline > (PAGE_SIZE - sizeof(*req)) / sizeof(struct page *)) - nr_inline = 0; - - req = kzalloc(struct_size(req, array, nr_inline), GFP_KERNEL); + req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return ERR_PTR(-ENOMEM); refcount_set(&req->usage, 1); + req->vnode = dvnode; req->key = key_get(key); - req->nr_pages = nr_pages; + req->cleanup = afs_dir_read_cleanup; + +expand: + i_size = i_size_read(&dvnode->vfs_inode); + if (i_size < 2048) { + ret = afs_bad(dvnode, afs_file_error_dir_small); + goto error; + } + if (i_size > 2048 * 1024) { + trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big); + ret = -EFBIG; + goto error; + } + + _enter("%llu", i_size); + + nr_pages = (i_size + PAGE_SIZE - 1) / PAGE_SIZE; + req->actual_len = i_size; /* May change */ req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */ req->data_version = dvnode->status.data_version; /* May change */ - if (nr_inline > 0) { - req->pages = req->array; - } else { - req->pages = kcalloc(nr_pages, sizeof(struct page *), - GFP_KERNEL); - if (!req->pages) - goto error; - } + iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages, + 0, i_size); + req->iter = &req->def_iter; - /* Get a list of all the pages that hold or will hold the directory - * content. We need to fill in any gaps that we might find where the - * memory reclaimer has been at work. If there are any gaps, we will + /* Fill in any gaps that we might find where the memory reclaimer has + * been at work and pin all the pages. If there are any gaps, we will * need to reread the entire directory contents. */ - i = 0; - do { + i = req->nr_pages; + while (i < nr_pages) { + struct page *pages[8], *page; + n = find_get_pages_contig(dvnode->vfs_inode.i_mapping, i, - req->nr_pages - i, - req->pages + i); - _debug("find %u at %u/%u", n, i, req->nr_pages); + min_t(unsigned int, nr_pages - i, + ARRAY_SIZE(pages)), + pages); + _debug("find %u at %u/%u", n, i, nr_pages); + if (n == 0) { gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask; @@ -273,22 +340,24 @@ retry: afs_stat_v(dvnode, n_inval); ret = -ENOMEM; - req->pages[i] = __page_cache_alloc(gfp); - if (!req->pages[i]) + page = __page_cache_alloc(gfp); + if (!page) goto error; - ret = add_to_page_cache_lru(req->pages[i], + ret = add_to_page_cache_lru(page, dvnode->vfs_inode.i_mapping, i, gfp); if (ret < 0) goto error; - attach_page_private(req->pages[i], (void *)1); - unlock_page(req->pages[i]); + attach_page_private(page, (void *)1); + unlock_page(page); + req->nr_pages++; i++; } else { + req->nr_pages += n; i += n; } - } while (i < req->nr_pages); + } /* If we're going to reload, we need to lock all the pages to prevent * races. @@ -312,12 +381,17 @@ retry: task_io_account_read(PAGE_SIZE * req->nr_pages); - if (req->len < req->file_size) - goto content_has_grown; + if (req->len < req->file_size) { + /* The content has grown, so we need to expand the + * buffer. + */ + up_write(&dvnode->validate_lock); + goto expand; + } /* Validate the data we just read. 
*/ - ret = -EIO; - if (!afs_dir_check_pages(dvnode, req)) + ret = afs_dir_check(dvnode, req); + if (ret < 0) goto error_unlock; // TODO: Trim excess pages @@ -335,11 +409,6 @@ error: afs_put_read(req); _leave(" = %d", ret); return ERR_PTR(ret); - -content_has_grown: - up_write(&dvnode->validate_lock); - afs_put_read(req); - goto retry; } /* @@ -449,6 +518,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, struct afs_read *req; struct page *page; unsigned blkoff, limit; + void __rcu **slot; int ret; _enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos); @@ -473,9 +543,15 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, blkoff = ctx->pos & ~(sizeof(union afs_xdr_dir_block) - 1); /* Fetch the appropriate page from the directory and re-add it - * to the LRU. + * to the LRU. We have all the pages pinned with an extra ref. */ - page = req->pages[blkoff / PAGE_SIZE]; + rcu_read_lock(); + page = NULL; + slot = radix_tree_lookup_slot(&dvnode->vfs_inode.i_mapping->i_pages, + blkoff / PAGE_SIZE); + if (slot) + page = radix_tree_deref_slot(slot); + rcu_read_unlock(); if (!page) { ret = afs_bad(dvnode, afs_file_error_dir_missing_page); break; diff --git a/fs/afs/file.c b/fs/afs/file.c index af6471defec3..4a34ffaf6de4 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -183,21 +183,72 @@ int afs_release(struct inode *inode, struct file *file) return ret; } +/* + * Handle completion of a read operation. + */ +static void afs_file_read_done(struct afs_read *req) +{ + struct afs_vnode *vnode = req->vnode; + struct page *page; + pgoff_t index = req->pos >> PAGE_SHIFT; + pgoff_t last = index + req->nr_pages - 1; + + XA_STATE(xas, &vnode->vfs_inode.i_mapping->i_pages, index); + + if (iov_iter_count(req->iter) > 0) { + /* The read was short - clear the excess buffer. */ + _debug("afterclear %zx %zx %llx/%llx", + req->iter->iov_offset, + iov_iter_count(req->iter), + req->actual_len, req->len); + iov_iter_zero(iov_iter_count(req->iter), req->iter); + } + + rcu_read_lock(); + xas_for_each(&xas, page, last) { + page_endio(page, false, 0); + put_page(page); + } + rcu_read_unlock(); + + task_io_account_read(req->len); + req->cleanup = NULL; +} + +/* + * Dispose of our locks and refs on the pages if the read failed. + */ +static void afs_file_read_cleanup(struct afs_read *req) +{ + struct page *page; + pgoff_t index = req->pos >> PAGE_SHIFT; + pgoff_t last = index + req->nr_pages - 1; + + if (req->iter) { + XA_STATE(xas, &req->vnode->vfs_inode.i_mapping->i_pages, index); + + _enter("%lu,%u,%zu", index, req->nr_pages, iov_iter_count(req->iter)); + + rcu_read_lock(); + xas_for_each(&xas, page, last) { + BUG_ON(xa_is_value(page)); + BUG_ON(PageCompound(page)); + + page_endio(page, false, req->error); + put_page(page); + } + rcu_read_unlock(); + } +} + /* * Dispose of a ref to a read record. 
*/ void afs_put_read(struct afs_read *req) { - int i; - if (refcount_dec_and_test(&req->usage)) { - if (req->pages) { - for (i = 0; i < req->nr_pages; i++) - if (req->pages[i]) - put_page(req->pages[i]); - if (req->pages != req->array) - kfree(req->pages); - } + if (req->cleanup) + req->cleanup(req); key_put(req->key); kfree(req); } @@ -215,6 +266,7 @@ static void afs_fetch_data_success(struct afs_operation *op) static void afs_fetch_data_put(struct afs_operation *op) { + op->fetch.req->error = op->error; afs_put_read(op->fetch.req); } @@ -254,12 +306,11 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) /* * read page from file, directory or symlink, given a key to use */ -int afs_page_filler(void *data, struct page *page) +static int afs_page_filler(struct key *key, struct page *page) { struct inode *inode = page->mapping->host; struct afs_vnode *vnode = AFS_FS_I(inode); struct afs_read *req; - struct key *key = data; int ret; _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index); @@ -270,53 +321,52 @@ int afs_page_filler(void *data, struct page *page) if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) goto error; - req = kzalloc(struct_size(req, array, 1), GFP_KERNEL); + req = kzalloc(sizeof(struct afs_read), GFP_KERNEL); if (!req) goto enomem; - /* We request a full page. If the page is a partial one at the - * end of the file, the server will return a short read and the - * unmarshalling code will clear the unfilled space. - */ refcount_set(&req->usage, 1); - req->key = key_get(key); - req->pos = (loff_t)page->index << PAGE_SHIFT; - req->len = PAGE_SIZE; - req->nr_pages = 1; - req->pages = req->array; - req->pages[0] = page; + req->vnode = vnode; + req->key = key_get(key); + req->pos = (loff_t)page->index << PAGE_SHIFT; + req->len = PAGE_SIZE; + req->nr_pages = 1; + req->done = afs_file_read_done; + req->cleanup = afs_file_read_cleanup; + get_page(page); + iov_iter_xarray(&req->def_iter, READ, &page->mapping->i_pages, + req->pos, req->len); + req->iter = &req->def_iter; - /* read the contents of the file from the server into the - * page */ ret = afs_fetch_data(vnode, req); + if (ret < 0) + goto fetch_error; + afs_put_read(req); - - if (ret < 0) { - if (ret == -ENOENT) { - _debug("got NOENT from server" - " - marking file deleted and stale"); - set_bit(AFS_VNODE_DELETED, &vnode->flags); - ret = -ESTALE; - } - - if (ret == -EINTR || - ret == -ENOMEM || - ret == -ERESTARTSYS || - ret == -EAGAIN) - goto error; - goto io_error; - } - - SetPageUptodate(page); - unlock_page(page); - _leave(" = 0"); return 0; -io_error: - SetPageError(page); - goto error; +fetch_error: + switch (ret) { + case -EINTR: + case -ENOMEM: + case -ERESTARTSYS: + case -EAGAIN: + afs_put_read(req); + goto error; + case -ENOENT: + _debug("got NOENT from server - marking file deleted and stale"); + set_bit(AFS_VNODE_DELETED, &vnode->flags); + ret = -ESTALE; + /* Fall through */ + default: + page_endio(page, false, ret); + afs_put_read(req); + _leave(" = %d", ret); + return ret; + } + enomem: ret = -ENOMEM; error: @@ -351,19 +401,6 @@ static int afs_readpage(struct file *file, struct page *page) return ret; } -/* - * Make pages available as they're filled. - */ -static void afs_readpages_page_done(struct afs_read *req) -{ - struct page *page = req->pages[req->index]; - - req->pages[req->index] = NULL; - SetPageUptodate(page); - unlock_page(page); - put_page(page); -} - /* * Read a contiguous set of pages. 
*/ @@ -375,7 +412,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, struct list_head *p; struct page *first, *page; pgoff_t index; - int ret, n, i; + int ret, n; /* Count the number of contiguous pages at the front of the list. Note * that the list goes prev-wards rather than next-wards. @@ -391,21 +428,20 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, n++; } - req = kzalloc(struct_size(req, array, n), GFP_NOFS); + req = kzalloc(sizeof(struct afs_read), GFP_NOFS); if (!req) return -ENOMEM; refcount_set(&req->usage, 1); req->vnode = vnode; req->key = key_get(afs_file_key(file)); - req->page_done = afs_readpages_page_done; + req->done = afs_file_read_done; + req->cleanup = afs_file_read_cleanup; req->pos = first->index; req->pos <<= PAGE_SHIFT; - req->pages = req->array; - /* Transfer the pages to the request. We add them in until one fails - * to add to the LRU and then we stop (as that'll make a hole in the - * contiguous run. + /* Add pages to the LRU until it fails. We keep the pages ref'd and + * locked until the read is complete. * * Note that it's possible for the file size to change whilst we're * doing this, but we rely on the server returning less than we asked @@ -422,8 +458,7 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, break; } - req->pages[req->nr_pages++] = page; - req->len += PAGE_SIZE; + req->nr_pages++; } while (req->nr_pages < n); if (req->nr_pages == 0) { @@ -431,30 +466,25 @@ static int afs_readpages_one(struct file *file, struct address_space *mapping, return 0; } + req->len = req->nr_pages * PAGE_SIZE; + iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, + req->pos, req->len); + req->iter = &req->def_iter; + ret = afs_fetch_data(vnode, req); if (ret < 0) goto error; - task_io_account_read(PAGE_SIZE * req->nr_pages); afs_put_read(req); return 0; error: if (ret == -ENOENT) { - _debug("got NOENT from server" - " - marking file deleted and stale"); + _debug("got NOENT from server - marking file deleted and stale"); set_bit(AFS_VNODE_DELETED, &vnode->flags); ret = -ESTALE; } - for (i = 0; i < req->nr_pages; i++) { - page = req->pages[i]; - if (page) { - SetPageError(page); - unlock_page(page); - } - } - afs_put_read(req); return ret; } diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 4a57c6c6f12b..897b37301851 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -302,7 +302,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) struct afs_vnode_param *vp = &op->file[0]; struct afs_read *req = op->fetch.req; const __be32 *bp; - unsigned int size; int ret; _enter("{%u,%zu,%zu/%llu}", @@ -312,8 +311,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) switch (call->unmarshall) { case 0: req->actual_len = 0; - req->index = 0; - req->offset = req->pos & (PAGE_SIZE - 1); call->unmarshall++; if (call->operation_ID == FSFETCHDATA64) { afs_extract_to_tmp64(call); @@ -323,7 +320,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) } fallthrough; - /* extract the returned data length */ + /* Extract the returned data length into + * ->actual_len. This may indicate more or less data than was + * requested will be returned. 
+ */ case 1: _debug("extract data length"); ret = afs_extract_data(call, true); @@ -332,45 +332,25 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) req->actual_len = be64_to_cpu(call->tmp64); _debug("DATA length: %llu", req->actual_len); - req->remain = min(req->len, req->actual_len); - if (req->remain == 0) + + if (req->actual_len == 0) goto no_more_data; + call->iter = req->iter; + call->iov_len = min(req->actual_len, req->len); call->unmarshall++; - - begin_page: - ASSERTCMP(req->index, <, req->nr_pages); - if (req->remain > PAGE_SIZE - req->offset) - size = PAGE_SIZE - req->offset; - else - size = req->remain; - call->iov_len = size; - call->bvec[0].bv_len = size; - call->bvec[0].bv_offset = req->offset; - call->bvec[0].bv_page = req->pages[req->index]; - iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); - ASSERTCMP(size, <=, PAGE_SIZE); fallthrough; /* extract the returned data */ case 2: _debug("extract data %zu/%llu", - iov_iter_count(call->iter), req->remain); + iov_iter_count(call->iter), req->actual_len); ret = afs_extract_data(call, true); if (ret < 0) return ret; - req->remain -= call->bvec[0].bv_len; - req->offset += call->bvec[0].bv_len; - ASSERTCMP(req->offset, <=, PAGE_SIZE); - if (req->offset == PAGE_SIZE) { - req->offset = 0; - req->index++; - if (req->remain > 0) - goto begin_page; - } - ASSERTCMP(req->remain, ==, 0); + call->iter = &call->def_iter; if (req->actual_len <= req->len) goto no_more_data; @@ -412,16 +392,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) break; } - for (; req->index < req->nr_pages; req->index++) { - if (req->offset < PAGE_SIZE) - zero_user_segment(req->pages[req->index], - req->offset, PAGE_SIZE); - req->offset = 0; - } - - if (req->page_done) - for (req->index = 0; req->index < req->nr_pages; req->index++) - req->page_done(req); + if (req->done) + req->done(req); _leave(" = 0 [done]"); return 0; @@ -496,6 +468,8 @@ void afs_fs_fetch_data(struct afs_operation *op) if (!call) return afs_op_nomem(op); + req->call_debug_id = call->debug_id; + /* marshall the parameters */ bp = call->request; bp[0] = htonl(FSFETCHDATA); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 7b8306d8e81e..83f9f5a540e5 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -31,6 +31,7 @@ struct pagevec; struct afs_call; +struct afs_vnode; /* * Partial file-locking emulation mode. (The problem being that AFS3 only @@ -203,18 +204,18 @@ struct afs_read { loff_t pos; /* Where to start reading */ loff_t len; /* How much we're asking for */ loff_t actual_len; /* How much we're actually getting */ - loff_t remain; /* Amount remaining */ loff_t file_size; /* File size returned by server */ struct key *key; /* The key to use to reissue the read */ + struct afs_vnode *vnode; /* The file being read into. 
*/ afs_dataversion_t data_version; /* Version number returned by server */ refcount_t usage; - unsigned int index; /* Which page we're reading into */ + unsigned int call_debug_id; unsigned int nr_pages; - unsigned int offset; /* offset into current page */ - struct afs_vnode *vnode; - void (*page_done)(struct afs_read *); - struct page **pages; - struct page *array[]; + int error; + void (*done)(struct afs_read *); + void (*cleanup)(struct afs_read *); + struct iov_iter *iter; /* Iterator representing the buffer */ + struct iov_iter def_iter; /* Default iterator */ }; /* @@ -1048,7 +1049,6 @@ extern void afs_put_wb_key(struct afs_wb_key *); extern int afs_open(struct inode *, struct file *); extern int afs_release(struct inode *, struct file *); extern int afs_fetch_data(struct afs_vnode *, struct afs_read *); -extern int afs_page_filler(void *, struct page *); extern void afs_put_read(struct afs_read *); static inline struct afs_read *afs_get_read(struct afs_read *req) diff --git a/fs/afs/write.c b/fs/afs/write.c index a91da2e680da..cb24f849e592 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -22,6 +22,16 @@ int afs_set_page_dirty(struct page *page) return __set_page_dirty_nobuffers(page); } +/* + * Handle completion of a read operation to fill a page. + */ +static void afs_fill_hole(struct afs_read *req) +{ + if (iov_iter_count(req->iter) > 0) + /* The read was short - clear the excess buffer. */ + iov_iter_zero(iov_iter_count(req->iter), req->iter); +} + /* * partly or wholly fill a page that's under preparation for writing */ @@ -45,18 +55,19 @@ static int afs_fill_page(struct file *file, return 0; } - req = kzalloc(struct_size(req, array, 1), GFP_KERNEL); + req = kzalloc(sizeof(struct afs_read), GFP_KERNEL); if (!req) return -ENOMEM; refcount_set(&req->usage, 1); - req->key = key_get(afs_file_key(file)); - req->pos = pos; - req->len = len; - req->nr_pages = 1; - req->pages = req->array; - req->pages[0] = page; - get_page(page); + req->vnode = vnode; + req->done = afs_fill_hole; + req->key = key_get(afs_file_key(file)); + req->pos = pos; + req->len = len; + req->nr_pages = 1; + req->iter = &req->def_iter; + iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len); ret = afs_fetch_data(vnode, req); afs_put_read(req); diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 6c45d32da13c..abcec145db4b 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -360,7 +360,6 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) struct afs_vnode_param *vp = &op->file[0]; struct afs_read *req = op->fetch.req; const __be32 *bp; - unsigned int size; int ret; _enter("{%u,%zu, %zu/%llu}", @@ -370,13 +369,14 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) switch (call->unmarshall) { case 0: req->actual_len = 0; - req->index = 0; - req->offset = req->pos & (PAGE_SIZE - 1); afs_extract_to_tmp64(call); call->unmarshall++; fallthrough; - /* extract the returned data length */ + /* Extract the returned data length into ->actual_len. This + * may indicate more or less data than was requested will be + * returned. 
+ */ case 1: _debug("extract data length"); ret = afs_extract_data(call, true); @@ -385,45 +385,25 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->actual_len = be64_to_cpu(call->tmp64); _debug("DATA length: %llu", req->actual_len); - req->remain = min(req->len, req->actual_len); - if (req->remain == 0) + + if (req->actual_len == 0) goto no_more_data; + call->iter = req->iter; + call->iov_len = min(req->actual_len, req->len); call->unmarshall++; - - begin_page: - ASSERTCMP(req->index, <, req->nr_pages); - if (req->remain > PAGE_SIZE - req->offset) - size = PAGE_SIZE - req->offset; - else - size = req->remain; - call->iov_len = size; - call->bvec[0].bv_len = size; - call->bvec[0].bv_offset = req->offset; - call->bvec[0].bv_page = req->pages[req->index]; - iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); - ASSERTCMP(size, <=, PAGE_SIZE); fallthrough; /* extract the returned data */ case 2: _debug("extract data %zu/%llu", - iov_iter_count(call->iter), req->remain); + iov_iter_count(call->iter), req->actual_len); ret = afs_extract_data(call, true); if (ret < 0) return ret; - req->remain -= call->bvec[0].bv_len; - req->offset += call->bvec[0].bv_len; - ASSERTCMP(req->offset, <=, PAGE_SIZE); - if (req->offset == PAGE_SIZE) { - req->offset = 0; - req->index++; - if (req->remain > 0) - goto begin_page; - } - ASSERTCMP(req->remain, ==, 0); + call->iter = &call->def_iter; if (req->actual_len <= req->len) goto no_more_data; @@ -469,16 +449,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) break; } - for (; req->index < req->nr_pages; req->index++) { - if (req->offset < PAGE_SIZE) - zero_user_segment(req->pages[req->index], - req->offset, PAGE_SIZE); - req->offset = 0; - } - - if (req->page_done) - for (req->index = 0; req->index < req->nr_pages; req->index++) - req->page_done(req); + if (req->done) + req->done(req); _leave(" = 0 [done]"); return 0; @@ -518,6 +490,8 @@ void yfs_fs_fetch_data(struct afs_operation *op) if (!call) return afs_op_nomem(op); + req->call_debug_id = call->debug_id; + /* marshall the parameters */ bp = call->request; bp = xdr_encode_u32(bp, YFSFETCHDATA64); From bd80d8a80e12895e56a1bb7862b2379942e46167 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:28 +0000 Subject: [PATCH 08/14] afs: Use ITER_XARRAY for writing Use a single ITER_XARRAY iterator to describe the portion of a file to be transmitted to the server rather than generating a series of small ITER_BVEC iterators on the fly. This will make it easier to implement AIO in afs. In theory we could maybe use one giant ITER_BVEC, but that means potentially allocating a huge array of bio_vec structs (max 256 per page) when in fact the pagecache already has a structure listing all the relevant pages (radix_tree/xarray) that can be walked over. 
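As an illustration of the shape of the change, here is a minimal sketch
condensed from the hunks below (locking, tracing and error handling
omitted; "mapping", "op", "msg" and "call" are as in the patched code):

	struct iov_iter iter;

	/* Describe the dirty span directly in terms of the pagecache
	 * xarray instead of assembling per-page bio_vec arrays.
	 */
	iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, len);
	op->store.write_iter = &iter;

	/* ...and let rxrpc consume the whole iterator in one call. */
	msg.msg_iter = *call->write_iter;
	ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, &msg,
				     iov_iter_count(&msg.msg_iter),
				     afs_notify_end_request_tx);

The iterator walks the xarray as the data is transmitted, so the
bio_vec staging buffers and the afs_load_bvec()/afs_send_pages() loop
go away entirely.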
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/153685395197.14766.16289516750731233933.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/158861251312.340223.17924900795425422532.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/159465828607.1377938.6903132788463419368.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/160588535018.3465195.14509994354240338307.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118152415.1232039.6452879415814850025.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161048194.2537118.13763612220937637316.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340411602.1303470.4661108879482218408.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539555629.286939.5241869986617154517.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653811456.2770958.7017388543246759245.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789095005.6155.6789055030327407928.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/fsclient.c | 50 +++++++----------- fs/afs/internal.h | 15 +++--- fs/afs/rxrpc.c | 103 ++++++------------------------------ fs/afs/write.c | 104 +++++++++++++++++++++---------------- fs/afs/yfsclient.c | 25 +++------ include/trace/events/afs.h | 51 +++++++----------- 6 files changed, 128 insertions(+), 220 deletions(-) diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 897b37301851..31e6b3635541 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -1055,8 +1055,7 @@ static const struct afs_call_type afs_RXFSStoreData64 = { /* * store a set of pages to a very large file */ -static void afs_fs_store_data64(struct afs_operation *op, - loff_t pos, loff_t size, loff_t i_size) +static void afs_fs_store_data64(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; @@ -1071,7 +1070,7 @@ static void afs_fs_store_data64(struct afs_operation *op, if (!call) return afs_op_nomem(op); - call->send_pages = true; + call->write_iter = op->store.write_iter; /* marshall the parameters */ bp = call->request; @@ -1087,47 +1086,38 @@ static void afs_fs_store_data64(struct afs_operation *op, *bp++ = 0; /* unix mode */ *bp++ = 0; /* segment size */ - *bp++ = htonl(upper_32_bits(pos)); - *bp++ = htonl(lower_32_bits(pos)); - *bp++ = htonl(upper_32_bits(size)); - *bp++ = htonl(lower_32_bits(size)); - *bp++ = htonl(upper_32_bits(i_size)); - *bp++ = htonl(lower_32_bits(i_size)); + *bp++ = htonl(upper_32_bits(op->store.pos)); + *bp++ = htonl(lower_32_bits(op->store.pos)); + *bp++ = htonl(upper_32_bits(op->store.size)); + *bp++ = htonl(lower_32_bits(op->store.size)); + *bp++ = htonl(upper_32_bits(op->store.i_size)); + *bp++ = htonl(lower_32_bits(op->store.i_size)); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); } /* - * store a set of pages + * Write data to a file on the server. 
*/ void afs_fs_store_data(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - loff_t size, pos, i_size; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); - size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset; - if (op->store.first != op->store.last) - size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT; - pos = (loff_t)op->store.first << PAGE_SHIFT; - pos += op->store.first_offset; - - i_size = i_size_read(&vp->vnode->vfs_inode); - if (pos + size > i_size) - i_size = size + pos; - _debug("size %llx, at %llx, i_size %llx", - (unsigned long long) size, (unsigned long long) pos, - (unsigned long long) i_size); + (unsigned long long)op->store.size, + (unsigned long long)op->store.pos, + (unsigned long long)op->store.i_size); - if (upper_32_bits(pos) || upper_32_bits(i_size) || upper_32_bits(size) || - upper_32_bits(pos + size)) - return afs_fs_store_data64(op, pos, size, i_size); + if (upper_32_bits(op->store.pos) || + upper_32_bits(op->store.size) || + upper_32_bits(op->store.i_size)) + return afs_fs_store_data64(op); call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData, (4 + 6 + 3) * 4, @@ -1135,7 +1125,7 @@ void afs_fs_store_data(struct afs_operation *op) if (!call) return afs_op_nomem(op); - call->send_pages = true; + call->write_iter = op->store.write_iter; /* marshall the parameters */ bp = call->request; @@ -1151,9 +1141,9 @@ void afs_fs_store_data(struct afs_operation *op) *bp++ = 0; /* unix mode */ *bp++ = 0; /* segment size */ - *bp++ = htonl(lower_32_bits(pos)); - *bp++ = htonl(lower_32_bits(size)); - *bp++ = htonl(lower_32_bits(i_size)); + *bp++ = htonl(lower_32_bits(op->store.pos)); + *bp++ = htonl(lower_32_bits(op->store.size)); + *bp++ = htonl(lower_32_bits(op->store.i_size)); trace_afs_make_fs_call(call, &vp->fid); afs_make_op_call(op, call, GFP_NOFS); diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 83f9f5a540e5..16020725cc68 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -107,6 +107,7 @@ struct afs_call { void *request; /* request data (first part) */ size_t iov_len; /* Size of *iter to be used */ struct iov_iter def_iter; /* Default buffer/data iterator */ + struct iov_iter *write_iter; /* Iterator defining write to be made */ struct iov_iter *iter; /* Iterator currently in use */ union { /* Convenience for ->def_iter */ struct kvec kvec[1]; @@ -133,7 +134,6 @@ struct afs_call { unsigned char unmarshall; /* unmarshalling phase */ unsigned char addr_ix; /* Address in ->alist */ bool drop_ref; /* T if need to drop ref for incoming call */ - bool send_pages; /* T if data from mapping should be sent */ bool need_attention; /* T if RxRPC poked us */ bool async; /* T if asynchronous */ bool upgrade; /* T to request service upgrade */ @@ -811,12 +811,13 @@ struct afs_operation { afs_lock_type_t type; } lock; struct { - struct address_space *mapping; /* Pages being written from */ - pgoff_t first; /* first page in mapping to deal with */ - pgoff_t last; /* last page in mapping to deal with */ - unsigned first_offset; /* offset into mapping[first] */ - unsigned last_to; /* amount of mapping[last] */ - bool laundering; /* Laundering page, PG_writeback not set */ + struct iov_iter *write_iter; + loff_t pos; + loff_t size; + loff_t i_size; + pgoff_t first; /* first page in mapping to deal with */ + pgoff_t last; /* last page in mapping to deal with */ + bool laundering; /* Laundering page, PG_writeback not set */ } store; struct { struct iattr *attr; diff 
--git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index ae68576f822f..23a1a92d64bb 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -271,40 +271,6 @@ void afs_flat_call_destructor(struct afs_call *call) call->buffer = NULL; } -#define AFS_BVEC_MAX 8 - -/* - * Load the given bvec with the next few pages. - */ -static void afs_load_bvec(struct afs_call *call, struct msghdr *msg, - struct bio_vec *bv, pgoff_t first, pgoff_t last, - unsigned offset) -{ - struct afs_operation *op = call->op; - struct page *pages[AFS_BVEC_MAX]; - unsigned int nr, n, i, to, bytes = 0; - - nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX); - n = find_get_pages_contig(op->store.mapping, first, nr, pages); - ASSERTCMP(n, ==, nr); - - msg->msg_flags |= MSG_MORE; - for (i = 0; i < nr; i++) { - to = PAGE_SIZE; - if (first + i >= last) { - to = op->store.last_to; - msg->msg_flags &= ~MSG_MORE; - } - bv[i].bv_page = pages[i]; - bv[i].bv_len = to - offset; - bv[i].bv_offset = offset; - bytes += to - offset; - offset = 0; - } - - iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes); -} - /* * Advance the AFS call state when the RxRPC call ends the transmit phase. */ @@ -317,42 +283,6 @@ static void afs_notify_end_request_tx(struct sock *sock, afs_set_call_state(call, AFS_CALL_CL_REQUESTING, AFS_CALL_CL_AWAIT_REPLY); } -/* - * attach the data from a bunch of pages on an inode to a call - */ -static int afs_send_pages(struct afs_call *call, struct msghdr *msg) -{ - struct afs_operation *op = call->op; - struct bio_vec bv[AFS_BVEC_MAX]; - unsigned int bytes, nr, loop, offset; - pgoff_t first = op->store.first, last = op->store.last; - int ret; - - offset = op->store.first_offset; - op->store.first_offset = 0; - - do { - afs_load_bvec(call, msg, bv, first, last, offset); - trace_afs_send_pages(call, msg, first, last, offset); - - offset = 0; - bytes = msg->msg_iter.count; - nr = msg->msg_iter.nr_segs; - - ret = rxrpc_kernel_send_data(op->net->socket, call->rxcall, msg, - bytes, afs_notify_end_request_tx); - for (loop = 0; loop < nr; loop++) - put_page(bv[loop].bv_page); - if (ret < 0) - break; - - first += nr; - } while (first <= last); - - trace_afs_sent_pages(call, op->store.first, last, first, ret); - return ret; -} - /* * Initiate a call and synchronously queue up the parameters for dispatch. Any * error is stored into the call struct, which the caller must check for. @@ -384,21 +314,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) * after the initial fixed part. */ tx_total_len = call->request_size; - if (call->send_pages) { - struct afs_operation *op = call->op; - - if (op->store.last == op->store.first) { - tx_total_len += op->store.last_to - op->store.first_offset; - } else { - /* It looks mathematically like you should be able to - * combine the following lines with the ones above, but - * unsigned arithmetic is fun when it wraps... - */ - tx_total_len += PAGE_SIZE - op->store.first_offset; - tx_total_len += op->store.last_to; - tx_total_len += (op->store.last - op->store.first - 1) * PAGE_SIZE; - } - } + if (call->write_iter) + tx_total_len += iov_iter_count(call->write_iter); /* If the call is going to be asynchronous, we need an extra ref for * the call to hold itself so the caller need not hang on to its ref. @@ -440,7 +357,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size); msg.msg_control = NULL; msg.msg_controllen = 0; - msg.msg_flags = MSG_WAITALL | (call->send_pages ? 
MSG_MORE : 0); + msg.msg_flags = MSG_WAITALL | (call->write_iter ? MSG_MORE : 0); ret = rxrpc_kernel_send_data(call->net->socket, rxcall, &msg, call->request_size, @@ -448,8 +365,18 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) if (ret < 0) goto error_do_abort; - if (call->send_pages) { - ret = afs_send_pages(call, &msg); + if (call->write_iter) { + msg.msg_iter = *call->write_iter; + msg.msg_flags &= ~MSG_MORE; + trace_afs_send_data(call, &msg); + + ret = rxrpc_kernel_send_data(call->net->socket, + call->rxcall, &msg, + iov_iter_count(&msg.msg_iter), + afs_notify_end_request_tx); + *call->write_iter = msg.msg_iter; + + trace_afs_sent_data(call, &msg, ret); if (ret < 0) goto error_do_abort; } diff --git a/fs/afs/write.c b/fs/afs/write.c index cb24f849e592..6e41b982c71b 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -325,36 +325,27 @@ static void afs_redirty_pages(struct writeback_control *wbc, /* * completion of write to server */ -static void afs_pages_written_back(struct afs_vnode *vnode, - pgoff_t first, pgoff_t last) +static void afs_pages_written_back(struct afs_vnode *vnode, pgoff_t start, pgoff_t last) { - struct pagevec pv; - unsigned count, loop; + struct address_space *mapping = vnode->vfs_inode.i_mapping; + struct page *page; + + XA_STATE(xas, &mapping->i_pages, start); _enter("{%llx:%llu},{%lx-%lx}", - vnode->fid.vid, vnode->fid.vnode, first, last); + vnode->fid.vid, vnode->fid.vnode, start, last); - pagevec_init(&pv); + rcu_read_lock(); - do { - _debug("done %lx-%lx", first, last); + xas_for_each(&xas, page, last) { + ASSERT(PageWriteback(page)); - count = last - first + 1; - if (count > PAGEVEC_SIZE) - count = PAGEVEC_SIZE; - pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping, - first, count, pv.pages); - ASSERTCMP(pv.nr, ==, count); + detach_page_private(page); + trace_afs_page_dirty(vnode, tracepoint_string("clear"), page); + page_endio(page, true, 0); + } - for (loop = 0; loop < count; loop++) { - detach_page_private(pv.pages[loop]); - trace_afs_page_dirty(vnode, tracepoint_string("clear"), - pv.pages[loop]); - end_page_writeback(pv.pages[loop]); - } - first += count; - __pagevec_release(&pv); - } while (first <= last); + rcu_read_unlock(); afs_prune_wb_keys(vnode); _leave(""); @@ -411,9 +402,7 @@ static void afs_store_data_success(struct afs_operation *op) if (!op->store.laundering) afs_pages_written_back(vnode, op->store.first, op->store.last); afs_stat_v(vnode, n_stores); - atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) - - (op->store.first * PAGE_SIZE + op->store.first_offset), - &afs_v2net(vnode)->n_store_bytes); + atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes); } } @@ -426,21 +415,21 @@ static const struct afs_operation_ops afs_store_data_operation = { /* * write to a file */ -static int afs_store_data(struct address_space *mapping, - pgoff_t first, pgoff_t last, - unsigned offset, unsigned to, bool laundering) +static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, + loff_t pos, pgoff_t first, pgoff_t last, + bool laundering) { - struct afs_vnode *vnode = AFS_FS_I(mapping->host); struct afs_operation *op; struct afs_wb_key *wbk = NULL; - int ret; + loff_t size = iov_iter_count(iter), i_size; + int ret = -ENOKEY; - _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x", + _enter("%s{%llx:%llu.%u},%llx,%llx", vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, - first, last, offset, to); + size, pos); ret = afs_get_writeback_key(vnode, &wbk); if (ret) { @@ 
-454,13 +443,16 @@ static int afs_store_data(struct address_space *mapping, return -ENOMEM; } + i_size = i_size_read(&vnode->vfs_inode); + afs_op_set_vnode(op, 0, vnode); op->file[0].dv_delta = 1; - op->store.mapping = mapping; + op->store.write_iter = iter; + op->store.pos = pos; op->store.first = first; op->store.last = last; - op->store.first_offset = offset; - op->store.last_to = to; + op->store.size = size; + op->store.i_size = max(pos + size, i_size); op->store.laundering = laundering; op->mtime = vnode->vfs_inode.i_mtime; op->flags |= AFS_OPERATION_UNINTR; @@ -503,11 +495,12 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, pgoff_t final_page) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); + struct iov_iter iter; struct page *pages[8], *page; unsigned long count, priv; unsigned n, offset, to, f, t; pgoff_t start, first, last; - loff_t i_size, end; + loff_t i_size, pos, end; int loop, ret; _enter(",%lx", primary_page->index); @@ -604,15 +597,28 @@ no_more: first = primary_page->index; last = first + count - 1; - - end = (loff_t)last * PAGE_SIZE + to; - i_size = i_size_read(&vnode->vfs_inode); - _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to); - if (end > i_size) - to = i_size & ~PAGE_MASK; - ret = afs_store_data(mapping, first, last, offset, to, false); + pos = first; + pos <<= PAGE_SHIFT; + pos += offset; + end = last; + end <<= PAGE_SHIFT; + end += to; + + /* Trim the actual write down to the EOF */ + i_size = i_size_read(&vnode->vfs_inode); + if (end > i_size) + end = i_size; + + if (pos < i_size) { + iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, end - pos); + ret = afs_store_data(vnode, &iter, pos, first, last, false); + } else { + /* The dirty region was entirely beyond the EOF. 
*/ + ret = 0; + } + switch (ret) { case 0: ret = count; @@ -912,6 +918,8 @@ int afs_launder_page(struct page *page) { struct address_space *mapping = page->mapping; struct afs_vnode *vnode = AFS_FS_I(mapping->host); + struct iov_iter iter; + struct bio_vec bv[1]; unsigned long priv; unsigned int f, t; int ret = 0; @@ -927,8 +935,14 @@ int afs_launder_page(struct page *page) t = afs_page_dirty_to(page, priv); } + bv[0].bv_page = page; + bv[0].bv_offset = f; + bv[0].bv_len = t - f; + iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len); + trace_afs_page_dirty(vnode, tracepoint_string("launder"), page); - ret = afs_store_data(mapping, page->index, page->index, t, f, true); + ret = afs_store_data(vnode, &iter, (loff_t)page->index << PAGE_SHIFT, + page->index, page->index, true); } detach_page_private(page); diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index abcec145db4b..363d6dd276c0 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -1078,25 +1078,15 @@ void yfs_fs_store_data(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; struct afs_call *call; - loff_t size, pos, i_size; __be32 *bp; _enter(",%x,{%llx:%llu},,", key_serial(op->key), vp->fid.vid, vp->fid.vnode); - size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset; - if (op->store.first != op->store.last) - size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT; - pos = (loff_t)op->store.first << PAGE_SHIFT; - pos += op->store.first_offset; - - i_size = i_size_read(&vp->vnode->vfs_inode); - if (pos + size > i_size) - i_size = size + pos; - _debug("size %llx, at %llx, i_size %llx", - (unsigned long long)size, (unsigned long long)pos, - (unsigned long long)i_size); + (unsigned long long)op->store.size, + (unsigned long long)op->store.pos, + (unsigned long long)op->store.i_size); call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64, sizeof(__be32) + @@ -1109,8 +1099,7 @@ void yfs_fs_store_data(struct afs_operation *op) if (!call) return afs_op_nomem(op); - call->key = op->key; - call->send_pages = true; + call->write_iter = op->store.write_iter; /* marshall the parameters */ bp = call->request; @@ -1118,9 +1107,9 @@ void yfs_fs_store_data(struct afs_operation *op) bp = xdr_encode_u32(bp, 0); /* RPC flags */ bp = xdr_encode_YFSFid(bp, &vp->fid); bp = xdr_encode_YFSStoreStatus_mtime(bp, &op->mtime); - bp = xdr_encode_u64(bp, pos); - bp = xdr_encode_u64(bp, size); - bp = xdr_encode_u64(bp, i_size); + bp = xdr_encode_u64(bp, op->store.pos); + bp = xdr_encode_u64(bp, op->store.size); + bp = xdr_encode_u64(bp, op->store.i_size); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h index 9203cf6a8c53..3ccf591b2374 100644 --- a/include/trace/events/afs.h +++ b/include/trace/events/afs.h @@ -886,65 +886,52 @@ TRACE_EVENT(afs_call_done, __entry->rx_call) ); -TRACE_EVENT(afs_send_pages, - TP_PROTO(struct afs_call *call, struct msghdr *msg, - pgoff_t first, pgoff_t last, unsigned int offset), +TRACE_EVENT(afs_send_data, + TP_PROTO(struct afs_call *call, struct msghdr *msg), - TP_ARGS(call, msg, first, last, offset), + TP_ARGS(call, msg), TP_STRUCT__entry( __field(unsigned int, call ) - __field(pgoff_t, first ) - __field(pgoff_t, last ) - __field(unsigned int, nr ) - __field(unsigned int, bytes ) - __field(unsigned int, offset ) __field(unsigned int, flags ) + __field(loff_t, offset ) + __field(loff_t, count ) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->first = first; - __entry->last = last; - 
__entry->nr = msg->msg_iter.nr_segs; - __entry->bytes = msg->msg_iter.count; - __entry->offset = offset; __entry->flags = msg->msg_flags; + __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset; + __entry->count = iov_iter_count(&msg->msg_iter); ), - TP_printk(" c=%08x %lx-%lx-%lx b=%x o=%x f=%x", - __entry->call, - __entry->first, __entry->first + __entry->nr - 1, __entry->last, - __entry->bytes, __entry->offset, + TP_printk(" c=%08x o=%llx n=%llx f=%x", + __entry->call, __entry->offset, __entry->count, __entry->flags) ); -TRACE_EVENT(afs_sent_pages, - TP_PROTO(struct afs_call *call, pgoff_t first, pgoff_t last, - pgoff_t cursor, int ret), +TRACE_EVENT(afs_sent_data, + TP_PROTO(struct afs_call *call, struct msghdr *msg, int ret), - TP_ARGS(call, first, last, cursor, ret), + TP_ARGS(call, msg, ret), TP_STRUCT__entry( __field(unsigned int, call ) - __field(pgoff_t, first ) - __field(pgoff_t, last ) - __field(pgoff_t, cursor ) __field(int, ret ) + __field(loff_t, offset ) + __field(loff_t, count ) ), TP_fast_assign( __entry->call = call->debug_id; - __entry->first = first; - __entry->last = last; - __entry->cursor = cursor; __entry->ret = ret; + __entry->offset = msg->msg_iter.xarray_start + msg->msg_iter.iov_offset; + __entry->count = iov_iter_count(&msg->msg_iter); ), - TP_printk(" c=%08x %lx-%lx c=%lx r=%d", - __entry->call, - __entry->first, __entry->last, - __entry->cursor, __entry->ret) + TP_printk(" c=%08x o=%llx n=%llx r=%x", + __entry->call, __entry->offset, __entry->count, + __entry->ret) ); TRACE_EVENT(afs_dir_check_failed, From 630f5dda8442ca0bbbc20ab0140c5a3db34b486e Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:28 +0000 Subject: [PATCH 09/14] afs: Wait on PG_fscache before modifying/releasing a page PG_fscache is going to be used to indicate that a page is being written to the cache, and that the page should not be modified or released until it's finished. Make afs_invalidatepage() and afs_releasepage() wait for it. 
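The pattern is the same in both places; as a sketch (mirroring the
releasepage hunk below, with everything else stripped out):

	#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page)) {
			/* The caller must be able to block and to enter
			 * the fs before we can wait for the cache write
			 * to finish.
			 */
			if (!(gfp_flags & __GFP_DIRECT_RECLAIM) ||
			    !(gfp_flags & __GFP_FS))
				return false;
			wait_on_page_fscache(page);
		}
	#endif

A page undergoing a write to the cache is thereby pinned until the
cache I/O completes and PG_fscache is cleared.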
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/158861253957.340223.7465334678444521655.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/159465832417.1377938.3571599385208729791.stgit@warthog.procyon.org.uk/ Link: https://lore.kernel.org/r/160588536286.3465195.13231895135369807920.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118153708.1232039.3535103645871176749.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161049369.2537118.11591934943429117060.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340412903.1303470.6424701655031380012.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539556890.286939.5873470593519458598.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653812726.2770958.18167145829938766503.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789096241.6155.5907241930823579235.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/file.c | 9 +++++++++ fs/afs/write.c | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/fs/afs/file.c b/fs/afs/file.c index 4a34ffaf6de4..f1e30b89e41c 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -593,6 +593,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset, if (PagePrivate(page)) afs_invalidate_dirty(page, offset, length); + wait_on_page_fscache(page); _leave(""); } @@ -610,6 +611,14 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags) /* deny if page is being written to the cache and the caller hasn't * elected to wait */ +#ifdef CONFIG_AFS_FSCACHE + if (PageFsCache(page)) { + if (!(gfp_flags & __GFP_DIRECT_RECLAIM) || !(gfp_flags & __GFP_FS)) + return false; + wait_on_page_fscache(page); + } +#endif + if (PagePrivate(page)) { detach_page_private(page); trace_afs_page_dirty(vnode, tracepoint_string("rel"), page); diff --git a/fs/afs/write.c b/fs/afs/write.c index 6e41b982c71b..1b8cabf5ac92 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -117,6 +117,10 @@ int afs_write_begin(struct file *file, struct address_space *mapping, SetPageUptodate(page); } +#ifdef CONFIG_AFS_FSCACHE + wait_on_page_fscache(page); +#endif + try_again: /* See if this page is already partially written in a way that we can * merge the new write with. @@ -857,6 +861,11 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) /* Wait for the page to be written to the cache before we allow it to * be modified. We then assume the entire page will need writing back. */ +#ifdef CONFIG_AFS_FSCACHE + if (PageFsCache(vmf->page) && + wait_on_page_bit_killable(vmf->page, PG_fscache) < 0) + return VM_FAULT_RETRY; +#endif if (wait_on_page_writeback_killable(vmf->page)) return VM_FAULT_RETRY; @@ -947,5 +956,6 @@ int afs_launder_page(struct page *page) detach_page_private(page); trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page); + wait_on_page_fscache(page); return ret; } From 810caa3e6708ba234fc12591d84d4b46f9f05d72 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 30 Oct 2020 10:01:09 +0000 Subject: [PATCH 10/14] afs: Extract writeback extension into its own function Extract writeback extension into its own function to break up the writeback function a bit. 
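With the helper split out, the extension step in
afs_write_back_from_locked_page() reduces to roughly this (sketch taken
from the hunk below):

	if (start < final_page &&
	    (to == PAGE_SIZE || new_content))
		afs_extend_writeback(mapping, vnode, &count, start, final_page,
				     &offset, &to, new_content);

The decision about whether extension is permissible at all stays in the
caller, whilst the page-gathering loop lives in afs_extend_writeback().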
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/160588538471.3465195.782513375683399583.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118154610.1232039.1765365632920504822.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161050546.2537118.2202554806419189453.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340414102.1303470.9078891484034668985.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539558417.286939.2879469588895925399.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653813972.2770958.12671731209438112378.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789097132.6155.4916609419912731964.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/write.c | 109 ++++++++++++++++++++++++++++++------------------- 1 file changed, 67 insertions(+), 42 deletions(-) diff --git a/fs/afs/write.c b/fs/afs/write.c index 1b8cabf5ac92..4ccd2c263983 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -490,47 +490,25 @@ try_next_key: } /* - * Synchronously write back the locked page and any subsequent non-locked dirty - * pages. + * Extend the region to be written back to include subsequent contiguously + * dirty pages if possible, but don't sleep while doing so. + * + * If this page holds new content, then we can include filler zeros in the + * writeback. */ -static int afs_write_back_from_locked_page(struct address_space *mapping, - struct writeback_control *wbc, - struct page *primary_page, - pgoff_t final_page) +static void afs_extend_writeback(struct address_space *mapping, + struct afs_vnode *vnode, + long *_count, + pgoff_t start, + pgoff_t final_page, + unsigned *_offset, + unsigned *_to, + bool new_content) { - struct afs_vnode *vnode = AFS_FS_I(mapping->host); - struct iov_iter iter; struct page *pages[8], *page; - unsigned long count, priv; - unsigned n, offset, to, f, t; - pgoff_t start, first, last; - loff_t i_size, pos, end; - int loop, ret; - - _enter(",%lx", primary_page->index); - - count = 1; - if (test_set_page_writeback(primary_page)) - BUG(); - - /* Find all consecutive lockable dirty pages that have contiguous - * written regions, stopping when we find a page that is not - * immediately lockable, is not dirty or is missing, or we reach the - * end of the range. 
- */ - start = primary_page->index; - priv = page_private(primary_page); - offset = afs_page_dirty_from(primary_page, priv); - to = afs_page_dirty_to(primary_page, priv); - trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page); - - WARN_ON(offset == to); - if (offset == to) - trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page); - - if (start >= final_page || - (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))) - goto no_more; + unsigned long count = *_count, priv; + unsigned offset = *_offset, to = *_to, n, f, t; + int loop; start++; do { @@ -551,8 +529,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, for (loop = 0; loop < n; loop++) { page = pages[loop]; - if (to != PAGE_SIZE && - !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) + if (to != PAGE_SIZE && !new_content) break; if (page->index > final_page) break; @@ -566,8 +543,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, priv = page_private(page); f = afs_page_dirty_from(page, priv); t = afs_page_dirty_to(page, priv); - if (f != 0 && - !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) { + if (f != 0 && !new_content) { unlock_page(page); break; } @@ -593,6 +569,55 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, } while (start <= final_page && count < 65536); no_more: + *_count = count; + *_offset = offset; + *_to = to; +} + +/* + * Synchronously write back the locked page and any subsequent non-locked dirty + * pages. + */ +static int afs_write_back_from_locked_page(struct address_space *mapping, + struct writeback_control *wbc, + struct page *primary_page, + pgoff_t final_page) +{ + struct afs_vnode *vnode = AFS_FS_I(mapping->host); + struct iov_iter iter; + unsigned long count, priv; + unsigned offset, to; + pgoff_t start, first, last; + loff_t i_size, pos, end; + bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); + int ret; + + _enter(",%lx", primary_page->index); + + count = 1; + if (test_set_page_writeback(primary_page)) + BUG(); + + /* Find all consecutive lockable dirty pages that have contiguous + * written regions, stopping when we find a page that is not + * immediately lockable, is not dirty or is missing, or we reach the + * end of the range. + */ + start = primary_page->index; + priv = page_private(primary_page); + offset = afs_page_dirty_from(primary_page, priv); + to = afs_page_dirty_to(primary_page, priv); + trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page); + + WARN_ON(offset == to); + if (offset == to) + trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page); + + if (start < final_page && + (to == PAGE_SIZE || new_content)) + afs_extend_writeback(mapping, vnode, &count, start, final_page, + &offset, &to, new_content); + /* We now have a contiguous set of dirty pages, each with writeback * set; the first page is still locked at this point, but all the rest * have been unlocked. From e87b03f5830ecd8ca21836d3ee48c74f8d58fa31 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 20 Oct 2020 09:33:45 +0100 Subject: [PATCH 11/14] afs: Prepare for use of THPs As a prelude to supporting transparent huge pages, use thp_size() and similar rather than PAGE_SIZE/SHIFT. Further, try and frame everything in terms of file positions and lengths rather than page indices and numbers of pages. 
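Concretely, the conversions follow this pattern (sketch from the
afs_page_filler() hunk below; for an ordinary page thp_size() is
PAGE_SIZE and thp_nr_pages() is 1, so behaviour is unchanged until THPs
actually appear):

	/* Frame the request as a file position and a byte length. */
	req->pos = (loff_t)page->index << PAGE_SHIFT;
	req->len = thp_size(page);
	req->nr_pages = thp_nr_pages(page);

rather than assuming one page always covers exactly PAGE_SIZE bytes.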
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/160588540227.3465195.4752143929716269062.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118155821.1232039.540445038028845740.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161051439.2537118.15577827510426326534.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340415869.1303470.6040191748634322355.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539559365.286939.18344613540296085269.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653815142.2770958.454490670311230206.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789098713.6155.16394227991842480300.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/dir.c | 2 +- fs/afs/file.c | 8 +- fs/afs/internal.h | 2 - fs/afs/write.c | 430 +++++++++++++++++++++++++--------------------- 4 files changed, 242 insertions(+), 200 deletions(-) diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 8c093bfff8b6..117df15e5367 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -2083,6 +2083,6 @@ static void afs_dir_invalidatepage(struct page *page, unsigned int offset, afs_stat_v(dvnode, n_inval); /* we clean up only if the entire page is being invalidated */ - if (offset == 0 && length == PAGE_SIZE) + if (offset == 0 && length == thp_size(page)) detach_page_private(page); } diff --git a/fs/afs/file.c b/fs/afs/file.c index f1e30b89e41c..edf21c8708a3 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -329,8 +329,8 @@ static int afs_page_filler(struct key *key, struct page *page) req->vnode = vnode; req->key = key_get(key); req->pos = (loff_t)page->index << PAGE_SHIFT; - req->len = PAGE_SIZE; - req->nr_pages = 1; + req->len = thp_size(page); + req->nr_pages = thp_nr_pages(page); req->done = afs_file_read_done; req->cleanup = afs_file_read_cleanup; @@ -574,8 +574,8 @@ undirty: trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page); clear_page_dirty_for_io(page); full_invalidate: - detach_page_private(page); trace_afs_page_dirty(vnode, tracepoint_string("inval"), page); + detach_page_private(page); } /* @@ -620,8 +620,8 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags) #endif if (PagePrivate(page)) { - detach_page_private(page); trace_afs_page_dirty(vnode, tracepoint_string("rel"), page); + detach_page_private(page); } /* indicate that the page can be released */ diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 16020725cc68..9629b6430a52 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -815,8 +815,6 @@ struct afs_operation { loff_t pos; loff_t size; loff_t i_size; - pgoff_t first; /* first page in mapping to deal with */ - pgoff_t last; /* last page in mapping to deal with */ bool laundering; /* Laundering page, PG_writeback not set */ } store; struct { diff --git a/fs/afs/write.c b/fs/afs/write.c index 4ccd2c263983..099c7dad09c5 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -94,15 +94,15 @@ int afs_write_begin(struct file *file, struct address_space *mapping, struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct page *page; unsigned long priv; - unsigned f, from = pos & (PAGE_SIZE - 1); - unsigned t, to = from + len; - pgoff_t index = pos >> PAGE_SHIFT; + unsigned f, from; + unsigned t, to; + pgoff_t index; int ret; - _enter("{%llx:%llu},{%lx},%u,%u", - vnode->fid.vid, vnode->fid.vnode, index, from, to); + 
_enter("{%llx:%llu},%llx,%x", + vnode->fid.vid, vnode->fid.vnode, pos, len); - page = grab_cache_page_write_begin(mapping, index, flags); + page = grab_cache_page_write_begin(mapping, pos / PAGE_SIZE, flags); if (!page) return -ENOMEM; @@ -121,19 +121,20 @@ int afs_write_begin(struct file *file, struct address_space *mapping, wait_on_page_fscache(page); #endif + index = page->index; + from = pos - index * PAGE_SIZE; + to = from + len; + try_again: /* See if this page is already partially written in a way that we can * merge the new write with. */ - t = f = 0; if (PagePrivate(page)) { priv = page_private(page); f = afs_page_dirty_from(page, priv); t = afs_page_dirty_to(page, priv); ASSERTCMP(f, <=, t); - } - if (f != t) { if (PageWriteback(page)) { trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page); goto flush_conflicting_write; @@ -180,7 +181,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, { struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); unsigned long priv; - unsigned int f, from = pos & (PAGE_SIZE - 1); + unsigned int f, from = pos & (thp_size(page) - 1); unsigned int t, to = from + copied; loff_t i_size, maybe_i_size; int ret = 0; @@ -233,9 +234,8 @@ int afs_write_end(struct file *file, struct address_space *mapping, trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page); } - set_page_dirty(page); - if (PageDirty(page)) - _debug("dirtied"); + if (set_page_dirty(page)) + _debug("dirtied %lx", page->index); ret = copied; out: @@ -248,40 +248,43 @@ out: * kill all the pages in the given range */ static void afs_kill_pages(struct address_space *mapping, - pgoff_t first, pgoff_t last) + loff_t start, loff_t len) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); struct pagevec pv; - unsigned count, loop; + unsigned int loop, psize; - _enter("{%llx:%llu},%lx-%lx", - vnode->fid.vid, vnode->fid.vnode, first, last); + _enter("{%llx:%llu},%llx @%llx", + vnode->fid.vid, vnode->fid.vnode, len, start); pagevec_init(&pv); do { - _debug("kill %lx-%lx", first, last); + _debug("kill %llx @%llx", len, start); - count = last - first + 1; - if (count > PAGEVEC_SIZE) - count = PAGEVEC_SIZE; - pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); - ASSERTCMP(pv.nr, ==, count); + pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE, + PAGEVEC_SIZE, pv.pages); + if (pv.nr == 0) + break; - for (loop = 0; loop < count; loop++) { + for (loop = 0; loop < pv.nr; loop++) { struct page *page = pv.pages[loop]; + + if (page->index * PAGE_SIZE >= start + len) + break; + + psize = thp_size(page); + start += psize; + len -= psize; ClearPageUptodate(page); - SetPageError(page); end_page_writeback(page); - if (page->index >= first) - first = page->index + 1; lock_page(page); generic_error_remove_page(mapping, page); unlock_page(page); } __pagevec_release(&pv); - } while (first <= last); + } while (len > 0); _leave(""); } @@ -291,37 +294,40 @@ static void afs_kill_pages(struct address_space *mapping, */ static void afs_redirty_pages(struct writeback_control *wbc, struct address_space *mapping, - pgoff_t first, pgoff_t last) + loff_t start, loff_t len) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); struct pagevec pv; - unsigned count, loop; + unsigned int loop, psize; - _enter("{%llx:%llu},%lx-%lx", - vnode->fid.vid, vnode->fid.vnode, first, last); + _enter("{%llx:%llu},%llx @%llx", + vnode->fid.vid, vnode->fid.vnode, len, start); pagevec_init(&pv); do { - _debug("redirty %lx-%lx", first, last); + _debug("redirty %llx @%llx", len, start); - count = last - 
first + 1; - if (count > PAGEVEC_SIZE) - count = PAGEVEC_SIZE; - pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); - ASSERTCMP(pv.nr, ==, count); + pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE, + PAGEVEC_SIZE, pv.pages); + if (pv.nr == 0) + break; - for (loop = 0; loop < count; loop++) { + for (loop = 0; loop < pv.nr; loop++) { struct page *page = pv.pages[loop]; + if (page->index * PAGE_SIZE >= start + len) + break; + + psize = thp_size(page); + start += psize; + len -= psize; redirty_page_for_writepage(wbc, page); end_page_writeback(page); - if (page->index >= first) - first = page->index + 1; } __pagevec_release(&pv); - } while (first <= last); + } while (len > 0); _leave(""); } @@ -329,23 +335,28 @@ static void afs_redirty_pages(struct writeback_control *wbc, /* * completion of write to server */ -static void afs_pages_written_back(struct afs_vnode *vnode, pgoff_t start, pgoff_t last) +static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len) { struct address_space *mapping = vnode->vfs_inode.i_mapping; struct page *page; + pgoff_t end; - XA_STATE(xas, &mapping->i_pages, start); + XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE); - _enter("{%llx:%llu},{%lx-%lx}", - vnode->fid.vid, vnode->fid.vnode, start, last); + _enter("{%llx:%llu},{%x @%llx}", + vnode->fid.vid, vnode->fid.vnode, len, start); rcu_read_lock(); - xas_for_each(&xas, page, last) { - ASSERT(PageWriteback(page)); + end = (start + len - 1) / PAGE_SIZE; + xas_for_each(&xas, page, end) { + if (!PageWriteback(page)) { + kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end); + ASSERT(PageWriteback(page)); + } - detach_page_private(page); trace_afs_page_dirty(vnode, tracepoint_string("clear"), page); + detach_page_private(page); page_endio(page, true, 0); } @@ -404,7 +415,7 @@ static void afs_store_data_success(struct afs_operation *op) afs_vnode_commit_status(op, &op->file[0]); if (op->error == 0) { if (!op->store.laundering) - afs_pages_written_back(vnode, op->store.first, op->store.last); + afs_pages_written_back(vnode, op->store.pos, op->store.size); afs_stat_v(vnode, n_stores); atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes); } @@ -419,8 +430,7 @@ static const struct afs_operation_ops afs_store_data_operation = { /* * write to a file */ -static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, - loff_t pos, pgoff_t first, pgoff_t last, +static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos, bool laundering) { struct afs_operation *op; @@ -453,8 +463,6 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, op->file[0].dv_delta = 1; op->store.write_iter = iter; op->store.pos = pos; - op->store.first = first; - op->store.last = last; op->store.size = size; op->store.i_size = max(pos + size, i_size); op->store.laundering = laundering; @@ -499,40 +507,49 @@ try_next_key: static void afs_extend_writeback(struct address_space *mapping, struct afs_vnode *vnode, long *_count, - pgoff_t start, - pgoff_t final_page, - unsigned *_offset, - unsigned *_to, - bool new_content) + loff_t start, + loff_t max_len, + bool new_content, + unsigned int *_len) { - struct page *pages[8], *page; - unsigned long count = *_count, priv; - unsigned offset = *_offset, to = *_to, n, f, t; - int loop; + struct pagevec pvec; + struct page *page; + unsigned long priv; + unsigned int psize, filler = 0; + unsigned int f, t; + loff_t len = *_len; + pgoff_t index = (start + len) / PAGE_SIZE; + bool 
stop = true; + unsigned int i; + + XA_STATE(xas, &mapping->i_pages, index); + pagevec_init(&pvec); - start++; do { - _debug("more %lx [%lx]", start, count); - n = final_page - start + 1; - if (n > ARRAY_SIZE(pages)) - n = ARRAY_SIZE(pages); - n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages); - _debug("fgpc %u", n); - if (n == 0) - goto no_more; - if (pages[0]->index != start) { - do { - put_page(pages[--n]); - } while (n > 0); - goto no_more; - } + /* Firstly, we gather up a batch of contiguous dirty pages + * under the RCU read lock - but we can't clear the dirty flags + * there if any of those pages are mapped. + */ + rcu_read_lock(); - for (loop = 0; loop < n; loop++) { - page = pages[loop]; - if (to != PAGE_SIZE && !new_content) + xas_for_each(&xas, page, ULONG_MAX) { + stop = true; + if (xas_retry(&xas, page)) + continue; + if (xa_is_value(page)) break; - if (page->index > final_page) + if (page->index != index) break; + + if (!page_cache_get_speculative(page)) { + xas_reset(&xas); + continue; + } + + /* Has the page moved or been split? */ + if (unlikely(page != xas_reload(&xas))) + break; + if (!trylock_page(page)) break; if (!PageDirty(page) || PageWriteback(page)) { @@ -540,6 +557,7 @@ static void afs_extend_writeback(struct address_space *mapping, break; } + psize = thp_size(page); priv = page_private(page); f = afs_page_dirty_from(page, priv); t = afs_page_dirty_to(page, priv); @@ -547,110 +565,126 @@ static void afs_extend_writeback(struct address_space *mapping, unlock_page(page); break; } - to = t; + len += filler + t; + filler = psize - t; + if (len >= max_len || *_count <= 0) + stop = true; + else if (t == psize || new_content) + stop = false; + + index += thp_nr_pages(page); + if (!pagevec_add(&pvec, page)) + break; + if (stop) + break; + } + + if (!stop) + xas_pause(&xas); + rcu_read_unlock(); + + /* Now, if we obtained any pages, we can shift them to being + * writable and mark them for caching. + */ + if (!pagevec_count(&pvec)) + break; + + for (i = 0; i < pagevec_count(&pvec); i++) { + page = pvec.pages[i]; trace_afs_page_dirty(vnode, tracepoint_string("store+"), page); if (!clear_page_dirty_for_io(page)) BUG(); if (test_set_page_writeback(page)) BUG(); + + *_count -= thp_nr_pages(page); unlock_page(page); - put_page(page); - } - count += loop; - if (loop < n) { - for (; loop < n; loop++) - put_page(pages[loop]); - goto no_more; } - start += loop; - } while (start <= final_page && count < 65536); + pagevec_release(&pvec); + cond_resched(); + } while (!stop); -no_more: - *_count = count; - *_offset = offset; - *_to = to; + *_len = len; } /* * Synchronously write back the locked page and any subsequent non-locked dirty * pages. 
*/ -static int afs_write_back_from_locked_page(struct address_space *mapping, - struct writeback_control *wbc, - struct page *primary_page, - pgoff_t final_page) +static ssize_t afs_write_back_from_locked_page(struct address_space *mapping, + struct writeback_control *wbc, + struct page *page, + loff_t start, loff_t end) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); struct iov_iter iter; - unsigned long count, priv; - unsigned offset, to; - pgoff_t start, first, last; - loff_t i_size, pos, end; + unsigned long priv; + unsigned int offset, to, len, max_len; + loff_t i_size = i_size_read(&vnode->vfs_inode); bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); + long count = wbc->nr_to_write; int ret; - _enter(",%lx", primary_page->index); + _enter(",%lx,%llx-%llx", page->index, start, end); - count = 1; - if (test_set_page_writeback(primary_page)) + if (test_set_page_writeback(page)) BUG(); + count -= thp_nr_pages(page); + /* Find all consecutive lockable dirty pages that have contiguous * written regions, stopping when we find a page that is not * immediately lockable, is not dirty or is missing, or we reach the * end of the range. */ - start = primary_page->index; - priv = page_private(primary_page); - offset = afs_page_dirty_from(primary_page, priv); - to = afs_page_dirty_to(primary_page, priv); - trace_afs_page_dirty(vnode, tracepoint_string("store"), primary_page); + priv = page_private(page); + offset = afs_page_dirty_from(page, priv); + to = afs_page_dirty_to(page, priv); + trace_afs_page_dirty(vnode, tracepoint_string("store"), page); - WARN_ON(offset == to); - if (offset == to) - trace_afs_page_dirty(vnode, tracepoint_string("WARN"), primary_page); + len = to - offset; + start += offset; + if (start < i_size) { + /* Trim the write to the EOF; the extra data is ignored. Also + * put an upper limit on the size of a single storedata op. + */ + max_len = 65536 * 4096; + max_len = min_t(unsigned long long, max_len, end - start + 1); + max_len = min_t(unsigned long long, max_len, i_size - start); - if (start < final_page && - (to == PAGE_SIZE || new_content)) - afs_extend_writeback(mapping, vnode, &count, start, final_page, - &offset, &to, new_content); + if (len < max_len && + (to == thp_size(page) || new_content)) + afs_extend_writeback(mapping, vnode, &count, + start, max_len, new_content, &len); + len = min_t(loff_t, len, max_len); + } /* We now have a contiguous set of dirty pages, each with writeback * set; the first page is still locked at this point, but all the rest * have been unlocked. */ - unlock_page(primary_page); + unlock_page(page); - first = primary_page->index; - last = first + count - 1; - _debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to); + if (start < i_size) { + _debug("write back %x @%llx [%llx]", len, start, i_size); - pos = first; - pos <<= PAGE_SHIFT; - pos += offset; - end = last; - end <<= PAGE_SHIFT; - end += to; - - /* Trim the actual write down to the EOF */ - i_size = i_size_read(&vnode->vfs_inode); - if (end > i_size) - end = i_size; - - if (pos < i_size) { - iov_iter_xarray(&iter, WRITE, &mapping->i_pages, pos, end - pos); - ret = afs_store_data(vnode, &iter, pos, first, last, false); + iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len); + ret = afs_store_data(vnode, &iter, start, false); } else { + _debug("write discard %x @%llx [%llx]", len, start, i_size); + /* The dirty region was entirely beyond the EOF. 
*/ + afs_pages_written_back(vnode, start, len); ret = 0; } switch (ret) { case 0: - ret = count; + wbc->nr_to_write = count; + ret = len; break; default: @@ -662,13 +696,13 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, case -EKEYEXPIRED: case -EKEYREJECTED: case -EKEYREVOKED: - afs_redirty_pages(wbc, mapping, first, last); + afs_redirty_pages(wbc, mapping, start, len); mapping_set_error(mapping, ret); break; case -EDQUOT: case -ENOSPC: - afs_redirty_pages(wbc, mapping, first, last); + afs_redirty_pages(wbc, mapping, start, len); mapping_set_error(mapping, -ENOSPC); break; @@ -680,7 +714,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, case -ENOMEDIUM: case -ENXIO: trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail); - afs_kill_pages(mapping, first, last); + afs_kill_pages(mapping, start, len); mapping_set_error(mapping, ret); break; } @@ -695,19 +729,19 @@ static int afs_write_back_from_locked_page(struct address_space *mapping, */ int afs_writepage(struct page *page, struct writeback_control *wbc) { - int ret; + ssize_t ret; + loff_t start; _enter("{%lx},", page->index); + start = page->index * PAGE_SIZE; ret = afs_write_back_from_locked_page(page->mapping, wbc, page, - wbc->range_end >> PAGE_SHIFT); + start, LLONG_MAX - start); if (ret < 0) { - _leave(" = %d", ret); - return 0; + _leave(" = %zd", ret); + return ret; } - wbc->nr_to_write -= ret; - _leave(" = 0"); return 0; } @@ -717,35 +751,46 @@ int afs_writepage(struct page *page, struct writeback_control *wbc) */ static int afs_writepages_region(struct address_space *mapping, struct writeback_control *wbc, - pgoff_t index, pgoff_t end, pgoff_t *_next) + loff_t start, loff_t end, loff_t *_next) { struct page *page; - int ret, n; + ssize_t ret; + int n; - _enter(",,%lx,%lx,", index, end); + _enter("%llx,%llx,", start, end); do { - n = find_get_pages_range_tag(mapping, &index, end, - PAGECACHE_TAG_DIRTY, 1, &page); + pgoff_t index = start / PAGE_SIZE; + + n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE, + PAGECACHE_TAG_DIRTY, 1, &page); if (!n) break; + start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */ + _debug("wback %lx", page->index); - /* - * at this point we hold neither the i_pages lock nor the + /* At this point we hold neither the i_pages lock nor the * page lock: the page may be truncated or invalidated * (changing page->mapping to NULL), or even swizzled * back from swapper_space to tmpfs file mapping */ - ret = lock_page_killable(page); - if (ret < 0) { - put_page(page); - _leave(" = %d", ret); - return ret; + if (wbc->sync_mode != WB_SYNC_NONE) { + ret = lock_page_killable(page); + if (ret < 0) { + put_page(page); + return ret; + } + } else { + if (!trylock_page(page)) { + put_page(page); + return 0; + } } if (page->mapping != mapping || !PageDirty(page)) { + start += thp_size(page); unlock_page(page); put_page(page); continue; @@ -761,20 +806,20 @@ static int afs_writepages_region(struct address_space *mapping, if (!clear_page_dirty_for_io(page)) BUG(); - ret = afs_write_back_from_locked_page(mapping, wbc, page, end); + ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end); put_page(page); if (ret < 0) { - _leave(" = %d", ret); + _leave(" = %zd", ret); return ret; } - wbc->nr_to_write -= ret; + start += ret * PAGE_SIZE; cond_resched(); - } while (index < end && wbc->nr_to_write > 0); + } while (wbc->nr_to_write > 0); - *_next = index; - _leave(" = 0 [%lx]", *_next); + *_next = start; + _leave(" = 0 
[%llx]", *_next); return 0; } @@ -785,7 +830,7 @@ int afs_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct afs_vnode *vnode = AFS_FS_I(mapping->host); - pgoff_t start, end, next; + loff_t start, next; int ret; _enter(""); @@ -800,22 +845,19 @@ int afs_writepages(struct address_space *mapping, return 0; if (wbc->range_cyclic) { - start = mapping->writeback_index; - end = -1; - ret = afs_writepages_region(mapping, wbc, start, end, &next); + start = mapping->writeback_index * PAGE_SIZE; + ret = afs_writepages_region(mapping, wbc, start, LLONG_MAX, &next); if (start > 0 && wbc->nr_to_write > 0 && ret == 0) ret = afs_writepages_region(mapping, wbc, 0, start, &next); - mapping->writeback_index = next; + mapping->writeback_index = next / PAGE_SIZE; } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { - end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT); - ret = afs_writepages_region(mapping, wbc, 0, end, &next); + ret = afs_writepages_region(mapping, wbc, 0, LLONG_MAX, &next); if (wbc->nr_to_write > 0) mapping->writeback_index = next; } else { - start = wbc->range_start >> PAGE_SHIFT; - end = wbc->range_end >> PAGE_SHIFT; - ret = afs_writepages_region(mapping, wbc, start, end, &next); + ret = afs_writepages_region(mapping, wbc, + wbc->range_start, wbc->range_end, &next); } up_read(&vnode->validate_lock); @@ -873,13 +915,13 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync) */ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) { + struct page *page = thp_head(vmf->page); struct file *file = vmf->vma->vm_file; struct inode *inode = file_inode(file); struct afs_vnode *vnode = AFS_FS_I(inode); unsigned long priv; - _enter("{{%llx:%llu}},{%lx}", - vnode->fid.vid, vnode->fid.vnode, vmf->page->index); + _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index); sb_start_pagefault(inode->i_sb); @@ -887,30 +929,32 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) * be modified. We then assume the entire page will need writing back. */ #ifdef CONFIG_AFS_FSCACHE - if (PageFsCache(vmf->page) && - wait_on_page_bit_killable(vmf->page, PG_fscache) < 0) + if (PageFsCache(page) && + wait_on_page_bit_killable(page, PG_fscache) < 0) return VM_FAULT_RETRY; #endif - if (wait_on_page_writeback_killable(vmf->page)) + if (wait_on_page_writeback_killable(page)) return VM_FAULT_RETRY; - if (lock_page_killable(vmf->page) < 0) + if (lock_page_killable(page) < 0) return VM_FAULT_RETRY; /* We mustn't change page->private until writeback is complete as that * details the portion of the page we need to write back and we might * need to redirty the page if there's a problem. 
 */
-	wait_on_page_writeback(vmf->page);
+	wait_on_page_writeback(page);

-	priv = afs_page_dirty(vmf->page, 0, PAGE_SIZE);
+	priv = afs_page_dirty(page, 0, thp_size(page));
 	priv = afs_page_dirty_mmapped(priv);
-	if (PagePrivate(vmf->page))
-		set_page_private(vmf->page, priv);
-	else
-		attach_page_private(vmf->page, (void *)priv);
-	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), vmf->page);
+	if (PagePrivate(page)) {
+		set_page_private(page, priv);
+		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
+	} else {
+		attach_page_private(page, (void *)priv);
+		trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
+	}
 	file_update_time(file);

 	sb_end_pagefault(inode->i_sb);
@@ -963,7 +1007,7 @@ int afs_launder_page(struct page *page)
 	priv = page_private(page);
 	if (clear_page_dirty_for_io(page)) {
 		f = 0;
-		t = PAGE_SIZE;
+		t = thp_size(page);
 		if (PagePrivate(page)) {
 			f = afs_page_dirty_from(page, priv);
 			t = afs_page_dirty_to(page, priv);
@@ -975,12 +1019,12 @@ int afs_launder_page(struct page *page)
 		iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);

 		trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
-		ret = afs_store_data(vnode, &iter, (loff_t)page->index << PAGE_SHIFT,
-				     page->index, page->index, true);
+		ret = afs_store_data(vnode, &iter, (loff_t)page->index * PAGE_SIZE,
+				     true);
 	}

-	detach_page_private(page);
 	trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
+	detach_page_private(page);
 	wait_on_page_fscache(page);
 	return ret;
 }

From dc4191841d0998978349e9119ab2ccb080b8b957 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Fri, 18 Sep 2020 09:11:15 +0100
Subject: [PATCH 12/14] afs: Use the fs operation ops to handle FetchData
 completion

Use the 'success' and 'aborted' afs_operation_ops methods and add a
'failed' method to handle the completion of an AFS.FetchData,
AFS.FetchData64 or YFS.FetchData64 RPC operation, rather than directly
calling the done function pointed to by the afs_read struct from the
call delivery handler.

This means the done function will be called back on error also, not just
on successful completion.

This allows motion towards asynchronous data reception on data fetch
calls and allows any error to be handed off to the fscache read helper
in the same place as a successful completion.
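In outline, the completion dispatch then looks like this (a condensed
sketch of afs_wait_for_operation(); the success leg is paraphrased from
context that is not visible in the hunk below):

	switch (op->error) {
	case 0:
		if (op->ops->success)
			op->ops->success(op);	/* afs_fetch_data_success(),
						 * which ends in
						 * afs_fetch_data_notify() */
		break;
	case -ECONNABORTED:
		if (op->ops->aborted)
			op->ops->aborted(op);
		fallthrough;
	default:
		if (op->ops->failed)
			op->ops->failed(op);	/* afs_fetch_data_notify() */
		break;
	}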
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/160588541471.3465195.8807019223378490810.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118157260.1232039.6549085372718234792.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161052647.2537118.12922380836599003659.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340417106.1303470.3502017303898569631.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539560673.286939.391310781674212229.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653816367.2770958.5856904574822446404.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789099994.6155.473719823490561190.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/file.c | 15 +++++++++++++++ fs/afs/fs_operation.c | 4 +++- fs/afs/fsclient.c | 3 --- fs/afs/internal.h | 1 + fs/afs/yfsclient.c | 3 --- 5 files changed, 19 insertions(+), 7 deletions(-) diff --git a/fs/afs/file.c b/fs/afs/file.c index edf21c8708a3..2db810467d3f 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -254,6 +254,19 @@ void afs_put_read(struct afs_read *req) } } +static void afs_fetch_data_notify(struct afs_operation *op) +{ + struct afs_read *req = op->fetch.req; + int error = op->error; + + if (error == -ECONNABORTED) + error = afs_abort_to_error(op->ac.abort_code); + req->error = error; + + if (req->done) + req->done(req); +} + static void afs_fetch_data_success(struct afs_operation *op) { struct afs_vnode *vnode = op->file[0].vnode; @@ -262,6 +275,7 @@ static void afs_fetch_data_success(struct afs_operation *op) afs_vnode_commit_status(op, &op->file[0]); afs_stat_v(vnode, n_fetches); atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes); + afs_fetch_data_notify(op); } static void afs_fetch_data_put(struct afs_operation *op) @@ -275,6 +289,7 @@ static const struct afs_operation_ops afs_fetch_data_operation = { .issue_yfs_rpc = yfs_fs_fetch_data, .success = afs_fetch_data_success, .aborted = afs_check_for_remote_deletion, + .failed = afs_fetch_data_notify, .put = afs_fetch_data_put, }; diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index 71c58723763d..2cb0951acca6 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -198,8 +198,10 @@ void afs_wait_for_operation(struct afs_operation *op) case -ECONNABORTED: if (op->ops->aborted) op->ops->aborted(op); - break; + fallthrough; default: + if (op->ops->failed) + op->ops->failed(op); break; } diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 31e6b3635541..5e34f4dbd385 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -392,9 +392,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) break; } - if (req->done) - req->done(req); - _leave(" = 0 [done]"); return 0; } diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 9629b6430a52..ee283e3ebc4d 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -742,6 +742,7 @@ struct afs_operation_ops { void (*issue_yfs_rpc)(struct afs_operation *op); void (*success)(struct afs_operation *op); void (*aborted)(struct afs_operation *op); + void (*failed)(struct afs_operation *op); void (*edit_dir)(struct afs_operation *op); void (*put)(struct afs_operation *op); }; diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 363d6dd276c0..2b35cba8ad62 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -449,9 +449,6 @@ static int 
yfs_deliver_fs_fetch_data64(struct afs_call *call) break; } - if (req->done) - req->done(req); - _leave(" = 0 [done]"); return 0; } From 5cbf03985c67c7f0ac8c5382cf5d4d0d630f95f3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:29 +0000 Subject: [PATCH 13/14] afs: Use new netfs lib read helper API Make AFS use the new netfs read helpers to implement the VM read operations: - afs_readpage() now hands off responsibility to netfs_readpage(). - afs_readpages() is gone and replaced with afs_readahead(). - afs_readahead() just hands off responsibility to netfs_readahead(). These make use of the cache if a cookie is supplied, otherwise just call the ->issue_op() method a sufficient number of times to complete the entire request. Changes: v5: - Use proper wait function for PG_fscache in afs_page_mkwrite()[1]. - Use killable wait for PG_writeback in afs_page_mkwrite()[1]. v4: - Folded in error handling fixes to afs_req_issue_op(). - Added flag to netfs_subreq_terminated() to indicate that the caller may have been running async and stuff that might sleep needs punting to a workqueue. Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/2499407.1616505440@warthog.procyon.org.uk [1] Link: https://lore.kernel.org/r/160588542733.3465195.7526541422073350302.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161118158436.1232039.3884845981224091996.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161161053540.2537118.14904446369309535330.stgit@warthog.procyon.org.uk/ # v2 Link: https://lore.kernel.org/r/161340418739.1303470.5908092911600241280.stgit@warthog.procyon.org.uk/ # v3 Link: https://lore.kernel.org/r/161539561926.286939.5729036262354802339.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653817977.2770958.17696456811587237197.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789101258.6155.3879271028895121537.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/Kconfig | 1 + fs/afs/file.c | 329 +++++++++++----------------------------------- fs/afs/fsclient.c | 1 + fs/afs/internal.h | 3 + fs/afs/write.c | 7 +- 5 files changed, 89 insertions(+), 252 deletions(-) diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig index 1ad211d72b3b..fc8ba9142f2f 100644 --- a/fs/afs/Kconfig +++ b/fs/afs/Kconfig @@ -4,6 +4,7 @@ config AFS_FS depends on INET select AF_RXRPC select DNS_RESOLVER + select NETFS_SUPPORT help If you say Y here, you will get an experimental Andrew File System driver. It currently only supports unsecured read-only AFS access. 
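To illustrate the shape of the new API before the conversion below: a
filesystem wires itself up by passing a table of operations to the read
helpers. A minimal sketch (the 'myfs' names are hypothetical; a real
implementation, like the AFS one that follows, would also supply
init_rreq, begin_cache_operation and cleanup methods):

	static void myfs_req_issue_op(struct netfs_read_subrequest *subreq)
	{
		/* Start a read of subreq->len bytes at subreq->start into
		 * the inode's pagecache and call netfs_subreq_terminated()
		 * when it completes or fails.
		 */
	}

	static const struct netfs_read_request_ops myfs_req_ops = {
		.issue_op	= myfs_req_issue_op,
	};

	static int myfs_readpage(struct file *file, struct page *page)
	{
		return netfs_readpage(file, page, &myfs_req_ops, NULL);
	}

	static void myfs_readahead(struct readahead_control *ractl)
	{
		netfs_readahead(ractl, &myfs_req_ops, NULL);
	}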
diff --git a/fs/afs/file.c b/fs/afs/file.c index 2db810467d3f..10c6eaaac2cc 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "internal.h" static int afs_file_mmap(struct file *file, struct vm_area_struct *vma); @@ -22,8 +23,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset, unsigned int length); static int afs_releasepage(struct page *page, gfp_t gfp_flags); -static int afs_readpages(struct file *filp, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages); +static void afs_readahead(struct readahead_control *ractl); const struct file_operations afs_file_operations = { .open = afs_open, @@ -47,7 +47,7 @@ const struct inode_operations afs_file_inode_operations = { const struct address_space_operations afs_fs_aops = { .readpage = afs_readpage, - .readpages = afs_readpages, + .readahead = afs_readahead, .set_page_dirty = afs_set_page_dirty, .launder_page = afs_launder_page, .releasepage = afs_releasepage, @@ -184,61 +184,17 @@ int afs_release(struct inode *inode, struct file *file) } /* - * Handle completion of a read operation. + * Allocate a new read record. */ -static void afs_file_read_done(struct afs_read *req) +struct afs_read *afs_alloc_read(gfp_t gfp) { - struct afs_vnode *vnode = req->vnode; - struct page *page; - pgoff_t index = req->pos >> PAGE_SHIFT; - pgoff_t last = index + req->nr_pages - 1; + struct afs_read *req; - XA_STATE(xas, &vnode->vfs_inode.i_mapping->i_pages, index); + req = kzalloc(sizeof(struct afs_read), gfp); + if (req) + refcount_set(&req->usage, 1); - if (iov_iter_count(req->iter) > 0) { - /* The read was short - clear the excess buffer. */ - _debug("afterclear %zx %zx %llx/%llx", - req->iter->iov_offset, - iov_iter_count(req->iter), - req->actual_len, req->len); - iov_iter_zero(iov_iter_count(req->iter), req->iter); - } - - rcu_read_lock(); - xas_for_each(&xas, page, last) { - page_endio(page, false, 0); - put_page(page); - } - rcu_read_unlock(); - - task_io_account_read(req->len); - req->cleanup = NULL; -} - -/* - * Dispose of our locks and refs on the pages if the read failed. 
- */ -static void afs_file_read_cleanup(struct afs_read *req) -{ - struct page *page; - pgoff_t index = req->pos >> PAGE_SHIFT; - pgoff_t last = index + req->nr_pages - 1; - - if (req->iter) { - XA_STATE(xas, &req->vnode->vfs_inode.i_mapping->i_pages, index); - - _enter("%lu,%u,%zu", index, req->nr_pages, iov_iter_count(req->iter)); - - rcu_read_lock(); - xas_for_each(&xas, page, last) { - BUG_ON(xa_is_value(page)); - BUG_ON(PageCompound(page)); - - page_endio(page, false, req->error); - put_page(page); - } - rcu_read_unlock(); - } + return req; } /* @@ -257,14 +213,20 @@ void afs_put_read(struct afs_read *req) static void afs_fetch_data_notify(struct afs_operation *op) { struct afs_read *req = op->fetch.req; + struct netfs_read_subrequest *subreq = req->subreq; int error = op->error; if (error == -ECONNABORTED) error = afs_abort_to_error(op->ac.abort_code); req->error = error; - if (req->done) + if (subreq) { + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); + netfs_subreq_terminated(subreq, error ?: req->actual_len, false); + req->subreq = NULL; + } else if (req->done) { req->done(req); + } } static void afs_fetch_data_success(struct afs_operation *op) @@ -308,8 +270,11 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) key_serial(req->key)); op = afs_alloc_operation(req->key, vnode->volume); - if (IS_ERR(op)) + if (IS_ERR(op)) { + if (req->subreq) + netfs_subreq_terminated(req->subreq, PTR_ERR(op), false); return PTR_ERR(op); + } afs_op_set_vnode(op, 0, vnode); @@ -318,222 +283,86 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req) return afs_do_sync_operation(op); } -/* - * read page from file, directory or symlink, given a key to use - */ -static int afs_page_filler(struct key *key, struct page *page) +static void afs_req_issue_op(struct netfs_read_subrequest *subreq) { - struct inode *inode = page->mapping->host; - struct afs_vnode *vnode = AFS_FS_I(inode); - struct afs_read *req; - int ret; + struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); + struct afs_read *fsreq; - _enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index); + fsreq = afs_alloc_read(GFP_NOFS); + if (!fsreq) + return netfs_subreq_terminated(subreq, -ENOMEM, false); - BUG_ON(!PageLocked(page)); + fsreq->subreq = subreq; + fsreq->pos = subreq->start + subreq->transferred; + fsreq->len = subreq->len - subreq->transferred; + fsreq->key = subreq->rreq->netfs_priv; + fsreq->vnode = vnode; + fsreq->iter = &fsreq->def_iter; - ret = -ESTALE; - if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) - goto error; + iov_iter_xarray(&fsreq->def_iter, READ, + &fsreq->vnode->vfs_inode.i_mapping->i_pages, + fsreq->pos, fsreq->len); - req = kzalloc(sizeof(struct afs_read), GFP_KERNEL); - if (!req) - goto enomem; - - refcount_set(&req->usage, 1); - req->vnode = vnode; - req->key = key_get(key); - req->pos = (loff_t)page->index << PAGE_SHIFT; - req->len = thp_size(page); - req->nr_pages = thp_nr_pages(page); - req->done = afs_file_read_done; - req->cleanup = afs_file_read_cleanup; - - get_page(page); - iov_iter_xarray(&req->def_iter, READ, &page->mapping->i_pages, - req->pos, req->len); - req->iter = &req->def_iter; - - ret = afs_fetch_data(vnode, req); - if (ret < 0) - goto fetch_error; - - afs_put_read(req); - _leave(" = 0"); - return 0; - -fetch_error: - switch (ret) { - case -EINTR: - case -ENOMEM: - case -ERESTARTSYS: - case -EAGAIN: - afs_put_read(req); - goto error; - case -ENOENT: - _debug("got NOENT from server - marking file deleted and stale"); - set_bit(AFS_VNODE_DELETED, 
&vnode->flags); - ret = -ESTALE; - /* Fall through */ - default: - page_endio(page, false, ret); - afs_put_read(req); - _leave(" = %d", ret); - return ret; - } - -enomem: - ret = -ENOMEM; -error: - unlock_page(page); - _leave(" = %d", ret); - return ret; + afs_fetch_data(fsreq->vnode, fsreq); } -/* - * read page from file, directory or symlink, given a file to nominate the key - * to be used - */ -static int afs_readpage(struct file *file, struct page *page) +static int afs_symlink_readpage(struct page *page) { - struct key *key; + struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); + struct afs_read *fsreq; int ret; - if (file) { - key = afs_file_key(file); - ASSERT(key != NULL); - ret = afs_page_filler(key, page); - } else { - struct inode *inode = page->mapping->host; - key = afs_request_key(AFS_FS_S(inode->i_sb)->cell); - if (IS_ERR(key)) { - ret = PTR_ERR(key); - } else { - ret = afs_page_filler(key, page); - key_put(key); - } - } - return ret; -} - -/* - * Read a contiguous set of pages. - */ -static int afs_readpages_one(struct file *file, struct address_space *mapping, - struct list_head *pages) -{ - struct afs_vnode *vnode = AFS_FS_I(mapping->host); - struct afs_read *req; - struct list_head *p; - struct page *first, *page; - pgoff_t index; - int ret, n; - - /* Count the number of contiguous pages at the front of the list. Note - * that the list goes prev-wards rather than next-wards. - */ - first = lru_to_page(pages); - index = first->index + 1; - n = 1; - for (p = first->lru.prev; p != pages; p = p->prev) { - page = list_entry(p, struct page, lru); - if (page->index != index) - break; - index++; - n++; - } - - req = kzalloc(sizeof(struct afs_read), GFP_NOFS); - if (!req) + fsreq = afs_alloc_read(GFP_NOFS); + if (!fsreq) return -ENOMEM; - refcount_set(&req->usage, 1); - req->vnode = vnode; - req->key = key_get(afs_file_key(file)); - req->done = afs_file_read_done; - req->cleanup = afs_file_read_cleanup; - req->pos = first->index; - req->pos <<= PAGE_SHIFT; + fsreq->pos = page->index * PAGE_SIZE; + fsreq->len = PAGE_SIZE; + fsreq->vnode = vnode; + fsreq->iter = &fsreq->def_iter; + iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages, + fsreq->pos, fsreq->len); - /* Add pages to the LRU until it fails. We keep the pages ref'd and - * locked until the read is complete. - * - * Note that it's possible for the file size to change whilst we're - * doing this, but we rely on the server returning less than we asked - * for if the file shrank. We also rely on this to deal with a partial - * page at the end of the file. 
- */ - do { - page = lru_to_page(pages); - list_del(&page->lru); - index = page->index; - if (add_to_page_cache_lru(page, mapping, index, - readahead_gfp_mask(mapping))) { - put_page(page); - break; - } - - req->nr_pages++; - } while (req->nr_pages < n); - - if (req->nr_pages == 0) { - afs_put_read(req); - return 0; - } - - req->len = req->nr_pages * PAGE_SIZE; - iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, - req->pos, req->len); - req->iter = &req->def_iter; - - ret = afs_fetch_data(vnode, req); - if (ret < 0) - goto error; - - afs_put_read(req); - return 0; - -error: - if (ret == -ENOENT) { - _debug("got NOENT from server - marking file deleted and stale"); - set_bit(AFS_VNODE_DELETED, &vnode->flags); - ret = -ESTALE; - } - - afs_put_read(req); + ret = afs_fetch_data(fsreq->vnode, fsreq); + page_endio(page, false, ret); return ret; } -/* - * read a set of pages - */ -static int afs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) +static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file) { - struct key *key = afs_file_key(file); - struct afs_vnode *vnode; - int ret = 0; + rreq->netfs_priv = key_get(afs_file_key(file)); +} - _enter("{%d},{%lu},,%d", - key_serial(key), mapping->host->i_ino, nr_pages); +static int afs_begin_cache_operation(struct netfs_read_request *rreq) +{ + struct afs_vnode *vnode = AFS_FS_I(rreq->inode); - ASSERT(key != NULL); + return fscache_begin_read_operation(rreq, afs_vnode_cache(vnode)); +} - vnode = AFS_FS_I(mapping->host); - if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) { - _leave(" = -ESTALE"); - return -ESTALE; - } +static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv) +{ + key_put(netfs_priv); +} - /* attempt to read as many of the pages as possible */ - while (!list_empty(pages)) { - ret = afs_readpages_one(file, mapping, pages); - if (ret < 0) - break; - } +static const struct netfs_read_request_ops afs_req_ops = { + .init_rreq = afs_init_rreq, + .begin_cache_operation = afs_begin_cache_operation, + .issue_op = afs_req_issue_op, + .cleanup = afs_priv_cleanup, +}; - _leave(" = %d [netting]", ret); - return ret; +static int afs_readpage(struct file *file, struct page *page) +{ + if (!file) + return afs_symlink_readpage(page); + + return netfs_readpage(file, page, &afs_req_ops, NULL); +} + +static void afs_readahead(struct readahead_control *ractl) +{ + netfs_readahead(ractl, &afs_req_ops, NULL); } /* diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index 5e34f4dbd385..2f695a260442 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "internal.h" #include "afs_fs.h" #include "xdr_fs.h" diff --git a/fs/afs/internal.h b/fs/afs/internal.h index ee283e3ebc4d..f9a692fc08f4 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -14,6 +14,7 @@ #include #include #include +#define FSCACHE_USE_NEW_IO_API #include #include #include @@ -207,6 +208,7 @@ struct afs_read { loff_t file_size; /* File size returned by server */ struct key *key; /* The key to use to reissue the read */ struct afs_vnode *vnode; /* The file being read into. 
*/ + struct netfs_read_subrequest *subreq; /* Fscache helper read request this belongs to */ afs_dataversion_t data_version; /* Version number returned by server */ refcount_t usage; unsigned int call_debug_id; @@ -1049,6 +1051,7 @@ extern void afs_put_wb_key(struct afs_wb_key *); extern int afs_open(struct inode *, struct file *); extern int afs_release(struct inode *, struct file *); extern int afs_fetch_data(struct afs_vnode *, struct afs_read *); +extern struct afs_read *afs_alloc_read(gfp_t); extern void afs_put_read(struct afs_read *); static inline struct afs_read *afs_get_read(struct afs_read *req) diff --git a/fs/afs/write.c b/fs/afs/write.c index 099c7dad09c5..bc84c771b0fd 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -930,7 +930,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) */ #ifdef CONFIG_AFS_FSCACHE if (PageFsCache(page) && - wait_on_page_bit_killable(page, PG_fscache) < 0) + wait_on_page_fscache_killable(page) < 0) return VM_FAULT_RETRY; #endif @@ -944,7 +944,10 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf) * details the portion of the page we need to write back and we might * need to redirty the page if there's a problem. */ - wait_on_page_writeback(page); + if (wait_on_page_writeback_killable(page) < 0) { + unlock_page(page); + return VM_FAULT_RETRY; + } priv = afs_page_dirty(page, 0, thp_size(page)); priv = afs_page_dirty_mmapped(priv); From 3003bbd0697b659944237f3459489cb596ba196c Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 6 Feb 2020 14:22:29 +0000 Subject: [PATCH 14/14] afs: Use the netfs_write_begin() helper Make AFS use the new netfs_write_begin() helper to do the pre-reading required before the write. If successful, the helper returns with the required page filled in and locked. It may read more than just one page, expanding the read to meet cache granularity requirements as necessary. Note: A more advanced version of this could be made that does generic_perform_write() for a whole cache granule. This would make it easier to avoid doing the download/read for the data to be overwritten. 
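In outline, the filesystem's ->write_begin() then reduces to a single
call into the helper (a minimal sketch using the same hypothetical
'myfs' naming as above; the actual AFS conversion is in the diff below):

	static int myfs_write_begin(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned int len,
				    unsigned int flags,
				    struct page **pagep, void **fsdata)
	{
		/* Pre-read any part of the page that is not about to be
		 * overwritten; on success the page is returned locked and
		 * uptodate.
		 */
		return netfs_write_begin(file, mapping, pos, len, flags,
					 pagep, fsdata, &myfs_req_ops, NULL);
	}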
Signed-off-by: David Howells Tested-By: Marc Dionne cc: linux-afs@lists.infradead.org cc: linux-cachefs@redhat.com cc: linux-fsdevel@vger.kernel.org Link: https://lore.kernel.org/r/160588546422.3465195.1546354372589291098.stgit@warthog.procyon.org.uk/ # rfc Link: https://lore.kernel.org/r/161539563244.286939.16537296241609909980.stgit@warthog.procyon.org.uk/ # v4 Link: https://lore.kernel.org/r/161653819291.2770958.406013201547420544.stgit@warthog.procyon.org.uk/ # v5 Link: https://lore.kernel.org/r/161789102743.6155.17396591236631761195.stgit@warthog.procyon.org.uk/ # v6 --- fs/afs/file.c | 19 +++++++- fs/afs/internal.h | 1 + fs/afs/write.c | 108 ++++++---------------------------------------- 3 files changed, 31 insertions(+), 97 deletions(-) diff --git a/fs/afs/file.c b/fs/afs/file.c index 10c6eaaac2cc..db035ae2a134 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -333,6 +333,13 @@ static void afs_init_rreq(struct netfs_read_request *rreq, struct file *file) rreq->netfs_priv = key_get(afs_file_key(file)); } +static bool afs_is_cache_enabled(struct inode *inode) +{ + struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode)); + + return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects); +} + static int afs_begin_cache_operation(struct netfs_read_request *rreq) { struct afs_vnode *vnode = AFS_FS_I(rreq->inode); @@ -340,14 +347,24 @@ static int afs_begin_cache_operation(struct netfs_read_request *rreq) return fscache_begin_read_operation(rreq, afs_vnode_cache(vnode)); } +static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len, + struct page *page, void **_fsdata) +{ + struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); + + return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0; +} + static void afs_priv_cleanup(struct address_space *mapping, void *netfs_priv) { key_put(netfs_priv); } -static const struct netfs_read_request_ops afs_req_ops = { +const struct netfs_read_request_ops afs_req_ops = { .init_rreq = afs_init_rreq, + .is_cache_enabled = afs_is_cache_enabled, .begin_cache_operation = afs_begin_cache_operation, + .check_write_begin = afs_check_write_begin, .issue_op = afs_req_issue_op, .cleanup = afs_priv_cleanup, }; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index f9a692fc08f4..52157a05796a 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -1045,6 +1045,7 @@ extern void afs_dynroot_depopulate(struct super_block *); extern const struct address_space_operations afs_fs_aops; extern const struct inode_operations afs_file_inode_operations; extern const struct file_operations afs_file_operations; +extern const struct netfs_read_request_ops afs_req_ops; extern int afs_cache_wb_key(struct afs_vnode *, struct afs_file *); extern void afs_put_wb_key(struct afs_wb_key *); diff --git a/fs/afs/write.c b/fs/afs/write.c index bc84c771b0fd..dc66ff15dd16 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include #include "internal.h" /* @@ -22,68 +24,6 @@ int afs_set_page_dirty(struct page *page) return __set_page_dirty_nobuffers(page); } -/* - * Handle completion of a read operation to fill a page. - */ -static void afs_fill_hole(struct afs_read *req) -{ - if (iov_iter_count(req->iter) > 0) - /* The read was short - clear the excess buffer. 
*/ - iov_iter_zero(iov_iter_count(req->iter), req->iter); -} - -/* - * partly or wholly fill a page that's under preparation for writing - */ -static int afs_fill_page(struct file *file, - loff_t pos, unsigned int len, struct page *page) -{ - struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); - struct afs_read *req; - size_t p; - void *data; - int ret; - - _enter(",,%llu", (unsigned long long)pos); - - if (pos >= vnode->vfs_inode.i_size) { - p = pos & ~PAGE_MASK; - ASSERTCMP(p + len, <=, PAGE_SIZE); - data = kmap(page); - memset(data + p, 0, len); - kunmap(page); - return 0; - } - - req = kzalloc(sizeof(struct afs_read), GFP_KERNEL); - if (!req) - return -ENOMEM; - - refcount_set(&req->usage, 1); - req->vnode = vnode; - req->done = afs_fill_hole; - req->key = key_get(afs_file_key(file)); - req->pos = pos; - req->len = len; - req->nr_pages = 1; - req->iter = &req->def_iter; - iov_iter_xarray(&req->def_iter, READ, &file->f_mapping->i_pages, pos, len); - - ret = afs_fetch_data(vnode, req); - afs_put_read(req); - if (ret < 0) { - if (ret == -ENOENT) { - _debug("got NOENT from server" - " - marking file deleted and stale"); - set_bit(AFS_VNODE_DELETED, &vnode->flags); - ret = -ESTALE; - } - } - - _leave(" = %d", ret); - return ret; -} - /* * prepare to perform part of a write to a page */ @@ -102,24 +42,14 @@ int afs_write_begin(struct file *file, struct address_space *mapping, _enter("{%llx:%llu},%llx,%x", vnode->fid.vid, vnode->fid.vnode, pos, len); - page = grab_cache_page_write_begin(mapping, pos / PAGE_SIZE, flags); - if (!page) - return -ENOMEM; - - if (!PageUptodate(page) && len != PAGE_SIZE) { - ret = afs_fill_page(file, pos & PAGE_MASK, PAGE_SIZE, page); - if (ret < 0) { - unlock_page(page); - put_page(page); - _leave(" = %d [prep]", ret); - return ret; - } - SetPageUptodate(page); - } - -#ifdef CONFIG_AFS_FSCACHE - wait_on_page_fscache(page); -#endif + /* Prefetch area to be written into the cache if we're caching this + * file. We need to do this before we get a lock on the page in case + * there's more than one writer competing for the same cache block. + */ + ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata, + &afs_req_ops, NULL); + if (ret < 0) + return ret; index = page->index; from = pos - index * PAGE_SIZE; @@ -184,7 +114,6 @@ int afs_write_end(struct file *file, struct address_space *mapping, unsigned int f, from = pos & (thp_size(page) - 1); unsigned int t, to = from + copied; loff_t i_size, maybe_i_size; - int ret = 0; _enter("{%llx:%llu},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index); @@ -203,19 +132,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, write_sequnlock(&vnode->cb_lock); } - if (!PageUptodate(page)) { - if (copied < len) { - /* Try and load any missing data from the server. The - * unmarshalling routine will take care of clearing any - * bits that are beyond the EOF. - */ - ret = afs_fill_page(file, pos + copied, - len - copied, page); - if (ret < 0) - goto out; - } - SetPageUptodate(page); - } + ASSERT(PageUptodate(page)); if (PagePrivate(page)) { priv = page_private(page); @@ -236,12 +153,11 @@ int afs_write_end(struct file *file, struct address_space *mapping, if (set_page_dirty(page)) _debug("dirtied %lx", page->index); - ret = copied; out: unlock_page(page); put_page(page); - return ret; + return copied; } /*