netfs: Remove deprecated use of PG_private_2 as a second writeback flag

Remove the deprecated use of PG_private_2 in netfslib.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Matthew Wilcox (Oracle) <willy@infradead.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
commit ae678317b9
parent 2e9d7e4b98
Author: David Howells <dhowells@redhat.com>
Date: 2023-11-24 13:02:55 +00:00

3 changed files with 2 additions and 169 deletions
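
For context: netfslib was using PG_private_2 (the "private 2" folio flag) as a
second writeback flag to mark folios whose contents were still being copied to
the local cache, and anyone who needed such a folio stable had to wait on that
flag in addition to PG_writeback. The hunks below delete that scheme. As a
rough illustration only (not part of the patch; the helpers are the folio
private-2 API that the deleted code calls), the deprecated pattern looked
like this:

	/* Deprecated: PG_private_2 doubling as a second writeback flag. */
	static void start_copy_to_cache(struct folio *folio)
	{
		folio_start_private_2(folio);	/* "copy to cache in flight" */
	}

	static void copy_to_cache_done(struct folio *folio)
	{
		folio_end_private_2(folio);	/* clear the flag, wake waiters */
	}

	static void wait_for_folio_stable(struct folio *folio)
	{
		folio_wait_writeback(folio);
		folio_wait_private_2(folio);	/* the extra wait being removed */
	}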

fs/ceph/addr.c

@@ -498,11 +498,6 @@ const struct netfs_request_ops ceph_netfs_ops = {
 };
 
 #ifdef CONFIG_CEPH_FSCACHE
-static void ceph_set_page_fscache(struct page *page)
-{
-	folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
-}
-
 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
 {
 	struct inode *inode = priv;
@@ -520,10 +515,6 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
 			       ceph_fscache_write_terminated, inode, true, caching);
 }
 #else
-static inline void ceph_set_page_fscache(struct page *page)
-{
-}
-
 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
 {
 }
@@ -715,8 +706,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 		len = wlen;
 
 	set_page_writeback(page);
-	if (caching)
-		ceph_set_page_fscache(page);
 	ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
 	if (IS_ENCRYPTED(inode)) {
@@ -800,8 +789,6 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
 		return AOP_WRITEPAGE_ACTIVATE;
 	}
 
-	folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
-
 	err = writepage_nounlock(page, wbc);
 	if (err == -ERESTARTSYS) {
 		/* direct memory reclaimer was killed by SIGKILL. return 0
@@ -1075,8 +1062,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 				unlock_page(page);
 				break;
 			}
-			if (PageWriteback(page) ||
-			    PagePrivate2(page) /* [DEPRECATED] */) {
+			if (PageWriteback(page)) {
 				if (wbc->sync_mode == WB_SYNC_NONE) {
 					doutc(cl, "%p under writeback\n", page);
 					unlock_page(page);
@@ -1084,7 +1070,6 @@ static int ceph_writepages_start(struct address_space *mapping,
 				}
 				doutc(cl, "waiting on writeback %p\n", page);
 				wait_on_page_writeback(page);
-				folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
 			}
 
 			if (!clear_page_dirty_for_io(page)) {
@@ -1269,8 +1254,6 @@ static int ceph_writepages_start(struct address_space *mapping,
 				}
 
 				set_page_writeback(page);
-				if (caching)
-					ceph_set_page_fscache(page);
 				len += thp_size(page);
 			}
 			ceph_fscache_write_to_cache(inode, offset, len, caching);
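
With these hunks, ceph no longer marks pages with PG_private_2 around the
cache copy: the copy to fscache is started inside the ordinary writeback
window, and waiters rely on PG_writeback alone. A minimal sketch of the
single-flag lifecycle (illustrative only, not taken from the patch):

	/* Writer side: one writeback window covers both the server write
	 * and the cache copy; the final completion ends it. */
	static void write_folio_sketch(struct folio *folio)
	{
		folio_start_writeback(folio);
		/* ... submit network write and cache write; the last
		 * completion calls folio_end_writeback(folio) ... */
	}

	/* Waiter side: only one flag left to wait on. */
	static void wait_folio_sketch(struct folio *folio)
	{
		folio_wait_writeback(folio);
	}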

fs/netfs/buffered_read.c

@@ -464,7 +464,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
 	if (!netfs_is_cache_enabled(ctx) &&
 	    netfs_skip_folio_read(folio, pos, len, false)) {
 		netfs_stat(&netfs_n_rh_write_zskip);
-		goto have_folio_no_wait;
+		goto have_folio;
 	}
 
 	rreq = netfs_alloc_request(mapping, file,
@@ -505,12 +505,6 @@ int netfs_write_begin(struct netfs_inode *ctx,
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
-	if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) {
-		ret = folio_wait_private_2_killable(folio);
-		if (ret < 0)
-			goto error;
-	}
-have_folio_no_wait:
 	*_folio = folio;
 	_leave(" = 0");
 	return 0;
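
The two labels existed only so the read-skipping fast path could bypass the
PG_private_2 wait; with that wait gone, both paths converge on a single
have_folio label. Callers of netfs_write_begin() are unaffected. For
illustration, a filesystem's ->write_begin typically wraps it like this
(hypothetical myfs_* naming, sketched from the signature shown above):

	static int myfs_write_begin(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned int len,
				    struct page **pagep, void **fsdata)
	{
		struct folio *folio;
		int ret;

		ret = netfs_write_begin(netfs_inode(mapping->host), file,
					mapping, pos, len, &folio, fsdata);
		if (ret < 0)
			return ret;
		*pagep = folio_file_page(folio, pos / PAGE_SIZE);
		return 0;
	}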

fs/netfs/io.c

@@ -98,146 +98,6 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
 	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
 }
 
-/*
- * [DEPRECATED] Deal with the completion of writing the data to the cache.  We
- * have to clear the PG_fscache bits on the folios involved and release the
- * caller's ref.
- *
- * May be called in softirq mode and we inherit a ref from the caller.
- */
-static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
-					  bool was_async)
-{
-	struct netfs_io_subrequest *subreq;
-	struct folio *folio;
-	pgoff_t unlocked = 0;
-	bool have_unlocked = false;
-
-	rcu_read_lock();
-
-	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-		XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
-
-		xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
-			if (xas_retry(&xas, folio))
-				continue;
-
-			/* We might have multiple writes from the same huge
-			 * folio, but we mustn't unlock a folio more than once.
-			 */
-			if (have_unlocked && folio->index <= unlocked)
-				continue;
-			unlocked = folio_next_index(folio) - 1;
-			trace_netfs_folio(folio, netfs_folio_trace_end_copy);
-			folio_end_private_2(folio);
-			have_unlocked = true;
-		}
-	}
-
-	rcu_read_unlock();
-	netfs_rreq_completed(rreq, was_async);
-}
-
-static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
-				       bool was_async) /* [DEPRECATED] */
-{
-	struct netfs_io_subrequest *subreq = priv;
-	struct netfs_io_request *rreq = subreq->rreq;
-
-	if (IS_ERR_VALUE(transferred_or_error)) {
-		netfs_stat(&netfs_n_rh_write_failed);
-		trace_netfs_failure(rreq, subreq, transferred_or_error,
-				    netfs_fail_copy_to_cache);
-	} else {
-		netfs_stat(&netfs_n_rh_write_done);
-	}
-
-	trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
-
-	/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
-	if (atomic_dec_and_test(&rreq->nr_copy_ops))
-		netfs_rreq_unmark_after_write(rreq, was_async);
-
-	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
-}
-
-/*
- * [DEPRECATED] Perform any outstanding writes to the cache.  We inherit a ref
- * from the caller.
- */
-static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
-{
-	struct netfs_cache_resources *cres = &rreq->cache_resources;
-	struct netfs_io_subrequest *subreq, *next, *p;
-	struct iov_iter iter;
-	int ret;
-
-	trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
-
-	/* We don't want terminating writes trying to wake us up whilst we're
-	 * still going through the list.
-	 */
-	atomic_inc(&rreq->nr_copy_ops);
-
-	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
-		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
-			list_del_init(&subreq->rreq_link);
-			netfs_put_subrequest(subreq, false,
-					     netfs_sreq_trace_put_no_copy);
-		}
-	}
-
-	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
-		/* Amalgamate adjacent writes */
-		while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
-			next = list_next_entry(subreq, rreq_link);
-			if (next->start != subreq->start + subreq->len)
-				break;
-			subreq->len += next->len;
-			list_del_init(&next->rreq_link);
-			netfs_put_subrequest(next, false,
-					     netfs_sreq_trace_put_merged);
-		}
-
-		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
-					       subreq->len, rreq->i_size, true);
-		if (ret < 0) {
-			trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
-			trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
-			continue;
-		}
-
-		iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
-				subreq->start, subreq->len);
-
-		atomic_inc(&rreq->nr_copy_ops);
-		netfs_stat(&netfs_n_rh_write);
-		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
-		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
-		cres->ops->write(cres, subreq->start, &iter,
-				 netfs_rreq_copy_terminated, subreq);
-	}
-
-	/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
-	if (atomic_dec_and_test(&rreq->nr_copy_ops))
-		netfs_rreq_unmark_after_write(rreq, false);
-}
-
-static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
-{
-	struct netfs_io_request *rreq =
-		container_of(work, struct netfs_io_request, work);
-
-	netfs_rreq_do_write_to_cache(rreq);
-}
-
-static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
-{
-	rreq->work.func = netfs_rreq_write_to_cache_work;
-	if (!queue_work(system_unbound_wq, &rreq->work))
-		BUG();
-}
-
 /*
  * Handle a short read.
  */
@@ -410,10 +270,6 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
-	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
-	    test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
-		return netfs_rreq_write_to_cache(rreq);
-
 	netfs_rreq_completed(rreq, was_async);
 }
 
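
All of the machinery removed above existed solely to copy freshly read data
into the cache and then clear PG_private_2 on the folios involved; with the
flag retired, the rest of this series routes that copy through the normal
writeback path instead. One idea from the deleted code worth noting is the
"amalgamate adjacent writes" step, which merged contiguous subrequests before
issuing cache writes. The same technique in standalone form (plain userspace
C sketch, not kernel API):

	#include <stdlib.h>

	struct extent {
		unsigned long long start, len;
		struct extent *next;
	};

	/* Absorb each successor that starts exactly where the current
	 * extent ends, mirroring the removed amalgamation loop. */
	static void merge_adjacent(struct extent *e)
	{
		while (e && e->next) {
			struct extent *n = e->next;

			if (n->start == e->start + e->len) {
				e->len += n->len;
				e->next = n->next;
				free(n);
			} else {
				e = n;
			}
		}
	}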