netfs: Trace refcounting on the netfs_io_subrequest struct

Add refcount tracing for the netfs_io_subrequest structure.

Changes
=======
ver #3)
 - Switch 'W=' to 'R=' in the traceline to match other request debug IDs.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com

Link: https://lore.kernel.org/r/164622998584.3564931.5052255990645723639.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678202603.1200972.14726007419792315578.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692901860.2099075.4845820886851239935.stgit@warthog.procyon.org.uk/ # v3
David Howells 2022-02-17 15:01:24 +00:00
parent de74023bef
commit 6cd3d6fd1f
5 changed files with 81 additions and 21 deletions
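
For a rough sense of what this produces at runtime: the new event is registered under the "netfs" trace system, so once the patch is applied it can be enabled through tracefs in the usual way, and every get/put of a subrequest then emits one line in the TP_printk() format added below, R=<request debug ID>[<subrequest index>] <what> r=<resulting refcount>. The commands and values here are purely illustrative:

    # echo 1 > /sys/kernel/tracing/events/netfs/netfs_sreq_ref/enable
    # cat /sys/kernel/tracing/trace_pipe
    ... netfs_sreq_ref: R=00000003[1] GET SHORTRD r=3
    ... netfs_sreq_ref: R=00000003[1] PUT TERM    r=2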


@@ -25,8 +25,6 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
 void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
 		       enum netfs_rreq_ref_trace what);
 struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
-void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async);
-void netfs_get_subrequest(struct netfs_io_subrequest *subreq);
 
 static inline void netfs_see_request(struct netfs_io_request *rreq,
 				     enum netfs_rreq_ref_trace what)


@@ -53,7 +53,8 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
 		subreq = list_first_entry(&rreq->subrequests,
 					  struct netfs_io_subrequest, rreq_link);
 		list_del(&subreq->rreq_link);
-		netfs_put_subrequest(subreq, was_async);
+		netfs_put_subrequest(subreq, was_async,
+				     netfs_sreq_trace_put_clear);
 	}
 }
@@ -101,7 +102,7 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq
 	subreq = kzalloc(sizeof(struct netfs_io_subrequest), GFP_KERNEL);
 	if (subreq) {
 		INIT_LIST_HEAD(&subreq->rreq_link);
-		refcount_set(&subreq->usage, 2);
+		refcount_set(&subreq->ref, 2);
 		subreq->rreq = rreq;
 		netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
 		netfs_stat(&netfs_n_rh_sreq);
@@ -110,13 +111,18 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq
 	return subreq;
 }
 
-void netfs_get_subrequest(struct netfs_io_subrequest *subreq)
+void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+			  enum netfs_sreq_ref_trace what)
 {
-	refcount_inc(&subreq->usage);
+	int r;
+
+	__refcount_inc(&subreq->ref, &r);
+	trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
+			     what);
 }
 
-static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq,
-				   bool was_async)
+static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
+				  bool was_async)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
@@ -126,8 +132,16 @@ static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq,
 	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
 }
 
-void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async)
+void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
+			  enum netfs_sreq_ref_trace what)
 {
-	if (refcount_dec_and_test(&subreq->usage))
-		__netfs_put_subrequest(subreq, was_async);
+	unsigned int debug_index = subreq->debug_index;
+	unsigned int debug_id = subreq->rreq->debug_id;
+	bool dead;
+	int r;
+
+	dead = __refcount_dec_and_test(&subreq->ref, &r);
+	trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
+	if (dead)
+		netfs_free_subrequest(subreq, was_async);
 }
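
The get/put helpers above follow a common refcount-tracing pattern: the __refcount_*() variants return the pre-operation count through their int pointer argument, so the traceline can report the post-operation value (r + 1 on get, r - 1 on put) without re-reading the counter, and netfs_put_subrequest() copies the debug IDs out of the object before dropping its reference because the subrequest may already have been freed by the time the trace line is emitted. Below is a minimal userspace analogue of the same pattern, a sketch only, with C11 atomics standing in for refcount_t, printf() standing in for the tracepoint, and all names invented for illustration:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct subreq {
	atomic_int ref;
	unsigned int debug_id;
	unsigned int debug_index;
};

/* Stand-in for trace_netfs_sreq_ref(). */
static void trace_ref(unsigned int id, unsigned int index, int ref,
		      const char *what)
{
	printf("R=%08x[%x] %s r=%d\n", id, index, what, ref);
}

static void get_subreq(struct subreq *s, const char *what)
{
	/* fetch_add returns the old count, so the new count is old + 1. */
	int old = atomic_fetch_add(&s->ref, 1);

	trace_ref(s->debug_id, s->debug_index, old + 1, what);
}

static void put_subreq(struct subreq *s, const char *what)
{
	/* Copy the IDs out first: once the count may have hit zero, the
	 * object must not be touched again when emitting the trace line.
	 */
	unsigned int id = s->debug_id, index = s->debug_index;
	int old = atomic_fetch_sub(&s->ref, 1);

	trace_ref(id, index, old - 1, what);
	if (old == 1)
		free(s);
}

int main(void)
{
	struct subreq *s = calloc(1, sizeof(*s));

	atomic_init(&s->ref, 2);	/* mirrors refcount_set(&subreq->ref, 2) */
	s->debug_id = 3;
	s->debug_index = 1;
	get_subreq(s, "GET SHORTRD");
	put_subreq(s, "PUT TERM");
	put_subreq(s, "PUT TERM");
	put_subreq(s, "PUT CLEAR");	/* last ref: frees the object */
	return 0;
}

Running it prints lines of the same shape as the traceline added by this patch, e.g. "R=00000003[1] GET SHORTRD r=3".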


@@ -167,7 +167,7 @@ static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
 	if (atomic_dec_and_test(&rreq->nr_copy_ops))
 		netfs_rreq_unmark_after_write(rreq, was_async);
 
-	netfs_put_subrequest(subreq, was_async);
+	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
 }
 
 /*
@@ -191,7 +191,8 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
 	list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
 		if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
 			list_del_init(&subreq->rreq_link);
-			netfs_put_subrequest(subreq, false);
+			netfs_put_subrequest(subreq, false,
+					     netfs_sreq_trace_put_no_copy);
 		}
 	}
@@ -203,7 +204,8 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
 				break;
 			subreq->len += next->len;
 			list_del_init(&next->rreq_link);
-			netfs_put_subrequest(next, false);
+			netfs_put_subrequest(next, false,
+					     netfs_sreq_trace_put_merged);
 		}
 
 		ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
@@ -219,7 +221,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
 		atomic_inc(&rreq->nr_copy_ops);
 		netfs_stat(&netfs_n_rh_write);
-		netfs_get_subrequest(subreq);
+		netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
 		trace_netfs_sreq(subreq, netfs_sreq_trace_write);
 		cres->ops->write(cres, subreq->start, &iter,
 				 netfs_rreq_copy_terminated, subreq);
@@ -342,7 +344,7 @@ static void netfs_rreq_short_read(struct netfs_io_request *rreq,
 	netfs_stat(&netfs_n_rh_short_read);
 	trace_netfs_sreq(subreq, netfs_sreq_trace_resubmit_short);
 
-	netfs_get_subrequest(subreq);
+	netfs_get_subrequest(subreq, netfs_sreq_trace_get_short_read);
 	atomic_inc(&rreq->nr_outstanding);
 	if (subreq->source == NETFS_READ_FROM_CACHE)
 		netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
@@ -376,7 +378,7 @@ static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
 			subreq->error = 0;
 			netfs_stat(&netfs_n_rh_download_instead);
 			trace_netfs_sreq(subreq, netfs_sreq_trace_download_instead);
-			netfs_get_subrequest(subreq);
+			netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
 			atomic_inc(&rreq->nr_outstanding);
 			netfs_read_from_server(rreq, subreq);
 		} else if (test_bit(NETFS_SREQ_SHORT_IO, &subreq->flags)) {
@@ -538,7 +540,7 @@ out:
 	else if (u == 1)
 		wake_up_var(&rreq->nr_outstanding);
 
-	netfs_put_subrequest(subreq, was_async);
+	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
 	return;
 
 incomplete:
@@ -683,7 +685,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 
 subreq_failed:
 	rreq->error = subreq->error;
-	netfs_put_subrequest(subreq, false);
+	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_failed);
 	return false;
 }
@@ -1030,13 +1032,13 @@ retry:
 	 */
 	ractl._nr_pages = folio_nr_pages(folio);
 	netfs_rreq_expand(rreq, &ractl);
-	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 
 	/* We hold the folio locks, so we can drop the references */
 	folio_get(folio);
 	while (readahead_folio(&ractl))
 		;
+	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 
 	atomic_set(&rreq->nr_outstanding, 1);
 	do {
 		if (!netfs_rreq_submit_slice(rreq, &debug_index))


@@ -18,6 +18,8 @@
 #include <linux/fs.h>
 #include <linux/pagemap.h>
 
+enum netfs_sreq_ref_trace;
+
 /*
  * Overload PG_private_2 to give us PG_fscache - this is used to indicate that
  * a page is currently backed by a local disk cache
@@ -136,7 +138,7 @@ struct netfs_io_subrequest {
 	loff_t		start;		/* Where to start the I/O */
 	size_t		len;		/* Size of the I/O */
 	size_t		transferred;	/* Amount of data transferred */
-	refcount_t	usage;
+	refcount_t	ref;
 	short		error;		/* 0 or error that occurred */
 	unsigned short		debug_index;	/* Index in list (for debugging output) */
 	enum netfs_io_source	source;	/* Where to read from/write to */
@@ -268,6 +270,10 @@ extern int netfs_write_begin(struct file *, struct address_space *,
 			     void *);
 
 extern void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
+extern void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
+				 enum netfs_sreq_ref_trace what);
+extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq,
+				 bool was_async, enum netfs_sreq_ref_trace what);
 extern void netfs_stats_show(struct seq_file *);
 
 #endif /* _LINUX_NETFS_H */


@@ -64,6 +64,17 @@
 	EM(netfs_rreq_trace_put_subreq,		"PUT SUBREQ ")	\
 	E_(netfs_rreq_trace_new,		"NEW        ")
 
+#define netfs_sreq_ref_traces					\
+	EM(netfs_sreq_trace_get_copy_to_cache,	"GET COPY2C ")	\
+	EM(netfs_sreq_trace_get_resubmit,	"GET RESUBMIT")	\
+	EM(netfs_sreq_trace_get_short_read,	"GET SHORTRD")	\
+	EM(netfs_sreq_trace_new,		"NEW        ")	\
+	EM(netfs_sreq_trace_put_clear,		"PUT CLEAR  ")	\
+	EM(netfs_sreq_trace_put_failed,		"PUT FAILED ")	\
+	EM(netfs_sreq_trace_put_merged,		"PUT MERGED ")	\
+	EM(netfs_sreq_trace_put_no_copy,	"PUT NO COPY")	\
+	E_(netfs_sreq_trace_put_terminated,	"PUT TERM   ")
+
 #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
 #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
@@ -77,6 +88,7 @@ enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
 enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
 enum netfs_failure { netfs_failures } __mode(byte);
 enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
+enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
 
 #endif
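
The netfs_sreq_ref_traces list added above is expanded more than once through the EM()/E_() macros: in this hunk to declare the enum constants that callers pass around, and again further down, after EM()/E_() are redefined, to export the values to userspace and to build the symbol/string pairs that __print_symbolic() formats with. A stripped-down, kernel-independent sketch of that trick (all names invented):

#include <stdio.h>

#define colour_list \
	EM(colour_red,	"RED   ") \
	E_(colour_blue,	"BLUE  ")

/* First expansion: the enum constants. */
#define EM(a, b) a,
#define E_(a, b) a
enum colour { colour_list };
#undef EM
#undef E_

/* Second expansion: { value, string } pairs, the shape __print_symbolic() consumes. */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *name; } colour_names[] = { colour_list };
#undef EM
#undef E_

int main(void)
{
	enum colour c = colour_blue;

	for (size_t i = 0; i < sizeof(colour_names) / sizeof(colour_names[0]); i++)
		if (colour_names[i].val == c)
			printf("%s\n", colour_names[i].name);
	return 0;
}

The same list macro only has to be written once, so the enum and its human-readable strings cannot drift apart.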
@@ -94,6 +106,7 @@ netfs_sreq_sources;
 netfs_sreq_traces;
 netfs_failures;
 netfs_rreq_ref_traces;
+netfs_sreq_ref_traces;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -264,6 +277,33 @@ TRACE_EVENT(netfs_rreq_ref,
 		      __entry->ref)
 	    );
 
+TRACE_EVENT(netfs_sreq_ref,
+	    TP_PROTO(unsigned int rreq_debug_id, unsigned int subreq_debug_index,
+		     int ref, enum netfs_sreq_ref_trace what),
+
+	    TP_ARGS(rreq_debug_id, subreq_debug_index, ref, what),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int,		rreq		)
+		    __field(unsigned int,		subreq		)
+		    __field(int,			ref		)
+		    __field(enum netfs_sreq_ref_trace,	what		)
+			     ),
+
+	    TP_fast_assign(
+		    __entry->rreq	= rreq_debug_id;
+		    __entry->subreq	= subreq_debug_index;
+		    __entry->ref	= ref;
+		    __entry->what	= what;
+			   ),
+
+	    TP_printk("R=%08x[%x] %s r=%u",
+		      __entry->rreq,
+		      __entry->subreq,
+		      __print_symbolic(__entry->what, netfs_sreq_ref_traces),
+		      __entry->ref)
+	    );
+
 #undef EM
 #undef E_
 #endif /* _TRACE_NETFS_H */