net: page_pool: expose page pool stats via netlink

Dump the stats into netlink. More clever approaches, such as dumping
the stats for each CPU individually to see where the packets get
consumed, can be implemented in the future.

A trimmed example from a real (but recently booted) system:

$ ./cli.py --no-schema --spec netlink/specs/netdev.yaml \
           --dump page-pool-stats-get
[{'info': {'id': 19, 'ifindex': 2},
  'alloc-empty': 48,
  'alloc-fast': 3024,
  'alloc-refill': 0,
  'alloc-slow': 48,
  'alloc-slow-high-order': 0,
  'alloc-waive': 0,
  'recycle-cache-full': 0,
  'recycle-cached': 0,
  'recycle-released-refcnt': 0,
  'recycle-ring': 0,
  'recycle-ring-full': 0},
 {'info': {'id': 18, 'ifindex': 2},
  'alloc-empty': 66,
  'alloc-fast': 11811,
  'alloc-refill': 35,
  'alloc-slow': 66,
  'alloc-slow-high-order': 0,
  'alloc-waive': 0,
  'recycle-cache-full': 1145,
  'recycle-cached': 6541,
  'recycle-released-refcnt': 0,
  'recycle-ring': 1275,
  'recycle-ring-full': 0},
 {'info': {'id': 17, 'ifindex': 2},
  'alloc-empty': 73,
  'alloc-fast': 62099,
  'alloc-refill': 413,
...

Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
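
For context on the per-CPU remark above: the recycle counters are kept in
per-CPU structures and are folded into a single sum when harvested, which is
why per-CPU visibility would need extra work. A condensed sketch of that
aggregation, loosely following net/core/page_pool.c of this era (trimmed,
member names as in include/net/page_pool/types.h):

bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu;

	if (!stats)
		return false;

	/* The allocation side of a pool is serialized by the driver
	 * (NAPI context), so the alloc counters can simply be copied.
	 */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	/* ... remaining alloc counters ... */

	/* Recycling may run on any CPU, so those counters are per-CPU
	 * and are summed here; the per-CPU split is lost at this point.
	 */
	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.ring += pcpu->ring;
		/* ... remaining recycle counters ... */
	}

	return true;
}
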
@@ -139,6 +139,59 @@ attribute-sets:
           "re-attached", they are just waiting to disappear.
           Attribute is absent if Page Pool has not been detached, and
           can still be used to allocate new memory.
+  -
+    name: page-pool-info
+    subset-of: page-pool
+    attributes:
+      -
+        name: id
+      -
+        name: ifindex
+  -
+    name: page-pool-stats
+    doc: |
+      Page pool statistics, see docs for struct page_pool_stats
+      for information about individual statistics.
+    attributes:
+      -
+        name: info
+        doc: Page pool identifying information.
+        type: nest
+        nested-attributes: page-pool-info
+      -
+        name: alloc-fast
+        type: uint
+        value: 8 # reserve some attr ids in case we need more metadata later
+      -
+        name: alloc-slow
+        type: uint
+      -
+        name: alloc-slow-high-order
+        type: uint
+      -
+        name: alloc-empty
+        type: uint
+      -
+        name: alloc-refill
+        type: uint
+      -
+        name: alloc-waive
+        type: uint
+      -
+        name: recycle-cached
+        type: uint
+      -
+        name: recycle-cache-full
+        type: uint
+      -
+        name: recycle-ring
+        type: uint
+      -
+        name: recycle-ring-full
+        type: uint
+      -
+        name: recycle-released-refcnt
+        type: uint
 
 operations:
   list:
@@ -212,6 +265,31 @@ operations:
       notify: page-pool-get
       mcgrp: page-pool
       config-cond: page-pool
+    -
+      name: page-pool-stats-get
+      doc: Get page pool statistics.
+      attribute-set: page-pool-stats
+      do:
+        request:
+          attributes:
+            - info
+        reply: &pp-stats-reply
+          attributes:
+            - info
+            - alloc-fast
+            - alloc-slow
+            - alloc-slow-high-order
+            - alloc-empty
+            - alloc-refill
+            - alloc-waive
+            - recycle-cached
+            - recycle-cache-full
+            - recycle-ring
+            - recycle-ring-full
+            - recycle-released-refcnt
+      dump:
+        reply: *pp-stats-reply
+      config-cond: page-pool-stats
 
 mcast-groups:
   list:
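
The do form of the new operation selects a single pool via the nested info
attribute defined above; using the same cli.py tool as in the commit message,
a request along these lines (exact flags may vary between trees) returns a
single object shaped like one entry of the dump:

$ ./cli.py --no-schema --spec netlink/specs/netdev.yaml \
           --do page-pool-stats-get --json '{"info": {"id": 19}}'

Selecting by ifindex inside the info nest is rejected by the doit handler
(see the extack message further down), so only the pool id can be used here.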


@@ -41,6 +41,11 @@ Architecture overview
     |   Fast cache    |     |  ptr-ring cache  |
     +-----------------+     +------------------+
 
+Monitoring
+==========
+Information about page pools on the system can be accessed via the netdev
+genetlink family (see Documentation/netlink/specs/netdev.yaml).
+
 API interface
 =============
 The number of pools created **must** match the number of hardware queues
@@ -107,8 +112,9 @@ page_pool_get_stats() and structures described below are available.
 It takes a pointer to a ``struct page_pool`` and a pointer to a struct
 page_pool_stats allocated by the caller.
 
-The API will fill in the provided struct page_pool_stats with
-statistics about the page_pool.
+Older drivers expose page pool statistics via ethtool or debugfs.
+The same statistics are accessible via the netlink netdev family
+in a driver-independent fashion.
 
 .. kernel-doc:: include/net/page_pool/types.h
    :identifiers: struct page_pool_recycle_stats


@@ -55,16 +55,12 @@
 #include <net/page_pool/types.h>
 
 #ifdef CONFIG_PAGE_POOL_STATS
+/* Deprecated driver-facing API, use netlink instead */
 int page_pool_ethtool_stats_get_count(void);
 u8 *page_pool_ethtool_stats_get_strings(u8 *data);
 u64 *page_pool_ethtool_stats_get(u64 *data, void *stats);
 
-/*
- * Drivers that wish to harvest page pool stats and report them to users
- * (perhaps via ethtool, debugfs, or another mechanism) can allocate a
- * struct page_pool_stats call page_pool_get_stats to get stats for the specified pool.
- */
-bool page_pool_get_stats(struct page_pool *pool,
+bool page_pool_get_stats(const struct page_pool *pool,
 			 struct page_pool_stats *stats);
 #else
 static inline int page_pool_ethtool_stats_get_count(void)
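
Hypothetical driver glue, not part of this patch: the helpers declared above
(now marked as deprecated for drivers) are typically consumed from an ethtool
->get_ethtool_stats() callback along these lines; my_priv, num_rx_queues and
rx_pool[] are made-up driver fields:

static void my_get_ethtool_stats(struct net_device *dev,
				 struct ethtool_stats *st, u64 *data)
{
	struct my_priv *priv = netdev_priv(dev);
	struct page_pool_stats pp_stats = {};
	int i;

	/* page_pool_get_stats() accumulates into the caller's struct,
	 * so a single struct can sum the counters of all RX pools.
	 */
	for (i = 0; i < priv->num_rx_queues; i++)
		page_pool_get_stats(priv->rx_pool[i], &pp_stats);

	/* Copy the accumulated counters into the ethtool data array; the
	 * matching names come from page_pool_ethtool_stats_get_strings().
	 */
	data = page_pool_ethtool_stats_get(data, &pp_stats);
}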


@@ -76,6 +76,24 @@ enum {
 	NETDEV_A_PAGE_POOL_MAX = (__NETDEV_A_PAGE_POOL_MAX - 1)
 };
 
+enum {
+	NETDEV_A_PAGE_POOL_STATS_INFO = 1,
+	NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST = 8,
+	NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
+	NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
+	NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
+	NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
+	NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
+	NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
+	NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
+	NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
+	NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
+	NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
+
+	__NETDEV_A_PAGE_POOL_STATS_MAX,
+	NETDEV_A_PAGE_POOL_STATS_MAX = (__NETDEV_A_PAGE_POOL_STATS_MAX - 1)
+};
+
 enum {
 	NETDEV_CMD_DEV_GET = 1,
 	NETDEV_CMD_DEV_ADD_NTF,
@@ -85,6 +103,7 @@ enum {
 	NETDEV_CMD_PAGE_POOL_ADD_NTF,
 	NETDEV_CMD_PAGE_POOL_DEL_NTF,
 	NETDEV_CMD_PAGE_POOL_CHANGE_NTF,
+	NETDEV_CMD_PAGE_POOL_STATS_GET,
 
 	__NETDEV_CMD_MAX,
 	NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
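
For a userspace consumer that does not go through YNL, the enum above is what
the reply TLVs carry. A hedged libmnl sketch of walking one reply message
(request construction, the dump loop and error handling are omitted; nlh is
assumed to point at a received NETDEV_CMD_PAGE_POOL_STATS_GET reply):

#include <stdio.h>
#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/netdev.h>

/* NLA_UINT counters are put as 4 bytes when the value fits, 8 otherwise */
static unsigned long long attr_get_uint(const struct nlattr *attr)
{
	if (mnl_attr_get_payload_len(attr) == sizeof(uint32_t))
		return mnl_attr_get_u32(attr);
	return mnl_attr_get_u64(attr);
}

static int print_pp_stat(const struct nlattr *attr, void *data)
{
	switch (mnl_attr_get_type(attr)) {
	case NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST:
		printf("alloc-fast: %llu\n", attr_get_uint(attr));
		break;
	case NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING:
		printf("recycle-ring: %llu\n", attr_get_uint(attr));
		break;
	/* ... remaining NETDEV_A_PAGE_POOL_STATS_* counters ... */
	default:	/* e.g. the nested _STATS_INFO attribute */
		break;
	}
	return MNL_CB_OK;
}

static void walk_reply(const struct nlmsghdr *nlh)
{
	/* the attribute stream starts right after the genetlink header */
	mnl_attr_parse(nlh, sizeof(struct genlmsghdr), print_pp_stat, NULL);
}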


@@ -16,6 +16,17 @@ static const struct netlink_range_validation netdev_a_page_pool_id_range = {
 	.max	= 4294967295ULL,
 };
 
+static const struct netlink_range_validation netdev_a_page_pool_ifindex_range = {
+	.min	= 1ULL,
+	.max	= 2147483647ULL,
+};
+
+/* Common nested types */
+const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1] = {
+	[NETDEV_A_PAGE_POOL_ID] = NLA_POLICY_FULL_RANGE(NLA_UINT, &netdev_a_page_pool_id_range),
+	[NETDEV_A_PAGE_POOL_IFINDEX] = NLA_POLICY_FULL_RANGE(NLA_U32, &netdev_a_page_pool_ifindex_range),
+};
+
 /* NETDEV_CMD_DEV_GET - do */
 static const struct nla_policy netdev_dev_get_nl_policy[NETDEV_A_DEV_IFINDEX + 1] = {
 	[NETDEV_A_DEV_IFINDEX] = NLA_POLICY_MIN(NLA_U32, 1),
@@ -28,6 +39,13 @@ static const struct nla_policy netdev_page_pool_get_nl_policy[NETDEV_A_PAGE_POOL_ID + 1] = {
 };
 #endif /* CONFIG_PAGE_POOL */
 
+/* NETDEV_CMD_PAGE_POOL_STATS_GET - do */
+#ifdef CONFIG_PAGE_POOL_STATS
+static const struct nla_policy netdev_page_pool_stats_get_nl_policy[NETDEV_A_PAGE_POOL_STATS_INFO + 1] = {
+	[NETDEV_A_PAGE_POOL_STATS_INFO] = NLA_POLICY_NESTED(netdev_page_pool_info_nl_policy),
+};
+#endif /* CONFIG_PAGE_POOL_STATS */
+
 /* Ops table for netdev */
 static const struct genl_split_ops netdev_nl_ops[] = {
 	{
@@ -56,6 +74,20 @@ static const struct genl_split_ops netdev_nl_ops[] = {
 		.flags	= GENL_CMD_CAP_DUMP,
 	},
 #endif /* CONFIG_PAGE_POOL */
+#ifdef CONFIG_PAGE_POOL_STATS
+	{
+		.cmd		= NETDEV_CMD_PAGE_POOL_STATS_GET,
+		.doit		= netdev_nl_page_pool_stats_get_doit,
+		.policy		= netdev_page_pool_stats_get_nl_policy,
+		.maxattr	= NETDEV_A_PAGE_POOL_STATS_INFO,
+		.flags		= GENL_CMD_CAP_DO,
+	},
+	{
+		.cmd	= NETDEV_CMD_PAGE_POOL_STATS_GET,
+		.dumpit	= netdev_nl_page_pool_stats_get_dumpit,
+		.flags	= GENL_CMD_CAP_DUMP,
+	},
+#endif /* CONFIG_PAGE_POOL_STATS */
 };
 
 static const struct genl_multicast_group netdev_nl_mcgrps[] = {
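
For orientation, the generated ops and multicast-group tables are hooked up to
the genetlink core by the family definition elsewhere in the same generated
file, roughly along these lines (field values illustrative, not copied from
the file):

struct genl_family netdev_nl_family __ro_after_init = {
	.name		= NETDEV_FAMILY_NAME,
	.version	= NETDEV_FAMILY_VERSION,
	.netnsok	= true,
	.parallel_ops	= true,
	.module		= THIS_MODULE,
	.split_ops	= netdev_nl_ops,
	.n_split_ops	= ARRAY_SIZE(netdev_nl_ops),
	.mcgrps		= netdev_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(netdev_nl_mcgrps),
};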


@@ -11,11 +11,18 @@
 
 #include <uapi/linux/netdev.h>
 
+/* Common nested types */
+extern const struct nla_policy netdev_page_pool_info_nl_policy[NETDEV_A_PAGE_POOL_IFINDEX + 1];
+
 int netdev_nl_dev_get_doit(struct sk_buff *skb, struct genl_info *info);
 int netdev_nl_dev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
 int netdev_nl_page_pool_get_doit(struct sk_buff *skb, struct genl_info *info);
 int netdev_nl_page_pool_get_dumpit(struct sk_buff *skb,
 				   struct netlink_callback *cb);
+int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
+				       struct genl_info *info);
+int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
+					 struct netlink_callback *cb);
 
 enum {
 	NETDEV_NLGRP_MGMT,


@@ -71,7 +71,7 @@ static const char pp_stats[][ETH_GSTRING_LEN] = {
  * is passed to this API which is filled in. The caller can then report
  * those stats to the user (perhaps via ethtool, debugfs, etc.).
  */
-bool page_pool_get_stats(struct page_pool *pool,
+bool page_pool_get_stats(const struct page_pool *pool,
 			 struct page_pool_stats *stats)
 {
 	int cpu = 0;


@@ -5,6 +5,7 @@
 #include <linux/xarray.h>
 #include <net/net_debug.h>
 #include <net/page_pool/types.h>
+#include <net/page_pool/helpers.h>
 #include <net/sock.h>
 
 #include "page_pool_priv.h"
@@ -106,6 +107,108 @@ netdev_nl_page_pool_get_dump(struct sk_buff *skb, struct netlink_callback *cb,
 	return err;
 }
 
+static int
+page_pool_nl_stats_fill(struct sk_buff *rsp, const struct page_pool *pool,
+			const struct genl_info *info)
+{
+#ifdef CONFIG_PAGE_POOL_STATS
+	struct page_pool_stats stats = {};
+	struct nlattr *nest;
+	void *hdr;
+
+	if (!page_pool_get_stats(pool, &stats))
+		return 0;
+
+	hdr = genlmsg_iput(rsp, info);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	nest = nla_nest_start(rsp, NETDEV_A_PAGE_POOL_STATS_INFO);
+
+	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_ID, pool->user.id) ||
+	    (pool->slow.netdev->ifindex != LOOPBACK_IFINDEX &&
+	     nla_put_u32(rsp, NETDEV_A_PAGE_POOL_IFINDEX,
+			 pool->slow.netdev->ifindex)))
+		goto err_cancel_nest;
+
+	nla_nest_end(rsp, nest);
+
+	if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_FAST,
+			 stats.alloc_stats.fast) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW,
+			 stats.alloc_stats.slow) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_SLOW_HIGH_ORDER,
+			 stats.alloc_stats.slow_high_order) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_EMPTY,
+			 stats.alloc_stats.empty) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_REFILL,
+			 stats.alloc_stats.refill) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_ALLOC_WAIVE,
+			 stats.alloc_stats.waive) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHED,
+			 stats.recycle_stats.cached) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_CACHE_FULL,
+			 stats.recycle_stats.cache_full) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING,
+			 stats.recycle_stats.ring) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RING_FULL,
+			 stats.recycle_stats.ring_full) ||
+	    nla_put_uint(rsp, NETDEV_A_PAGE_POOL_STATS_RECYCLE_RELEASED_REFCNT,
+			 stats.recycle_stats.released_refcnt))
+		goto err_cancel_msg;
+
+	genlmsg_end(rsp, hdr);
+
+	return 0;
+err_cancel_nest:
+	nla_nest_cancel(rsp, nest);
+err_cancel_msg:
+	genlmsg_cancel(rsp, hdr);
+	return -EMSGSIZE;
+#else
+	GENL_SET_ERR_MSG(info, "kernel built without CONFIG_PAGE_POOL_STATS");
+	return -EOPNOTSUPP;
+#endif
+}
+
+int netdev_nl_page_pool_stats_get_doit(struct sk_buff *skb,
+				       struct genl_info *info)
+{
+	struct nlattr *tb[ARRAY_SIZE(netdev_page_pool_info_nl_policy)];
+	struct nlattr *nest;
+	int err;
+	u32 id;
+
+	if (GENL_REQ_ATTR_CHECK(info, NETDEV_A_PAGE_POOL_STATS_INFO))
+		return -EINVAL;
+
+	nest = info->attrs[NETDEV_A_PAGE_POOL_STATS_INFO];
+	err = nla_parse_nested(tb, ARRAY_SIZE(tb) - 1, nest,
+			       netdev_page_pool_info_nl_policy,
+			       info->extack);
+	if (err)
+		return err;
+
+	if (NL_REQ_ATTR_CHECK(info->extack, nest, tb, NETDEV_A_PAGE_POOL_ID))
+		return -EINVAL;
+	if (tb[NETDEV_A_PAGE_POOL_IFINDEX]) {
+		NL_SET_ERR_MSG_ATTR(info->extack,
+				    tb[NETDEV_A_PAGE_POOL_IFINDEX],
+				    "selecting by ifindex not supported");
+		return -EINVAL;
+	}
+
+	id = nla_get_uint(tb[NETDEV_A_PAGE_POOL_ID]);
+
+	return netdev_nl_page_pool_get_do(info, id, page_pool_nl_stats_fill);
+}
+
+int netdev_nl_page_pool_stats_get_dumpit(struct sk_buff *skb,
+					 struct netlink_callback *cb)
+{
+	return netdev_nl_page_pool_get_dump(skb, cb, page_pool_nl_stats_fill);
+}
+
 static int
 page_pool_nl_fill(struct sk_buff *rsp, const struct page_pool *pool,
 		  const struct genl_info *info)