mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-24 11:25:43 +00:00
net: page_pool: fix recycle stats for system page_pool allocator
Use the global percpu page_pool_recycle_stats counter for the system page_pool allocator instead of allocating a separate percpu variable for each (also percpu) page pool instance.

Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/87f572425e98faea3da45f76c3c68815c01a20ee.1708075412.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
56ef27e3ab
commit
f853fa5c54
3 changed files, with 21 additions and 7 deletions
|
@ -18,8 +18,9 @@
|
|||
* Please note DMA-sync-for-CPU is still
|
||||
* device driver responsibility
|
||||
*/
|
||||
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP |\
|
||||
PP_FLAG_DMA_SYNC_DEV)
|
||||
#define PP_FLAG_SYSTEM_POOL BIT(2) /* Global system page_pool */
|
||||
#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | \
|
||||
PP_FLAG_SYSTEM_POOL)
|
||||
|
||||
/*
|
||||
* Fast allocation side cache array/stack
|
||||
|
|
|
@ -11738,6 +11738,7 @@ static int net_page_pool_create(int cpuid)
|
|||
#if IS_ENABLED(CONFIG_PAGE_POOL)
|
||||
struct page_pool_params page_pool_params = {
|
||||
.pool_size = SYSTEM_PERCPU_PAGE_POOL_SIZE,
|
||||
.flags = PP_FLAG_SYSTEM_POOL,
|
||||
.nid = NUMA_NO_NODE,
|
||||
};
|
||||
struct page_pool *pp_ptr;
|
||||
|
|
|
@ -31,6 +31,8 @@
|
|||
#define BIAS_MAX (LONG_MAX >> 1)
|
||||
|
||||
#ifdef CONFIG_PAGE_POOL_STATS
|
||||
static DEFINE_PER_CPU(struct page_pool_recycle_stats, pp_system_recycle_stats);
|
||||
|
||||
/* alloc_stat_inc is intended to be used in softirq context */
|
||||
#define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
|
||||
/* recycle_stat_inc is safe to use when preemption is possible. */
|
||||
|
@ -220,14 +222,23 @@ static int page_pool_init(struct page_pool *pool,
|
|||
pool->has_init_callback = !!pool->slow.init_callback;
|
||||
|
||||
#ifdef CONFIG_PAGE_POOL_STATS
|
||||
pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
|
||||
if (!pool->recycle_stats)
|
||||
return -ENOMEM;
|
||||
if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL)) {
|
||||
pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
|
||||
if (!pool->recycle_stats)
|
||||
return -ENOMEM;
|
||||
} else {
|
||||
/* For system page pool instance we use a singular stats object
|
||||
* instead of allocating a separate percpu variable for each
|
||||
* (also percpu) page pool instance.
|
||||
*/
|
||||
pool->recycle_stats = &pp_system_recycle_stats;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
|
||||
#ifdef CONFIG_PAGE_POOL_STATS
|
||||
free_percpu(pool->recycle_stats);
|
||||
if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
|
||||
free_percpu(pool->recycle_stats);
|
||||
#endif
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -251,7 +262,8 @@ static void page_pool_uninit(struct page_pool *pool)
|
|||
put_device(pool->p.dev);
|
||||
|
||||
#ifdef CONFIG_PAGE_POOL_STATS
|
||||
free_percpu(pool->recycle_stats);
|
||||
if (!(pool->p.flags & PP_FLAG_SYSTEM_POOL))
|
||||
free_percpu(pool->recycle_stats);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in a new issue