net: openvswitch: reduce cpu_used_mask memory

Use the actual number of possible CPUs, instead of a hardcoded value,
to decide the size of 'cpu_used_mask' in 'struct sw_flow'. The
reasoning is below.

'struct cpumask cpu_used_mask' is embedded in 'struct sw_flow'. Its
size is hardcoded to CONFIG_NR_CPUS bits, which can be 8192 by
default, i.e. 1 KiB of mask in every flow; this wastes memory and
slows down ovs_flow_alloc().
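
For scale, a back-of-the-envelope comparison as a standalone C
snippet. This is not part of the patch: 8192 and 64 are assumed
values for CONFIG_NR_CPUS and the number of possible CPUs, and the
appended mask is assumed to be sized from the possible-CPU count as
described below.

/* Illustrative size arithmetic only; the constants are assumptions. */
#include <stdio.h>

int main(void)
{
	unsigned int config_nr_cpus = 8192;  /* build-time upper bound */
	unsigned int nr_cpu_ids = 64;        /* CPUs possible on this boot */
	unsigned int bits_per_long = 64;

	/* Embedded 'struct cpumask': CONFIG_NR_CPUS bits in every flow. */
	printf("embedded mask: %u bytes per flow\n", config_nr_cpus / 8);

	/* Appended mask: roughly BITS_TO_LONGS(nr_cpu_ids) * sizeof(long). */
	printf("appended mask: %u bytes per flow\n",
	       (nr_cpu_ids + bits_per_long - 1) / bits_per_long * 8);
	return 0;
}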

To address this:
 Redefine cpu_used_mask as a pointer.
 Append cpumask_size() bytes after 'stats' to hold the cpumask.
 Initialize cpu_used_mask right after stats_last_writer.

APIs like cpumask_next() and cpumask_set_cpu() never access bits
beyond the CPU count, so cpumask_size() bytes of memory are enough.
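
The layout can be pictured with a minimal userspace sketch. This is
not the kernel code: malloc() stands in for the kmem_cache, a bare
unsigned long for the cpumask, and the demo_* names are made up. The
point is that the mask is carved out of extra bytes placed after the
per-CPU stats pointer array, so its size follows the runtime CPU
count instead of CONFIG_NR_CPUS.

/* Userspace sketch of the layout trick; names and sizes are illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct demo_stats { unsigned long packets, bytes; };

struct demo_flow {
	int stats_last_writer;
	unsigned long *cpu_used_mask;     /* points into the tail below */
	struct demo_stats *stats[];       /* one slot per possible CPU */
};

static struct demo_flow *demo_flow_alloc(unsigned int nr_cpu_ids,
					 size_t mask_bytes)
{
	/* Object size = struct + nr_cpu_ids stats pointers + mask bytes,
	 * mirroring the kmem_cache size set up in ovs_flow_init(). */
	struct demo_flow *flow = calloc(1, sizeof(*flow) +
					   nr_cpu_ids * sizeof(struct demo_stats *) +
					   mask_bytes);

	if (!flow)
		return NULL;
	flow->stats_last_writer = -1;
	/* The mask lives right after the last stats[] slot. */
	flow->cpu_used_mask = (unsigned long *)&flow->stats[nr_cpu_ids];
	return flow;
}

int main(void)
{
	struct demo_flow *flow = demo_flow_alloc(8, sizeof(unsigned long));

	if (!flow)
		return 1;
	flow->cpu_used_mask[0] |= 1UL;    /* like cpumask_set_cpu(0, ...) */
	printf("mask starts %td bytes into the object\n",
	       (char *)flow->cpu_used_mask - (char *)flow);
	free(flow);
	return 0;
}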

Signed-off-by: Eddy Tao <taoyuan_eddy@hotmail.com>
Acked-by: Eelco Chaudron <echaudro@redhat.com>
Link: https://lore.kernel.org/r/OS3P286MB229570CCED618B20355D227AF5D59@OS3P286MB2295.JPNP286.PROD.OUTLOOK.COM
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Eddy Tao 2023-02-05 09:35:37 +08:00 committed by Jakub Kicinski
parent bbe6418663
commit 15ea59a0e9
3 changed files with 12 additions and 7 deletions

net/openvswitch/flow.c

@@ -107,7 +107,8 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
 					rcu_assign_pointer(flow->stats[cpu],
 							   new_stats);
-					cpumask_set_cpu(cpu, &flow->cpu_used_mask);
+					cpumask_set_cpu(cpu,
+							flow->cpu_used_mask);
 					goto unlock;
 				}
 			}
@@ -135,7 +136,8 @@ void ovs_flow_stats_get(const struct sw_flow *flow,
 	memset(ovs_stats, 0, sizeof(*ovs_stats));
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+	for (cpu = 0; cpu < nr_cpu_ids;
+	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
 		struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]);
 		if (stats) {
@@ -159,7 +161,8 @@ void ovs_flow_stats_clear(struct sw_flow *flow)
 	int cpu;
 	/* We open code this to make sure cpu 0 is always considered */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+	for (cpu = 0; cpu < nr_cpu_ids;
+	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
 		struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]);
 		if (stats) {

net/openvswitch/flow.h

@@ -229,7 +229,7 @@ struct sw_flow {
 	 */
 	struct sw_flow_key key;
 	struct sw_flow_id id;
-	struct cpumask cpu_used_mask;
+	struct cpumask *cpu_used_mask;
 	struct sw_flow_mask *mask;
 	struct sw_flow_actions __rcu *sf_acts;
 	struct sw_flow_stats __rcu *stats[]; /* One for each CPU. First one

net/openvswitch/flow_table.c

@@ -79,6 +79,7 @@ struct sw_flow *ovs_flow_alloc(void)
 		return ERR_PTR(-ENOMEM);
 	flow->stats_last_writer = -1;
+	flow->cpu_used_mask = (struct cpumask *)&flow->stats[nr_cpu_ids];
 	/* Initialize the default stat node. */
 	stats = kmem_cache_alloc_node(flow_stats_cache,
@@ -91,7 +92,7 @@ struct sw_flow *ovs_flow_alloc(void)
 	RCU_INIT_POINTER(flow->stats[0], stats);
-	cpumask_set_cpu(0, &flow->cpu_used_mask);
+	cpumask_set_cpu(0, flow->cpu_used_mask);
 	return flow;
 err:
@@ -115,7 +116,7 @@ static void flow_free(struct sw_flow *flow)
 					  flow->sf_acts);
 	/* We open code this to make sure cpu 0 is always considered */
 	for (cpu = 0; cpu < nr_cpu_ids;
-	     cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
+	     cpu = cpumask_next(cpu, flow->cpu_used_mask)) {
 		if (flow->stats[cpu])
 			kmem_cache_free(flow_stats_cache,
 					(struct sw_flow_stats __force *)flow->stats[cpu]);
@@ -1196,7 +1197,8 @@ int ovs_flow_init(void)
 	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
 				       + (nr_cpu_ids
-					  * sizeof(struct sw_flow_stats *)),
+					  * sizeof(struct sw_flow_stats *))
+				       + cpumask_size(),
 				       0, 0, NULL);
 	if (flow_cache == NULL)
 		return -ENOMEM;