diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index ed20f96bf89d..0537664620e5 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -359,6 +359,26 @@ static void disable_all_demotion_targets(void)
 	synchronize_rcu();
 }
 
+static void dump_demotion_targets(void)
+{
+	int node;
+
+	for_each_node_state(node, N_MEMORY) {
+		struct memory_tier *memtier = __node_get_memory_tier(node);
+		nodemask_t preferred = node_demotion[node].preferred;
+
+		if (!memtier)
+			continue;
+
+		if (nodes_empty(preferred))
+			pr_info("Demotion targets for Node %d: null\n", node);
+		else
+			pr_info("Demotion targets for Node %d: preferred: %*pbl, fallback: %*pbl\n",
+				node, nodemask_pr_args(&preferred),
+				nodemask_pr_args(&memtier->lower_tier_mask));
+	}
+}
+
 /*
  * Find an automatic demotion target for all memory
  * nodes. Failing here is OK. It might just indicate
@@ -443,7 +463,7 @@ static void establish_demotion_targets(void)
 	 * Now build the lower_tier mask for each node collecting node mask from
 	 * all memory tier below it. This allows us to fallback demotion page
 	 * allocation to a set of nodes that is closer the above selected
-	 * perferred node.
+	 * preferred node.
 	 */
 	lower_tier = node_states[N_MEMORY];
 	list_for_each_entry(memtier, &memory_tiers, list) {
@@ -456,6 +476,8 @@ static void establish_demotion_targets(void)
 		nodes_andnot(lower_tier, lower_tier, tier_nodes);
 		memtier->lower_tier_mask = lower_tier;
 	}
+
+	dump_demotion_targets();
 }
 
 #else
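
Illustration only, not part of the patch: the node IDs and masks below are hypothetical. With the pr_info() format strings added above, a system where node 0 demotes preferably to node 1 and can fall back to nodes 1-2 would log lines of roughly this shape each time establish_demotion_targets() runs:

    Demotion targets for Node 0: preferred: 1, fallback: 1-2
    Demotion targets for Node 1: null

The %*pbl specifier prints a nodemask as a bitmap range list (e.g. "1-2"), and a node whose preferred mask is empty is reported as "null".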