sched/topology: Rename SD_SHARE_PKG_RESOURCES to SD_SHARE_LLC

SD_SHARE_PKG_RESOURCES is a bit of a misnomer: its naming suggests that
it's sharing all 'package resources' - while in reality it's specifically
for sharing the LLC only.

Rename it to SD_SHARE_LLC to reduce confusion.

[ mingo: Rewrote the confusing changelog as well. ]

Suggested-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Alex Shi <alexs@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Reviewed-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Reviewed-by: Barry Song <baohua@kernel.org>
Link: https://lore.kernel.org/r/20240210113924.1130448-5-alexs@kernel.org
This commit is contained in:
Alex Shi 2024-02-10 19:39:23 +08:00 committed by Ingo Molnar
parent fbc449864e
commit 54de442747
5 changed files with 23 additions and 23 deletions

View File

@@ -984,7 +984,7 @@ static bool shared_caches __ro_after_init;
/* cpumask of CPUs with asymmetric SMT dependency */ /* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void) static int powerpc_smt_flags(void)
{ {
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
@@ -1010,9 +1010,9 @@ static __ro_after_init DEFINE_STATIC_KEY_FALSE(splpar_asym_pack);
static int powerpc_shared_cache_flags(void) static int powerpc_shared_cache_flags(void)
{ {
if (static_branch_unlikely(&splpar_asym_pack)) if (static_branch_unlikely(&splpar_asym_pack))
return SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING; return SD_SHARE_LLC | SD_ASYM_PACKING;
return SD_SHARE_PKG_RESOURCES; return SD_SHARE_LLC;
} }
static int powerpc_shared_proc_flags(void) static int powerpc_shared_proc_flags(void)

View File

@@ -117,13 +117,13 @@ SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS) SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
/* /*
* Domain members share CPU package resources (i.e. caches) * Domain members share CPU Last Level Caches
* *
* SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share * SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
* the same cache(s). * the same cache(s).
* NEEDS_GROUPS: Caches are shared between groups. * NEEDS_GROUPS: Caches are shared between groups.
*/ */
SD_FLAG(SD_SHARE_PKG_RESOURCES, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS) SD_FLAG(SD_SHARE_LLC, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
/* /*
* Only a single load balancing instance * Only a single load balancing instance

View File

@@ -38,21 +38,21 @@ extern const struct sd_flag_debug sd_flag_debug[];
#ifdef CONFIG_SCHED_SMT #ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void) static inline int cpu_smt_flags(void)
{ {
return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; return SD_SHARE_CPUCAPACITY | SD_SHARE_LLC;
} }
#endif #endif
#ifdef CONFIG_SCHED_CLUSTER #ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void) static inline int cpu_cluster_flags(void)
{ {
return SD_CLUSTER | SD_SHARE_PKG_RESOURCES; return SD_CLUSTER | SD_SHARE_LLC;
} }
#endif #endif
#ifdef CONFIG_SCHED_MC #ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void) static inline int cpu_core_flags(void)
{ {
return SD_SHARE_PKG_RESOURCES; return SD_SHARE_LLC;
} }
#endif #endif

View File

@@ -10678,7 +10678,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
*/ */
if (local->group_type == group_has_spare) { if (local->group_type == group_has_spare) {
if ((busiest->group_type > group_fully_busy) && if ((busiest->group_type > group_fully_busy) &&
!(env->sd->flags & SD_SHARE_PKG_RESOURCES)) { !(env->sd->flags & SD_SHARE_LLC)) {
/* /*
* If busiest is overloaded, try to fill spare * If busiest is overloaded, try to fill spare
* capacity. This might end up creating spare capacity * capacity. This might end up creating spare capacity

View File

@@ -657,13 +657,13 @@ static void destroy_sched_domains(struct sched_domain *sd)
} }
/* /*
* Keep a special pointer to the highest sched_domain that has * Keep a special pointer to the highest sched_domain that has SD_SHARE_LLC set
* SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this * (Last Level Cache Domain) for this allows us to avoid some pointer chasing
* allows us to avoid some pointer chasing select_idle_sibling(). * select_idle_sibling().
* *
* Also keep a unique ID per domain (we use the first CPU number in * Also keep a unique ID per domain (we use the first CPU number in the cpumask
* the cpumask of the domain), this allows us to quickly tell if * of the domain), this allows us to quickly tell if two CPUs are in the same
* two CPUs are in the same cache domain, see cpus_share_cache(). * cache domain, see cpus_share_cache().
*/ */
DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc);
DEFINE_PER_CPU(int, sd_llc_size); DEFINE_PER_CPU(int, sd_llc_size);
@@ -684,7 +684,7 @@ static void update_top_cache_domain(int cpu)
int id = cpu; int id = cpu;
int size = 1; int size = 1;
sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); sd = highest_flag_domain(cpu, SD_SHARE_LLC);
if (sd) { if (sd) {
id = cpumask_first(sched_domain_span(sd)); id = cpumask_first(sched_domain_span(sd));
size = cpumask_weight(sched_domain_span(sd)); size = cpumask_weight(sched_domain_span(sd));
@@ -1554,7 +1554,7 @@ static struct cpumask ***sched_domains_numa_masks;
* function. For details, see include/linux/sched/sd_flags.h. * function. For details, see include/linux/sched/sd_flags.h.
* *
* SD_SHARE_CPUCAPACITY * SD_SHARE_CPUCAPACITY
* SD_SHARE_PKG_RESOURCES * SD_SHARE_LLC
* SD_CLUSTER * SD_CLUSTER
* SD_NUMA * SD_NUMA
* *
@@ -1566,7 +1566,7 @@ static struct cpumask ***sched_domains_numa_masks;
#define TOPOLOGY_SD_FLAGS \ #define TOPOLOGY_SD_FLAGS \
(SD_SHARE_CPUCAPACITY | \ (SD_SHARE_CPUCAPACITY | \
SD_CLUSTER | \ SD_CLUSTER | \
SD_SHARE_PKG_RESOURCES | \ SD_SHARE_LLC | \
SD_NUMA | \ SD_NUMA | \
SD_ASYM_PACKING) SD_ASYM_PACKING)
@@ -1609,7 +1609,7 @@ sd_init(struct sched_domain_topology_level *tl,
| 0*SD_BALANCE_WAKE | 0*SD_BALANCE_WAKE
| 1*SD_WAKE_AFFINE | 1*SD_WAKE_AFFINE
| 0*SD_SHARE_CPUCAPACITY | 0*SD_SHARE_CPUCAPACITY
| 0*SD_SHARE_PKG_RESOURCES | 0*SD_SHARE_LLC
| 0*SD_SERIALIZE | 0*SD_SERIALIZE
| 1*SD_PREFER_SIBLING | 1*SD_PREFER_SIBLING
| 0*SD_NUMA | 0*SD_NUMA
@@ -1646,7 +1646,7 @@ sd_init(struct sched_domain_topology_level *tl,
if (sd->flags & SD_SHARE_CPUCAPACITY) { if (sd->flags & SD_SHARE_CPUCAPACITY) {
sd->imbalance_pct = 110; sd->imbalance_pct = 110;
} else if (sd->flags & SD_SHARE_PKG_RESOURCES) { } else if (sd->flags & SD_SHARE_LLC) {
sd->imbalance_pct = 117; sd->imbalance_pct = 117;
sd->cache_nice_tries = 1; sd->cache_nice_tries = 1;
@@ -1671,7 +1671,7 @@ sd_init(struct sched_domain_topology_level *tl,
* For all levels sharing cache; connect a sched_domain_shared * For all levels sharing cache; connect a sched_domain_shared
* instance. * instance.
*/ */
if (sd->flags & SD_SHARE_PKG_RESOURCES) { if (sd->flags & SD_SHARE_LLC) {
sd->shared = *per_cpu_ptr(sdd->sds, sd_id); sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
atomic_inc(&sd->shared->ref); atomic_inc(&sd->shared->ref);
atomic_set(&sd->shared->nr_busy_cpus, sd_weight); atomic_set(&sd->shared->nr_busy_cpus, sd_weight);
@@ -2446,8 +2446,8 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
struct sched_domain *child = sd->child; struct sched_domain *child = sd->child;
if (!(sd->flags & SD_SHARE_PKG_RESOURCES) && child && if (!(sd->flags & SD_SHARE_LLC) && child &&
(child->flags & SD_SHARE_PKG_RESOURCES)) { (child->flags & SD_SHARE_LLC)) {
struct sched_domain __rcu *top_p; struct sched_domain __rcu *top_p;
unsigned int nr_llcs; unsigned int nr_llcs;