linux-stable/arch/arm64/kernel/smp_spin_table.c
Fuad Tabba fade9c2c6e arm64: Rename arm64-internal cache maintenance functions
Although naming across the codebase isn't that consistent, it
tends to follow certain patterns. Moreover, the term "flush"
isn't defined in the Arm Architecture reference manual, and might
be interpreted to mean clean, invalidate, or both for a cache.

Rename the arm64-internal functions to make the naming internally
consistent, as well as consistent with the Arm ARM, by specifying
whether the operation applies to the instruction cache, the data
cache, or both, and whether it is a clean, an invalidate, or both.
Also specify the point to which the operation applies, i.e., the
point of unification (PoU), coherency (PoC), or persistence (PoP).

This commit applies the following sed transformation to all files
under arch/arm64:

"s/\b__flush_cache_range\b/caches_clean_inval_pou_macro/g;"\
"s/\b__flush_icache_range\b/caches_clean_inval_pou/g;"\
"s/\binvalidate_icache_range\b/icache_inval_pou/g;"\
"s/\b__flush_dcache_area\b/dcache_clean_inval_poc/g;"\
"s/\b__inval_dcache_area\b/dcache_inval_poc/g;"\
"s/__clean_dcache_area_poc\b/dcache_clean_poc/g;"\
"s/\b__clean_dcache_area_pop\b/dcache_clean_pop/g;"\
"s/\b__clean_dcache_area_pou\b/dcache_clean_pou/g;"\
"s/\b__flush_cache_user_range\b/caches_clean_inval_user_pou/g;"\
"s/\b__flush_icache_all\b/icache_inval_all_pou/g;"

Note that __clean_dcache_area_poc is deliberately missing a word
boundary check at the beginning in order to match the efistub
symbols in image-vars.h.

Also note that, despite its name, __flush_icache_range operates
on both instruction and data caches. The name change here
reflects that.
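
For illustration, a typical call site changes in name only (the
arguments, here a hypothetical start/end pair, are untouched by
this patch):

  -	__flush_dcache_area(start, end);
  +	dcache_clean_inval_poc(start, end);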

No functional change intended.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210524083001.2586635-19-tabba@google.com
Signed-off-by: Will Deacon <will@kernel.org>
2021-05-25 19:27:49 +01:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Spin Table SMP initialisation
 *
 * Copyright (C) 2013 ARM Ltd.
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/cputype.h>
#include <asm/io.h>
#include <asm/smp_plat.h>

extern void secondary_holding_pen(void);
volatile unsigned long __section(".mmuoff.data.read")
secondary_holding_pen_release = INVALID_HWID;

static phys_addr_t cpu_release_addr[NR_CPUS];

/*
 * Write secondary_holding_pen_release in a way that is guaranteed to be
 * visible to all observers, irrespective of whether they're taking part
 * in coherency or not. This is necessary for the hotplug code to work
 * reliably.
 */
static void write_pen_release(u64 val)
{
	void *start = (void *)&secondary_holding_pen_release;
	unsigned long size = sizeof(secondary_holding_pen_release);

	secondary_holding_pen_release = val;
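	/*
	 * Clean the update out to the PoC so that a secondary spinning
	 * with its MMU and caches off (and therefore not participating
	 * in coherency) observes the new value.
	 */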
	dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size);
}

static int smp_spin_table_cpu_init(unsigned int cpu)
{
	struct device_node *dn;
	int ret;

	dn = of_get_cpu_node(cpu, NULL);
	if (!dn)
		return -ENODEV;

	/*
	 * Determine the address from which the CPU is polling.
	 */
	ret = of_property_read_u64(dn, "cpu-release-addr",
				   &cpu_release_addr[cpu]);
	if (ret)
		pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
		       cpu);

	of_node_put(dn);

	return ret;
}

static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	__le64 __iomem *release_addr;
	phys_addr_t pa_holding_pen = __pa_symbol(function_nocfi(secondary_holding_pen));

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	/*
	 * The cpu-release-addr may or may not be inside the linear mapping.
	 * As ioremap_cache will either give us a new mapping or reuse the
	 * existing linear mapping, we can use it to cover both cases. In
	 * either case the memory will be MT_NORMAL.
	 */
	release_addr = ioremap_cache(cpu_release_addr[cpu],
				     sizeof(*release_addr));
	if (!release_addr)
		return -ENOMEM;

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert this address to the
	 * boot-loader's endianness before jumping. This is mandated by
	 * the boot protocol.
	 */
	writeq_relaxed(pa_holding_pen, release_addr);
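	/* Push the LE release address out to the PoC before waking the CPU. */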
	dcache_clean_inval_poc((__force unsigned long)release_addr,
			       (__force unsigned long)release_addr +
			       sizeof(*release_addr));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	iounmap(release_addr);

	return 0;
}

static int smp_spin_table_cpu_boot(unsigned int cpu)
{
	/*
	 * Update the pen release flag.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

	return 0;
}

const struct cpu_operations smp_spin_table_ops = {
	.name		= "spin-table",
	.cpu_init	= smp_spin_table_cpu_init,
	.cpu_prepare	= smp_spin_table_cpu_prepare,
	.cpu_boot	= smp_spin_table_cpu_boot,
};
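
For context, the secondary side of this spin-table protocol is implemented
in assembly (secondary_holding_pen in arch/arm64/kernel/head.S). The C
sketch below is illustrative only (the function name is hypothetical), but
it shows the loop a held secondary executes against the values published
above:

/*
 * Illustrative sketch, not kernel code: a held secondary spins until
 * the boot CPU publishes its MPIDR via write_pen_release(), sleeping
 * in WFE between checks and woken by the boot CPU's sev().
 */
static void secondary_holding_pen_sketch(u64 mpidr)
{
	while (secondary_holding_pen_release != mpidr)
		wfe();

	/* ...then branch to the normal secondary entry point. */
}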