linux-stable/arch/arm/mach-shmobile/headsmp.S


/* SPDX-License-Identifier: GPL-2.0
*
* SMP support for R-Mobile / SH-Mobile
*
* Copyright (C) 2010 Magnus Damm
* Copyright (C) 2010 Takashi Yoshii
*
* Based on vexpress, Copyright (c) 2003 ARM Limited, All Rights Reserved
*/
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <asm/assembler.h>
#include <asm/page.h>

#define SCTLR_MMU	0x01
#define BOOTROM_ADDRESS	0xE6340000
#define RWTCSRA_ADDRESS	0xE6020004
#define RWTCSRA_WOVF	0x10
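
/*
 * SCTLR_MMU is the MMU enable bit (M) in the ARM SCTLR.  RWTCSRA appears
 * to be the RCLK watchdog timer control/status register A (RWTCSRA_WOVF
 * being its overflow flag) and BOOTROM_ADDRESS the on-chip Boot ROM that
 * the R-Car Gen2 vector below jumps to after a watchdog overflow.
 */
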
/*
* Reset vector for secondary CPUs.
* This will be mapped at address 0 by SBAR register.
* We need _long_ jump to the physical address.
*/
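/*
 * The word at 1f (shmobile_boot_fn) is expected to be filled in by
 * platform code with the physical address of the actual secondary entry
 * point before SBAR is pointed at this vector; shmobile_boot_size gives
 * the number of bytes to copy should the vector first need to be
 * relocated (e.g. into on-chip RAM).
 */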
	.arm
	.align	12			@ 4 KiB alignment
ENTRY(shmobile_boot_vector)
	ldr	r1, 1f
	bx	r1
ENDPROC(shmobile_boot_vector)

	.align	2
	.globl	shmobile_boot_fn
shmobile_boot_fn:
1:	.space	4
	.globl	shmobile_boot_size
shmobile_boot_size:
	.long	. - shmobile_boot_vector

#ifdef CONFIG_ARCH_RCAR_GEN2
/*
* Reset vector for R-Car Gen2 and RZ/G1 secondary CPUs.
* This will be mapped at address 0 by SBAR register.
*/
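/*
 * Presumably the watchdog-reset recovery path: when the boot CPU (MPIDR
 * matches shmobile_boot_cpu_gen2) comes out of reset with the MMU off and
 * the watchdog overflow flag (RWTCSRA.WOVF) set, control is handed to the
 * Boot ROM so that the watchdog-triggered reboot can complete.  In every
 * other case the CPU continues with whatever function has been installed
 * in shmobile_boot_fn_gen2.
 */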
ENTRY(shmobile_boot_vector_gen2)
	mrc	p15, 0, r0, c0, c0, 5	@ r0 = MPIDR
	ldr	r1, shmobile_boot_cpu_gen2
	cmp	r0, r1			@ boot CPU?
	bne	shmobile_smp_continue_gen2

	mrc	p15, 0, r1, c1, c0, 0	@ r1 = SCTLR
	and	r0, r1, #SCTLR_MMU
	cmp	r0, #SCTLR_MMU		@ MMU enabled?
	beq	shmobile_smp_continue_gen2

	ldr	r0, rwtcsra
	mov	r1, #0
	ldrb	r1, [r0]		@ r1 = RWTCSRA
	and	r0, r1, #RWTCSRA_WOVF
	cmp	r0, #RWTCSRA_WOVF	@ watchdog overflow?
	bne	shmobile_smp_continue_gen2

	ldr	r0, bootrom
	bx	r0			@ hand over to the Boot ROM

shmobile_smp_continue_gen2:
	ldr	r1, shmobile_boot_fn_gen2
	bx	r1
ENDPROC(shmobile_boot_vector_gen2)

	.align	4
rwtcsra:
	.word	RWTCSRA_ADDRESS
bootrom:
	.word	BOOTROM_ADDRESS
	.globl	shmobile_boot_cpu_gen2
shmobile_boot_cpu_gen2:
	.word	0x00000000

	.align	2
	.globl	shmobile_boot_fn_gen2
shmobile_boot_fn_gen2:
	.space	4
	.globl	shmobile_boot_size_gen2
shmobile_boot_size_gen2:
	.long	. - shmobile_boot_vector_gen2
#endif /* CONFIG_ARCH_RCAR_GEN2 */

/*
* Per-CPU SMP boot function/argument selection code based on MPIDR
*/
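/*
 * Each CPU that lands here reads its own MPIDR and scans the
 * shmobile_smp_mpidr[] array for a matching slot whose shmobile_smp_fn[]
 * entry is non-zero.  If one is found, the matching shmobile_smp_arg[]
 * value is loaded into r0 and the function is entered; otherwise the CPU
 * parks in shmobile_smp_sleep and rescans after the next wakeup.
 */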
ENTRY(shmobile_smp_boot)
	mrc	p15, 0, r1, c0, c0, 5	@ r1 = MPIDR
	and	r0, r1, #0xffffff	@ MPIDR_HWID_BITMASK
					@ r0 = cpu_logical_map() value
	mov	r1, #0			@ r1 = CPU index
	adr	r2, 1f
	ldmia	r2, {r5, r6, r7}
	add	r5, r5, r2		@ array of per-cpu mpidr values
	add	r6, r6, r2		@ array of per-cpu functions
	add	r7, r7, r2		@ array of per-cpu arguments

shmobile_smp_boot_find_mpidr:
	ldr	r8, [r5, r1, lsl #2]	@ r8 = shmobile_smp_mpidr[r1]
	cmp	r8, r0
	bne	shmobile_smp_boot_next

	ldr	r9, [r6, r1, lsl #2]	@ r9 = shmobile_smp_fn[r1]
	cmp	r9, #0
	bne	shmobile_smp_boot_found

shmobile_smp_boot_next:
	add	r1, r1, #1
	cmp	r1, #NR_CPUS
	blo	shmobile_smp_boot_find_mpidr

	b	shmobile_smp_sleep	@ no matching entry, go to sleep

shmobile_smp_boot_found:
	ldr	r0, [r7, r1, lsl #2]	@ r0 = shmobile_smp_arg[r1]
	ret	r9			@ branch to shmobile_smp_fn[r1] with r0 = arg
ENDPROC(shmobile_smp_boot)

ENTRY(shmobile_smp_sleep)
	wfi				@ wait for an interrupt, then rescan
	b	shmobile_smp_boot
ENDPROC(shmobile_smp_sleep)
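
/*
 * PC-relative offsets: adding the runtime address of this table (taken
 * with "adr r2, 1f" above) to each entry yields the address of the
 * corresponding array, regardless of whether the code runs at its
 * physical or its virtual location.
 */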
	.align	2
1:	.long	shmobile_smp_mpidr - .
	.long	shmobile_smp_fn - 1b
	.long	shmobile_smp_arg - 1b
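
/*
 * Per-cpu bring-up data, filled in by the platform's C code before a
 * secondary CPU is released or woken: one MPIDR, one function pointer
 * and one argument word per possible CPU.
 */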
	.bss
	.globl	shmobile_smp_mpidr
shmobile_smp_mpidr:
	.space	NR_CPUS * 4

	.globl	shmobile_smp_fn
shmobile_smp_fn:
	.space	NR_CPUS * 4

	.globl	shmobile_smp_arg
shmobile_smp_arg:
	.space	NR_CPUS * 4