RISC-V Patches for the 6.4 Merge Window, Part 2

* Support for hibernation.
 * .rela.dyn has been moved to init.
 * A fix for the SBI probing to allow for implementation-defined
   behavior.
 * Various other fixes and cleanups throughout the tree.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmRVHRATHHBhbG1lckBk
 YWJiZWx0LmNvbQAKCRAuExnzX7sYiearD/9tUL5STN3icSO58t2EBAmp4CuyBqWo
 KVhOmLmvZqz259GeqfcRsHANszLTwRPzyWxHQJGugPzAphZu3ukQRR8BEDTwwZJO
 toIhv9hXZ4RAu8Chi6Fs/J1WyYVyqSneGTk68xXBXOmm1MWaqU91z92Q5bJGfWqy
 yBSPOTMFvnHHAOdhIXigxLl+z0Y9EV013L18aesHArnuDHIgPGSF9UI6slQ7ThNV
 PhR+VsApd3Ho7+njOzK+mn+1afICKXXGAtmrPjyEt+nE4LmaJc/XY471SPTSlr3U
 BLWm3jmVTK/0peZxce4I2H6k3gz21PiSAy21E+26Bp2+lZD1iWH601eUyasLY88n
 FYXF5VQNvwMx8Ba/yN4VmQ8M25eJ7s7AKWvGa6VLwu0iHxGWmePqoaFuI6JaSXON
 TzJFJDN9xAaBf4Jt7c2c4X9tPJTEFZu6V51AaDDJllw/IJicwHNlNskZUsfvmqqb
 wE/fF6VtcrvEoeKvizOyZGXMs6Wgg6soufL0Ve8rD12U6ZBknVkGruQxF7B+JYsJ
 Ri6ndfKuguMRm6hZmJlVCfFULtm+D6wFczWmmfF562AFISAticib8u/kPz3jAGCu
 GbozEi333FFLBat2QpPK9zL0sH6tj7GCT3ppJjpjUtCmGPyyZuD8zT3rgTxSc8pe
 fp1EE13A2rsU3A==
 =xoqj
 -----END PGP SIGNATURE-----

Merge tag 'riscv-for-linus-6.4-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull more RISC-V updates from Palmer Dabbelt:

 - Support for hibernation

 - The .rela.dyn section has been moved to the init area

 - A fix for the SBI probing to allow for implementation-defined
   behavior

 - Various other fixes and cleanups throughout the tree

* tag 'riscv-for-linus-6.4-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  RISC-V: include cpufeature.h in cpufeature.c
  riscv: Move .rela.dyn to the init sections
  dt-bindings: riscv: explicitly mention assumption of Zicsr & Zifencei support
  riscv: compat_syscall_table: Fixup compile warning
  RISC-V: fixup in-flight collision with ARCH_WANT_OPTIMIZE_VMEMMAP rename
  RISC-V: fix sifive and thead section mismatches in errata
  RISC-V: Align SBI probe implementation with spec
  riscv: mm: remove redundant parameter of create_fdt_early_page_table
  riscv: Adjust dependencies of HAVE_DYNAMIC_FTRACE selection
  RISC-V: Add arch functions to support hibernation/suspend-to-disk
  RISC-V: mm: Enable huge page support to kernel_page_present() function
  RISC-V: Factor out common code of __cpu_resume_enter()
  RISC-V: Change suspend_save_csrs and suspend_restore_csrs to public function
This commit is contained in:
Linus Torvalds 2023-05-05 12:23:33 -07:00
commit 982365a8f5
22 changed files with 682 additions and 68 deletions

View File

@ -86,6 +86,12 @@ properties:
User-Level ISA document, available from
https://riscv.org/specifications/
Due to revisions of the ISA specification, some deviations
have arisen over time.
Notably, riscv,isa was defined prior to the creation of the
Zicsr and Zifencei extensions and thus "i" implies
"zicsr_zifencei".
While the isa strings in ISA specification are case
insensitive, letters in the riscv,isa string must be all
lowercase to simplify parsing.

View File

@ -47,16 +47,16 @@ config RISCV
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
select ARCH_WANT_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL
select ARCH_WANT_OPTIMIZE_VMEMMAP
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
select BUILDTIME_TABLE_SORT if MMU
select CLINT_TIMER if !MMU
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if CPU_IDLE
select CPU_PM if CPU_IDLE || HIBERNATION
select EDAC_SUPPORT
select GENERIC_ARCH_TOPOLOGY
select GENERIC_ATOMIC64 if !64BIT
@ -142,12 +142,23 @@ config RISCV
select TRACE_IRQFLAGS_SUPPORT
select UACCESS_MEMCPY if !MMU
select ZONE_DMA32 if 64BIT
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION
config CLANG_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_CLANG
# https://github.com/llvm/llvm-project/commit/6ab8927931851bb42b2c93a00801dc499d7d9b1e
depends on CLANG_VERSION >= 130000
# https://github.com/ClangBuiltLinux/linux/issues/1817
depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
config GCC_SUPPORTS_DYNAMIC_FTRACE
def_bool CC_IS_GCC
depends on $(cc-option,-fpatchable-function-entry=8)
config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT
default 8
@ -788,6 +799,12 @@ menu "Power management options"
source "kernel/power/Kconfig"
config ARCH_HIBERNATION_POSSIBLE
def_bool y
config ARCH_HIBERNATION_HEADER
def_bool HIBERNATION
endmenu # "Power management options"
menu "CPU Power Management"

View File

@ -82,11 +82,9 @@ static void __init_or_module warn_miss_errata(u32 miss_errata)
pr_warn("----------------------------------------------------------------\n");
}
void __init_or_module sifive_errata_patch_func(struct alt_entry *begin,
struct alt_entry *end,
unsigned long archid,
unsigned long impid,
unsigned int stage)
void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata;

View File

@ -83,9 +83,9 @@ static u32 thead_errata_probe(unsigned int stage,
return cpu_req_errata;
}
void __init_or_module thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
struct alt_entry *alt;
u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);

View File

@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2023 StarFive Technology Co., Ltd.
 *
 * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
 *
 * Assembly macros shared by the suspend and hibernation entry code.
 */

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>

/*
 * suspend_restore_csrs - restore CSRs
 *
 * a0: pointer to the saved suspend context (struct suspend_context).
 * Restores EPC, STATUS, TVAL and CAUSE from the pt_regs embedded at
 * SUSPEND_CONTEXT_REGS. Clobbers t0.
 */
.macro suspend_restore_csrs
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
	csrw	CSR_EPC, t0
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
	csrw	CSR_STATUS, t0
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
	csrw	CSR_TVAL, t0
	REG_L	t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
	csrw	CSR_CAUSE, t0
.endm

/*
 * suspend_restore_regs - Restore registers (except A0 and T0-T6)
 *
 * a0: pointer to the saved suspend context; a0 itself and the temporaries
 * are deliberately left untouched so the caller can still use them.
 */
.macro suspend_restore_regs
	REG_L	ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
	REG_L	sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
	REG_L	gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
	REG_L	tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
	REG_L	s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
	REG_L	s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
	REG_L	a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
	REG_L	a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
	REG_L	a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
	REG_L	a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
	REG_L	a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
	REG_L	a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
	REG_L	a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
	REG_L	s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
	REG_L	s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
	REG_L	s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
	REG_L	s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
	REG_L	s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
	REG_L	s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
	REG_L	s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
	REG_L	s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
	REG_L	s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
	REG_L	s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
.endm

/*
 * copy_page - copy 1 page (4KB) of data from source to destination
 * @a0 - destination
 * @a1 - source
 *
 * a2 = a0 + 0x1000 is the end pointer (lui a2, 0x1 loads 4KB); each
 * iteration moves 2 * SZREG bytes, so the page size must be a multiple
 * of 2 * SZREG. Clobbers a2, t0 and t1; a0/a1 end up past the page.
 */
.macro copy_page a0, a1
	lui	a2, 0x1
	add	a2, a2, a0
1:
	REG_L	t0, 0(a1)
	REG_L	t1, SZREG(a1)

	REG_S	t0, 0(a0)
	REG_S	t1, SZREG(a0)

	addi	a0, a0, 2 * SZREG
	addi	a1, a1, 2 * SZREG
	bne	a2, a0, 1b
.endm

#endif	/* __ASM_ASSEMBLER_H */

View File

@ -295,7 +295,7 @@ int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long asid);
int sbi_probe_extension(int ext);
long sbi_probe_extension(int ext);
/* Check if current SBI specification version is 0.1 or not */
static inline int sbi_spec_is_0_1(void)

View File

@ -21,6 +21,11 @@ struct suspend_context {
#endif
};
/*
* Used by hibernation core and cleared during resume sequence
*/
extern int in_suspend;
/* Low-level CPU suspend entry function */
int __cpu_suspend_enter(struct suspend_context *context);
@ -33,4 +38,21 @@ int cpu_suspend(unsigned long arg,
/* Low-level CPU resume entry function */
int __cpu_resume_enter(unsigned long hartid, unsigned long context);
/* Used to save and restore the CSRs */
void suspend_save_csrs(struct suspend_context *context);
void suspend_restore_csrs(struct suspend_context *context);
/* Low-level API to support hibernation */
int swsusp_arch_suspend(void);
int swsusp_arch_resume(void);
int arch_hibernation_header_save(void *addr, unsigned int max_size);
int arch_hibernation_header_restore(void *addr);
int __hibernate_cpu_resume(void);
/* Used to resume on the CPU we hibernated on */
int hibernate_resume_nonboot_cpu_disable(void);
asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
unsigned long cpu_resume);
asmlinkage int hibernate_core_restore_code(void);
#endif

View File

@ -9,6 +9,7 @@ CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
endif
CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,)
CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,)
ifdef CONFIG_KEXEC
AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax)
@ -64,6 +65,7 @@ obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o

View File

@ -9,6 +9,7 @@
#include <linux/kbuild.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <asm/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
@ -116,6 +117,10 @@ void asm_offsets(void)
OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
OFFSET(HIBERN_PBE_ADDR, pbe, address);
OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
OFFSET(HIBERN_PBE_NEXT, pbe, next);
OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);

View File

@ -27,7 +27,7 @@ const struct cpu_operations cpu_ops_spinwait = {
void __init cpu_set_ops(int cpuid)
{
#if IS_ENABLED(CONFIG_RISCV_SBI)
if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
if (sbi_probe_extension(SBI_EXT_HSM)) {
if (!cpuid)
pr_info("SBI HSM extension detected\n");
cpu_ops[cpuid] = &cpu_ops_sbi;

View File

@ -14,6 +14,7 @@
#include <linux/of.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwcap.h>
#include <asm/patch.h>
#include <asm/processor.h>

View File

@ -0,0 +1,77 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernation low level support for RISCV.
 *
 * Copyright (C) 2023 StarFive Technology Co., Ltd.
 *
 * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/csr.h>

#include <linux/linkage.h>

/*
 * int __hibernate_cpu_resume(void)
 * Switch back to the hibernated image's page table prior to restoring the CPU
 * context.
 *
 * Always returns 0
 *
 * Expects s0 to hold the hibernated image's satp value, set up by
 * hibernate_restore_image() below before the restore code jumps here.
 */
ENTRY(__hibernate_cpu_resume)
	/* switch to hibernated image's page table. */
	csrw	CSR_SATP, s0
	sfence.vma

	/* a0 = saved CPU context, consumed by the two restore macros. */
	REG_L	a0, hibernate_cpu_context

	suspend_restore_csrs
	suspend_restore_regs

	/* Return zero value. */
	mv	a0, zero

	ret
END(__hibernate_cpu_resume)

/*
 * Prepare to restore the image.
 * a0: satp of saved page tables.
 * a1: satp of temporary page tables.
 * a2: cpu_resume.
 *
 * Stashes the arguments in s-registers (s0 = image satp, s1 = temporary
 * satp, s2 = resume entry) so they survive the jump into the relocated
 * restore code, then tail-jumps into that code.
 */
ENTRY(hibernate_restore_image)
	mv	s0, a0
	mv	s1, a1
	mv	s2, a2
	/* s4 = head of the hibernation core's page-backup list. */
	REG_L	s4, restore_pblist
	REG_L	a1, relocated_restore_code

	jalr	a1
END(hibernate_restore_image)

/*
 * The below code will be executed from a 'safe' page.
 * It first switches to the temporary page table, then starts to copy the pages
 * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
 * to restore the CPU context.
 *
 * NOTE(review): this uses the bare `satp` CSR name whereas the rest of the
 * file uses CSR_SATP — consider CSR_SATP for consistency (same encoding).
 */
ENTRY(hibernate_core_restore_code)
	/* switch to temp page table. */
	csrw satp, s1
	sfence.vma
.Lcopy:
	/* The below code will restore the hibernated image. */
	REG_L	a1, HIBERN_PBE_ADDR(s4)
	REG_L	a0, HIBERN_PBE_ORIG(s4)

	copy_page a0, a1

	REG_L	s4, HIBERN_PBE_NEXT(s4)
	bnez	s4, .Lcopy

	/* Jump to cpu_resume (__hibernate_cpu_resume), stashed in s2. */
	jalr	s2
END(hibernate_core_restore_code)

View File

@ -0,0 +1,427 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hibernation support for RISCV
*
* Copyright (C) 2023 StarFive Technology Co., Ltd.
*
* Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
*/
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <asm/suspend.h>
#include <linux/cpu.h>
#include <linux/memblock.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
/* The logical cpu number we should resume on, initialised to a non-cpu number. */
static int sleep_cpu = -EINVAL;
/* Pointer to the temporary resume page table. */
static pgd_t *resume_pg_dir;
/* CPU context to be saved. */
struct suspend_context *hibernate_cpu_context;
EXPORT_SYMBOL_GPL(hibernate_cpu_context);
unsigned long relocated_restore_code;
EXPORT_SYMBOL_GPL(relocated_restore_code);
/**
* struct arch_hibernate_hdr_invariants - container to store kernel build version.
* @uts_version: to save the build number and date so that we do not resume with
* a different kernel.
*/
struct arch_hibernate_hdr_invariants {
char uts_version[__NEW_UTS_LEN + 1];
};
/**
* struct arch_hibernate_hdr - helper parameters that help us to restore the image.
* @invariants: container to store kernel build version.
* @hartid: to make sure same boot_cpu executes the hibernate/restore code.
* @saved_satp: original page table used by the hibernated image.
* @restore_cpu_addr: the kernel's image address to restore the CPU context.
*/
static struct arch_hibernate_hdr {
struct arch_hibernate_hdr_invariants invariants;
unsigned long hartid;
unsigned long saved_satp;
unsigned long restore_cpu_addr;
} resume_hdr;
/*
 * Fill @i with the kernel's identity (uts version string) so a resumed
 * image can be rejected if it was produced by a different build.
 */
static void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	*i = (struct arch_hibernate_hdr_invariants){ 0 };
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
/*
* Check if the given pfn is in the 'nosave' section.
*/
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);
return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn));
}
/*
 * Sanity check only: hibernation must run with a single CPU online; the
 * CSR state itself is captured in swsusp_arch_suspend().
 */
void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}
/*
 * Nothing to do here: CSR/GPR restoration is handled on the low-level
 * resume path (__hibernate_cpu_resume / swsusp_arch_suspend else-branch).
 */
void notrace restore_processor_state(void)
{
}
/*
 * Helper parameters need to be saved to the hibernation image header.
 * Records the boot hart, the image's satp and the kernel-virtual address of
 * the resume trampoline so the resuming kernel can jump back into us.
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	/* The header the core gives us must be big enough for our fields. */
	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);

	/* Hart we suspended on, so resume can be pinned to the same hart. */
	hdr->hartid = cpuid_to_hartid_map(sleep_cpu);
	/* Page table in use by the hibernated image. */
	hdr->saved_satp = csr_read(CSR_SATP);
	hdr->restore_cpu_addr = (unsigned long)__hibernate_cpu_resume;

	return 0;
}
EXPORT_SYMBOL_GPL(arch_hibernation_header_save);
/*
 * Retrieve the helper parameters from the hibernation image header.
 * Rejects images built by a different kernel or hibernated on an unknown
 * hart; on success the header is cached in resume_hdr for swsusp_arch_resume().
 */
int arch_hibernation_header_restore(void *addr)
{
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;
	int ret = 0;

	arch_hdr_invariants(&invariants);

	/* The image must come from this exact kernel build. */
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	/* Map the recorded hartid back to a logical CPU number. */
	sleep_cpu = riscv_hartid_to_cpuid(hdr->hartid);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

#ifdef CONFIG_SMP
	/* Make sure we resume on the same CPU that hibernated. */
	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}
#endif
	resume_hdr = *hdr;

	return ret;
}
EXPORT_SYMBOL_GPL(arch_hibernation_header_restore);
/*
 * Snapshot the system. Returns twice: __cpu_suspend_enter() returns nonzero
 * on the suspend path (so we save CSRs and snapshot memory), and zero when
 * control comes back via the low-level resume code (so we restore CSRs and
 * flush TLB/icache — see __cpu_resume_enter, which returns zero).
 */
int swsusp_arch_suspend(void)
{
	int ret = 0;

	if (__cpu_suspend_enter(hibernate_cpu_context)) {
		/* Remember which CPU we hibernated on for the image header. */
		sleep_cpu = smp_processor_id();
		suspend_save_csrs(hibernate_cpu_context);
		ret = swsusp_save();
	} else {
		suspend_restore_csrs(hibernate_cpu_context);
		flush_tlb_all();
		flush_icache_all();

		/*
		 * Tell the hibernation core that we've just restored the memory.
		 */
		in_suspend = 0;
		sleep_cpu = -EINVAL;
	}

	return ret;
}
/*
 * Copy the present PTEs of [start, end) from @src_pmdp into @dst_pmdp,
 * OR-ing in the extra protection bits from @prot. A missing destination
 * PTE table is allocated from the hibernation-safe page pool.
 * Returns 0 on success, -ENOMEM if a table page cannot be allocated.
 */
static int temp_pgtable_map_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;

	if (pmd_none(READ_ONCE(*dst_pmdp))) {
		/* get_safe_page() memory is reclaimed by the hibernation core. */
		dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_ptep)
			return -ENOMEM;

		pmd_populate_kernel(NULL, dst_pmdp, dst_ptep);
	}

	dst_ptep = pte_offset_kernel(dst_pmdp, start);
	src_ptep = pte_offset_kernel(src_pmdp, start);

	do {
		pte_t pte = READ_ONCE(*src_ptep);

		if (pte_present(pte))
			set_pte(dst_ptep, __pte(pte_val(pte) | pgprot_val(prot)));
	} while (dst_ptep++, src_ptep++, start += PAGE_SIZE, start < end);

	return 0;
}
/*
 * Copy the PMD-level mappings of [start, end) from @src_pudp into @dst_pudp,
 * OR-ing @prot into leaf entries and recursing into PTE tables otherwise.
 * Destination tables are allocated from the hibernation-safe page pool.
 * Returns 0 on success or a negative errno.
 */
static int temp_pgtable_map_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	unsigned long next;
	int ret;	/* error codes are negative ints; was wrongly unsigned long */
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmdp)
			return -ENOMEM;

		pud_populate(NULL, dst_pudp, dst_pmdp);
	}

	dst_pmdp = pmd_offset(dst_pudp, start);
	src_pmdp = pmd_offset(src_pudp, start);

	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(start, end);

		if (pmd_none(pmd))
			continue;

		if (pmd_leaf(pmd)) {
			/* Huge mapping: copy the leaf entry with extra prot bits. */
			set_pmd(dst_pmdp, __pmd(pmd_val(pmd) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_pte(dst_pmdp, src_pmdp, start, next, prot);
			if (ret)
				return ret;	/* propagate instead of hard-coding -ENOMEM */
		}
	} while (dst_pmdp++, src_pmdp++, start = next, start != end);

	return 0;
}
/*
 * Copy the PUD-level mappings of [start, end) from @src_p4dp into @dst_p4dp,
 * OR-ing @prot into leaf entries and recursing into PMD tables otherwise.
 * Destination tables are allocated from the hibernation-safe page pool.
 * Returns 0 on success or a negative errno.
 */
static int temp_pgtable_map_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	unsigned long next;
	int ret;	/* error codes are negative ints; was wrongly unsigned long */
	pud_t *dst_pudp;
	pud_t *src_pudp;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;

		p4d_populate(NULL, dst_p4dp, dst_pudp);
	}

	dst_pudp = pud_offset(dst_p4dp, start);
	src_pudp = pud_offset(src_p4dp, start);

	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(start, end);

		if (pud_none(pud))
			continue;

		if (pud_leaf(pud)) {
			/* Huge mapping: copy the leaf entry with extra prot bits. */
			set_pud(dst_pudp, __pud(pud_val(pud) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_pmd(dst_pudp, src_pudp, start, next, prot);
			if (ret)
				return ret;	/* propagate instead of hard-coding -ENOMEM */
		}
	} while (dst_pudp++, src_pudp++, start = next, start != end);

	return 0;
}
/*
 * Copy the P4D-level mappings of [start, end) from @src_pgdp into @dst_pgdp,
 * OR-ing @prot into leaf entries and recursing into PUD tables otherwise.
 * Destination tables are allocated from the hibernation-safe page pool.
 * Returns 0 on success or a negative errno.
 */
static int temp_pgtable_map_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
				unsigned long end, pgprot_t prot)
{
	unsigned long next;
	int ret;	/* error codes are negative ints; was wrongly unsigned long */
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;

	if (pgd_none(READ_ONCE(*dst_pgdp))) {
		dst_p4dp = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_p4dp)
			return -ENOMEM;

		pgd_populate(NULL, dst_pgdp, dst_p4dp);
	}

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);

	do {
		p4d_t p4d = READ_ONCE(*src_p4dp);

		next = p4d_addr_end(start, end);

		if (p4d_none(p4d))
			continue;

		if (p4d_leaf(p4d)) {
			/* Huge mapping: copy the leaf entry with extra prot bits. */
			set_p4d(dst_p4dp, __p4d(p4d_val(p4d) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_pud(dst_p4dp, src_p4dp, start, next, prot);
			if (ret)
				return ret;	/* propagate instead of hard-coding -ENOMEM */
		}
	} while (dst_p4dp++, src_p4dp++, start = next, start != end);

	return 0;
}
/*
 * Duplicate the kernel mapping of [start, end) into the temporary resume
 * page table rooted at @pgdp, adding the @prot bits (e.g. write/exec) to
 * every copied entry. Returns 0 on success or a negative errno.
 */
static int temp_pgtable_mapping(pgd_t *pgdp, unsigned long start, unsigned long end, pgprot_t prot)
{
	pgd_t *dst_pgdp = pgd_offset_pgd(pgdp, start);
	pgd_t *src_pgdp = pgd_offset_k(start);
	unsigned long next;
	int ret;	/* error codes are negative ints; was wrongly unsigned long */

	do {
		pgd_t pgd = READ_ONCE(*src_pgdp);

		next = pgd_addr_end(start, end);

		if (pgd_none(pgd))
			continue;

		if (pgd_leaf(pgd)) {
			/* Huge mapping: copy the leaf entry with extra prot bits. */
			set_pgd(dst_pgdp, __pgd(pgd_val(pgd) | pgprot_val(prot)));
		} else {
			ret = temp_pgtable_map_p4d(dst_pgdp, src_pgdp, start, next, prot);
			if (ret)
				return ret;	/* propagate instead of hard-coding -ENOMEM */
		}
	} while (dst_pgdp++, src_pgdp++, start = next, start != end);

	return 0;
}
/*
 * Copy hibernate_core_restore_code() into a hibernation-safe page and mark
 * that page executable, so the restore loop cannot be overwritten by the
 * image pages it is itself restoring.
 *
 * Returns the address of the relocated code, or (unsigned long)-ENOMEM on
 * allocation failure — the caller compares against -ENOMEM exactly.
 */
static unsigned long relocate_restore_code(void)
{
	void *page = (void *)get_safe_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;

	copy_page(page, hibernate_core_restore_code);

	/* Make the page containing the relocated code executable. */
	set_memory_x((unsigned long)page, 1);

	return (unsigned long)page;
}
/*
 * Build a temporary page table, relocate the restore code into a safe page,
 * and jump into it to copy the hibernated image back into place. Does not
 * return on success: control resumes in __hibernate_cpu_resume().
 */
int swsusp_arch_resume(void)
{
	/* Cover the whole linear map: PAGE_OFFSET .. end of low memory. */
	unsigned long end = (unsigned long)pfn_to_virt(max_low_pfn);
	unsigned long start = PAGE_OFFSET;
	int ret;

	/*
	 * Memory allocated by get_safe_page() will be dealt with by the hibernation core,
	 * we don't need to free it here.
	 */
	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	/*
	 * Create a temporary page table and map the whole linear region as executable and
	 * writable.
	 */
	ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE | _PAGE_EXEC));
	if (ret)
		return ret;

	/* Move the restore code to a new page so that it doesn't get overwritten by itself. */
	relocated_restore_code = relocate_restore_code();
	if (relocated_restore_code == -ENOMEM)
		return -ENOMEM;

	/*
	 * Map the __hibernate_cpu_resume() address into the temporary page table so
	 * that the restore code can jump to it once it has finished restoring the
	 * image, without finding itself in a different address space after switching
	 * over to the original page table used by the hibernated image.
	 * The __hibernate_cpu_resume() mapping is unnecessary for RV32 since the kernel and
	 * linear addresses are identical, but different for RV64. To ensure consistency, we
	 * map it for both RV32 and RV64 kernels.
	 * Additionally, we should ensure that the page is writable before restoring the image.
	 */
	start = (unsigned long)resume_hdr.restore_cpu_addr;
	end = start + PAGE_SIZE;

	ret = temp_pgtable_mapping(resume_pg_dir, start, end, __pgprot(_PAGE_WRITE));
	if (ret)
		return ret;

	/* Hand control to the relocated restore code (see hibernate-asm.S). */
	hibernate_restore_image(resume_hdr.saved_satp, (PFN_DOWN(__pa(resume_pg_dir)) | satp_mode),
				resume_hdr.restore_cpu_addr);

	return 0;
}
#ifdef CONFIG_PM_SLEEP_SMP
/*
 * Take all secondary CPUs offline so that resume runs on the CPU recorded
 * at hibernation time; fails if that CPU was never identified.
 */
int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu >= 0)
		return freeze_secondary_cpus(sleep_cpu);

	pr_err("Failing to resume from hibernate on an unknown CPU\n");
	return -ENODEV;
}
#endif
/*
 * Allocate the per-boot CPU context buffer used by the suspend/resume
 * low-level code; runs early so it is ready before hibernation starts.
 */
static int __init riscv_hibernate_init(void)
{
	struct suspend_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (WARN_ON(!ctx))
		return -ENOMEM;

	hibernate_cpu_context = ctx;
	return 0;
}

early_initcall(riscv_hibernate_init);

View File

@ -524,19 +524,18 @@ static void sbi_srst_power_off(void)
* sbi_probe_extension() - Check if an SBI extension ID is supported or not.
* @extid: The extension ID to be probed.
*
* Return: Extension specific nonzero value f yes, -ENOTSUPP otherwise.
* Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
*/
int sbi_probe_extension(int extid)
long sbi_probe_extension(int extid)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
0, 0, 0, 0, 0);
if (!ret.error)
if (ret.value)
return ret.value;
return ret.value;
return -ENOTSUPP;
return 0;
}
EXPORT_SYMBOL(sbi_probe_extension);
@ -599,26 +598,26 @@ void __init sbi_init(void)
if (!sbi_spec_is_0_1()) {
pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
sbi_get_firmware_id(), sbi_get_firmware_version());
if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
if (sbi_probe_extension(SBI_EXT_TIME)) {
__sbi_set_timer = __sbi_set_timer_v02;
pr_info("SBI TIME extension detected\n");
} else {
__sbi_set_timer = __sbi_set_timer_v01;
}
if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
if (sbi_probe_extension(SBI_EXT_IPI)) {
__sbi_send_ipi = __sbi_send_ipi_v02;
pr_info("SBI IPI extension detected\n");
} else {
__sbi_send_ipi = __sbi_send_ipi_v01;
}
if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
if (sbi_probe_extension(SBI_EXT_RFENCE)) {
__sbi_rfence = __sbi_rfence_v02;
pr_info("SBI RFENCE extension detected\n");
} else {
__sbi_rfence = __sbi_rfence_v01;
}
if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
(sbi_probe_extension(SBI_EXT_SRST) > 0)) {
sbi_probe_extension(SBI_EXT_SRST)) {
pr_info("SBI SRST extension detected\n");
pm_power_off = sbi_srst_power_off;
sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;

View File

@ -8,7 +8,7 @@
#include <asm/csr.h>
#include <asm/suspend.h>
static void suspend_save_csrs(struct suspend_context *context)
void suspend_save_csrs(struct suspend_context *context)
{
context->scratch = csr_read(CSR_SCRATCH);
context->tvec = csr_read(CSR_TVEC);
@ -29,7 +29,7 @@ static void suspend_save_csrs(struct suspend_context *context)
#endif
}
static void suspend_restore_csrs(struct suspend_context *context)
void suspend_restore_csrs(struct suspend_context *context)
{
csr_write(CSR_SCRATCH, context->scratch);
csr_write(CSR_TVEC, context->tvec);

View File

@ -7,6 +7,7 @@
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/csr.h>
#include <asm/xip_fixup.h>
@ -83,39 +84,10 @@ ENTRY(__cpu_resume_enter)
add a0, a1, zero
/* Restore CSRs */
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
csrw CSR_EPC, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
csrw CSR_STATUS, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
csrw CSR_TVAL, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
csrw CSR_CAUSE, t0
suspend_restore_csrs
/* Restore registers (except A0 and T0-T6) */
REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
suspend_restore_regs
/* Return zero value */
add a0, zero, zero

View File

@ -104,6 +104,12 @@ SECTIONS
*(.rel.dyn*)
}
.rela.dyn : ALIGN(8) {
__rela_dyn_start = .;
*(.rela .rela*)
__rela_dyn_end = .;
}
__init_data_end = .;
. = ALIGN(8);
@ -130,12 +136,6 @@ SECTIONS
*(.sdata*)
}
.rela.dyn : ALIGN(8) {
__rela_dyn_start = .;
*(.rela .rela*)
__rela_dyn_end = .;
}
.got : { *(.got*) }
#ifdef CONFIG_RELOCATABLE

View File

@ -80,7 +80,7 @@ static int __init riscv_kvm_init(void)
return -ENODEV;
}
if (sbi_probe_extension(SBI_EXT_RFENCE) <= 0) {
if (!sbi_probe_extension(SBI_EXT_RFENCE)) {
kvm_info("require SBI RFENCE extension\n");
return -ENODEV;
}

View File

@ -919,8 +919,7 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
* this means 2 PMD entries whereas for 32-bit kernel, this is only 1 PGDIR
* entry.
*/
static void __init create_fdt_early_page_table(pgd_t *pgdir,
uintptr_t fix_fdt_va,
static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
uintptr_t dtb_pa)
{
uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
@ -1132,8 +1131,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
create_kernel_page_table(early_pg_dir, true);
/* Setup early mapping for FDT early scan */
create_fdt_early_page_table(early_pg_dir,
__fix_to_virt(FIX_FDT), dtb_pa);
create_fdt_early_page_table(__fix_to_virt(FIX_FDT), dtb_pa);
/*
* Bootime fixmap only can handle PMD_SIZE mapping. Thus, boot-ioremap

View File

@ -217,18 +217,26 @@ bool kernel_page_present(struct page *page)
pgd = pgd_offset_k(addr);
if (!pgd_present(*pgd))
return false;
if (pgd_leaf(*pgd))
return true;
p4d = p4d_offset(pgd, addr);
if (!p4d_present(*p4d))
return false;
if (p4d_leaf(*p4d))
return true;
pud = pud_offset(p4d, addr);
if (!pud_present(*pud))
return false;
if (pud_leaf(*pud))
return true;
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd))
return false;
if (pmd_leaf(*pmd))
return true;
pte = pte_offset_kernel(pmd, addr);
return pte_present(*pte);

View File

@ -613,7 +613,7 @@ static int __init sbi_cpuidle_init(void)
* 2) SBI HSM extension is available
*/
if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
sbi_probe_extension(SBI_EXT_HSM) <= 0) {
!sbi_probe_extension(SBI_EXT_HSM)) {
pr_info("HSM suspend not available\n");
return 0;
}

View File

@ -924,7 +924,7 @@ static int __init pmu_sbi_devinit(void)
struct platform_device *pdev;
if (sbi_spec_version < sbi_mk_version(0, 3) ||
sbi_probe_extension(SBI_EXT_PMU) <= 0) {
!sbi_probe_extension(SBI_EXT_PMU)) {
return 0;
}