mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-20 01:20:54 +00:00
2c8951ab0c
For soft-rebooting a system, it is necessary to map the MMU-off code with an identity mapping so that execution can continue safely once the MMU has been switched off. Currently, switch_mm_for_reboot takes out a 1:1 mapping from 0x0 to TASK_SIZE during reboot in the hope that the reset code lives at a physical address corresponding to a userspace virtual address. This patch modifies the code so that we switch to the idmap_pgd tables, which contain a 1:1 mapping of the cpu_reset code. This has the advantage of only remapping the code that we need and also means we don't need to worry about allocating a pgd from an atomic context in the case that the physical address of the cpu_reset code aliases with the virtual space used by the kernel. Acked-by: Dave Martin <dave.martin@linaro.org> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
117 lines
2.7 KiB
C
117 lines
2.7 KiB
C
#include <linux/kernel.h>
|
|
|
|
#include <asm/cputype.h>
|
|
#include <asm/idmap.h>
|
|
#include <asm/pgalloc.h>
|
|
#include <asm/pgtable.h>
|
|
#include <asm/sections.h>
|
|
|
|
/*
 * Page tables holding the static 1:1 (identity) mapping of the cpu_reset
 * code; allocated and populated once at boot by init_static_idmap().
 */
pgd_t *idmap_pgd;
|
|
|
|
/*
 * Install an identity section mapping for the region containing @addr in
 * the pmd under @pud.
 *
 * Both consecutive entries of the Linux pmd are written as section
 * descriptors (the second one SECTION_SIZE further on), and the pair is
 * flushed afterwards so the table walker sees the update.
 *
 * @prot carries the PMD_* attribute bits OR'd into each entry.  @end is
 * unused: callers bound the range with pud_addr_end(), and this always
 * fills the whole pmd pair.
 */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			  unsigned long prot)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/* Section base address (addr rounded down) plus attribute bits. */
	addr = (addr & PMD_MASK) | prot;
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
	flush_pmd_entry(pmd);
}
|
|
|
|
/*
 * Walk the pud entries spanning [addr, end) under @pgd and install
 * identity section mappings for each via idmap_add_pmd().
 */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			  unsigned long prot)
{
	pud_t *pud = pud_offset(pgd, addr);

	for (;;) {
		unsigned long boundary = pud_addr_end(addr, end);

		idmap_add_pmd(pud, addr, boundary, prot);
		addr = boundary;
		if (addr == end)
			break;
		pud++;
	}
}
|
|
|
|
/*
 * Create an identity (virtual == physical) section mapping covering
 * [addr, end) in the page tables rooted at @pgd.
 */
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	unsigned long flags = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

	/* Cores at or below ARMv5TEJ (other than XScale) take PMD_BIT4. */
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		flags |= PMD_BIT4;

	pgd += pgd_index(addr);
	for (;;) {
		unsigned long boundary = pgd_addr_end(addr, end);

		idmap_add_pud(pgd, addr, boundary, flags);
		addr = boundary;
		if (addr == end)
			break;
		pgd++;
	}
}
|
|
|
|
#ifdef CONFIG_SMP
|
|
/* Clear the pmd entry covering @addr under @pud.  @end is unused. */
static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_clear(pmd_offset(pud, addr));
}
|
|
|
|
/*
 * Walk the pud entries spanning [addr, end) under @pgd and tear down the
 * identity mappings installed there via idmap_del_pmd().
 */
static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud = pud_offset(pgd, addr);

	for (;;) {
		unsigned long boundary = pud_addr_end(addr, end);

		idmap_del_pmd(pud, addr, boundary);
		addr = boundary;
		if (addr == end)
			break;
		pud++;
	}
}
|
|
|
|
/*
 * Remove the identity mapping covering [addr, end) from the page tables
 * rooted at @pgd (inverse of identity_mapping_add()).
 */
void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pgd += pgd_index(addr);
	for (;;) {
		unsigned long boundary = pgd_addr_end(addr, end);

		idmap_del_pud(pgd, addr, boundary);
		addr = boundary;
		if (addr == end)
			break;
		pgd++;
	}
}
|
|
#endif
|
|
|
|
extern char __idmap_text_start[], __idmap_text_end[];
|
|
|
|
/*
 * Allocate the static identity-map pgd and populate it with a 1:1
 * mapping of the physical addresses spanned by the .idmap.text section
 * (__idmap_text_start .. __idmap_text_end), so the cpu_reset code can
 * keep executing once the MMU is switched off.  Runs once at
 * arch_initcall time; returns 0 on success, -ENOMEM if the pgd cannot
 * be allocated.
 */
static int __init init_static_idmap(void)
{
	/* Physical extent of the section to identity-map. */
	phys_addr_t start = virt_to_phys((void *)__idmap_text_start);
	phys_addr_t end = virt_to_phys((void *)__idmap_text_end);

	idmap_pgd = pgd_alloc(&init_mm);
	if (idmap_pgd == NULL)
		return -ENOMEM;

	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
		(long long)start, (long long)end);
	identity_mapping_add(idmap_pgd, start, end);

	return 0;
}
arch_initcall(init_static_idmap);
|
|
|
|
/*
 * In order to soft-boot, we need to switch to a 1:1 mapping for the
 * cpu_reset functions. This will then ensure that we have predictable
 * results when turning off the mmu.
 *
 * The sequence below is order-sensitive: caches are cleaned before the
 * page-table switch, and stale translations are discarded afterwards.
 */
void setup_mm_for_reboot(void)
{
	/* Clean and invalidate L1. */
	flush_cache_all();

	/* Switch to the identity mapping (idmap_pgd, set up at boot). */
	cpu_switch_mm(idmap_pgd, &init_mm);

	/* Flush the TLB. */
	local_flush_tlb_all();
}
|