Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git

commit 5673a60b80

1. These TLB flush functions switched from taking an mm to taking a vma
   long ago, but some comments still describe an mm parameter.
2. The struct actually used is vm_area_struct, not "vma_struct".
3. Remove the unused flush_kern_tlb_page.

Link: https://lkml.kernel.org/r/87k0oaq311.wl-chenli@uniontech.com
Signed-off-by: Chen Li <chenli@uniontech.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
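
For reference, the two entry points in the file below have C-visible prototypes along the lines of this sketch (the real declarations are generated through arch/arm's <asm/tlbflush.h> glue and may be spelled differently by kernel version). Note that the third argument of the user-range flush is a vm_area_struct pointer, the struct these comment fixes name correctly:

struct vm_area_struct;

void v7wbi_flush_user_tlb_range(unsigned long start, unsigned long end,
				struct vm_area_struct *vma);
void v7wbi_flush_kern_tlb_range(unsigned long start, unsigned long end);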
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlb-v7.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *  Modified for ARMv7 by Catalin Marinas
 *
 *  ARM architecture version 7 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"

/*
 *	v7wbi_flush_user_tlb_range(start, end, vma)
 *
 *	Invalidate a range of TLB entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- vma   - vm_area_struct describing address range
 *
 *	It is assumed that:
 *	- the "Invalidate single entry" instruction will invalidate
 *	  both the I and the D TLBs on Harvard-style TLBs
 */
ENTRY(v7wbi_flush_user_tlb_range)
	vma_vm_mm r3, r2			@ get vma->vm_mm
	mmid	r3, r3				@ get vm_mm->context.id
	dsb	ish
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	asid	r3, r3				@ mask ASID
#ifdef CONFIG_ARM_ERRATA_720789
	ALT_SMP(W(mov)	r3, #0	)
	ALT_UP(W(nop)		)
#endif
	orr	r0, r3, r0, lsl #PAGE_SHIFT	@ Create initial MVA
	mov	r1, r1, lsl #PAGE_SHIFT
1:
#ifdef CONFIG_ARM_ERRATA_720789
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 3)	@ TLB invalidate U MVA all ASID (shareable)
#else
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
#endif
	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA

	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	dsb	ish
	ret	lr
ENDPROC(v7wbi_flush_user_tlb_range)
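
Reading past the register juggling, the loop above reduces to a short C model. This is a sketch of the non-errata path only, the dsb barriers are omitted, and tlb_invalidate_mva() is a hypothetical stand-in for the CP15 invalidate-by-MVA instruction:

#define PAGE_SHIFT	12			/* assuming 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Hypothetical stand-in for "mcr p15, 0, mva, c8, c3, 1" (invalidate
 * unified TLB entry by MVA, inner shareable). */
void tlb_invalidate_mva(unsigned long mva);

/* Model of v7wbi_flush_user_tlb_range: the MVA operand carries the page
 * address in its upper bits and the 8-bit ASID in bits [7:0], which is
 * why the asm shifts the address down and back up around the asid macro. */
void v7_flush_user_range_model(unsigned long start, unsigned long end,
			       unsigned long asid)
{
	unsigned long mva = (start & PAGE_MASK) | (asid & 0xff);

	end &= PAGE_MASK;
	do {				/* label "1:" in the asm */
		tlb_invalidate_mva(mva);
		mva += PAGE_SIZE;	/* add r0, r0, #PAGE_SZ */
	} while (mva < end);		/* cmp r0, r1; blo 1b */
}

Under CONFIG_ARM_ERRATA_720789 the asm instead zeroes the ASID and uses the invalidate-by-MVA-all-ASIDs operation (c8, c3, 3), trading precision for correctness on affected cores.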

/*
 *	v7wbi_flush_kern_tlb_range(start, end)
 *
 *	Invalidate a range of kernel TLB entries
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 */
ENTRY(v7wbi_flush_kern_tlb_range)
	dsb	ish
	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
	mov	r1, r1, lsr #PAGE_SHIFT
	mov	r0, r0, lsl #PAGE_SHIFT
	mov	r1, r1, lsl #PAGE_SHIFT
1:
#ifdef CONFIG_ARM_ERRATA_720789
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 3)	@ TLB invalidate U MVA all ASID (shareable)
#else
	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
#endif
	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
	add	r0, r0, #PAGE_SZ
	cmp	r0, r1
	blo	1b
	dsb	ish
	isb
	ret	lr
ENDPROC(v7wbi_flush_kern_tlb_range)
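
Generic kernel code normally reaches this routine through flush_tlb_kernel_range(), which the cpu_tlb function table resolves to v7wbi_flush_kern_tlb_range on these CPUs. A minimal usage sketch, assuming the standard <asm/tlbflush.h> API (update_kernel_mapping() is a made-up caller):

#include <asm/tlbflush.h>

/* After rewriting kernel page-table entries for [start, end), stale TLB
 * entries must be dropped before the new mapping can be relied upon. */
static void update_kernel_mapping(unsigned long start, unsigned long end)
{
	/* ... modify the kernel page tables for [start, end) here ... */
	flush_tlb_kernel_range(start, end);
}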

	__INIT

	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
	define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
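
The define_tlb_functions macro (from proc-macros.S) emits the struct cpu_tlb_fns instance that plugs the two routines above into the processor-specific TLB dispatch. As a sketch, the struct is roughly the following; the field layout is quoted from memory of arch/arm/include/asm/tlbflush.h and may differ by kernel version:

struct vm_area_struct;

struct cpu_tlb_fns {
	void (*flush_user_range)(unsigned long, unsigned long,
				 struct vm_area_struct *);
	void (*flush_kern_range)(unsigned long, unsigned long);
	unsigned long tlb_flags;
};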