parisc: deduplicate code in flush_cache_mm() and flush_cache_range()

Parts of both functions are the same, so deduplicate them. No functional
change.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Helge Deller <deller@gmx.de>
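
Note: the open-coded per-page loops removed below checked pfn_valid() and
chose between __flush_cache_page() and __purge_cache_page(); the new
flush_cache_pages() helper delegates that work to flush_cache_page(). For
reference, a sketch of the shape flush_cache_page() is assumed to have in
this file (branch hints omitted, not the verbatim kernel code):

	/* Sketch only: per-page flush as this file is assumed to implement it. */
	void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			      unsigned long pfn)
	{
		if (pfn_valid(pfn)) {
			if (vma->vm_mm->context) {
				/* mm has a space id: flush TLB entry, then cache */
				flush_tlb_page(vma, vmaddr);
				__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
			} else {
				/* no space id: purge (invalidate) instead */
				__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
			}
		}
	}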
@@ -543,10 +543,33 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
 	return ptep;
 }
 
+static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
+			      unsigned long start, unsigned long end)
+{
+	unsigned long addr, pfn;
+	pte_t *ptep;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		ptep = get_ptep(mm->pgd, addr);
+		if (ptep) {
+			pfn = pte_pfn(*ptep);
+			flush_cache_page(vma, addr, pfn);
+		}
+	}
+}
+
+static void flush_user_cache_tlb(struct vm_area_struct *vma,
+				 unsigned long start, unsigned long end)
+{
+	flush_user_dcache_range_asm(start, end);
+	if (vma->vm_flags & VM_EXEC)
+		flush_user_icache_range_asm(start, end);
+	flush_tlb_range(vma, start, end);
+}
+
 void flush_cache_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
-	pgd_t *pgd;
 
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
@@ -560,46 +583,20 @@ void flush_cache_mm(struct mm_struct *mm)
 
 	preempt_disable();
 	if (mm->context == mfsp(3)) {
-		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-			if (vma->vm_flags & VM_EXEC)
-				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
-			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
-		}
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
 		preempt_enable();
 		return;
 	}
 
-	pgd = mm->pgd;
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		unsigned long addr;
-
-		for (addr = vma->vm_start; addr < vma->vm_end;
-		     addr += PAGE_SIZE) {
-			unsigned long pfn;
-			pte_t *ptep = get_ptep(pgd, addr);
-			if (!ptep)
-				continue;
-			pfn = pte_pfn(*ptep);
-			if (!pfn_valid(pfn))
-				continue;
-			if (unlikely(mm->context)) {
-				flush_tlb_page(vma, addr);
-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-			} else {
-				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
-			}
-		}
-	}
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
 	preempt_enable();
 }
 
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	pgd_t *pgd;
-	unsigned long addr;
-
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
 		if (vma->vm_mm->context)
@@ -610,30 +607,12 @@ void flush_cache_range(struct vm_area_struct *vma,
 
 	preempt_disable();
 	if (vma->vm_mm->context == mfsp(3)) {
-		flush_user_dcache_range_asm(start, end);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_range_asm(start, end);
-		flush_tlb_range(vma, start, end);
+		flush_user_cache_tlb(vma, start, end);
 		preempt_enable();
 		return;
 	}
 
-	pgd = vma->vm_mm->pgd;
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
-		unsigned long pfn;
-		pte_t *ptep = get_ptep(pgd, addr);
-		if (!ptep)
-			continue;
-		pfn = pte_pfn(*ptep);
-		if (pfn_valid(pfn)) {
-			if (unlikely(vma->vm_mm->context)) {
-				flush_tlb_page(vma, addr);
-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-			} else {
-				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
-			}
-		}
-	}
+	flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);
 	preempt_enable();
 }
 
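
For context, both call sites now reduce to the same dispatch: a fast
user-range flush when the mm is the one currently live in space register
%sr3 (mm->context == mfsp(3)), and a page-table walk via flush_cache_pages()
otherwise. A hypothetical outline of that shared shape (flush_user_range()
is illustrative only, not a helper this patch adds):

	/* Hypothetical helper: the dispatch both functions now share. */
	static void flush_user_range(struct vm_area_struct *vma,
				     struct mm_struct *mm,
				     unsigned long start, unsigned long end)
	{
		if (mm->context == mfsp(3))
			/* mm is live in %sr3: flush by user virtual range */
			flush_user_cache_tlb(vma, start, end);
		else
			/* otherwise walk the page tables page by page */
			flush_cache_pages(vma, mm, start, end);
	}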