linux-stable/arch/m68k/mm/cache.c
Christoph Hellwig 9fde034864 m68k: Remove set_fs()
Add a m68k-only set_fc helper to set the SFC and DFC registers for the
few places that need to override it for special MM operations, but
disconnect that from the deprecated kernel-wide set_fs() API.

Note that the SFC/DFC registers are context switched, so there is no need
to disable preemption.
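
The helper itself is added outside this file. As a minimal sketch only (assuming it follows the same movec sequence the old set_fs() used; the exact definition lives in the headers touched by this series), setting both function code registers could look like:

static inline void set_fc(unsigned long val)
{
	/* Sketch: load the same function code into both the source (SFC)
	 * and destination (DFC) function code registers. */
	__asm__ __volatile__ ("movec %0,%%sfc\n\t"
			      "movec %0,%%dfc\n\t"
			      : /* no outputs */ : "r" (val) : "memory");
}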

Partially based on an earlier patch from
Linus Torvalds <torvalds@linux-foundation.org>.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Link: https://lore.kernel.org/r/20210916070405.52750-7-hch@lst.de
Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
2021-09-24 13:35:07 +02:00


// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
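
/*
 * Use the CPU's own MMU lookup instructions to translate a kernel virtual
 * address to a physical one: PLPAR on the 68060, PTESTR plus a read of the
 * MMUSR on the 68040.  Returns 0 if the address has no translation.
 */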
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		WARN_ON_ONCE(!CPU_IS_040_OR_060);
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
void flush_icache_user_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;

		start = address & ICACHE_SET_MASK;
		end = endaddr & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

		do {
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
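
/*
 * flush_icache_range() operates on kernel virtual addresses (e.g. freshly
 * loaded module text), so temporarily point the SFC/DFC function code
 * registers at the supervisor data space, making the 040/060 address
 * lookups above search the kernel's mappings rather than the current
 * user's.  User space is restored afterwards.
 */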
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	set_fc(SUPER_DATA);
	flush_icache_user_range(address, endaddr);
	set_fc(USER_DATA);
}
EXPORT_SYMBOL(flush_icache_range);
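
/*
 * Flush the icache for a single page of a user mapping, after the kernel
 * has written instructions into it (e.g. via copy_to_user_page()).
 */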
void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
			    unsigned long addr, int len)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long start, end;

		start = addr & ICACHE_SET_MASK;
		end = (addr + len) & ICACHE_SET_MASK;
		if (start > end) {
			flush_cf_icache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_icache(start, end);
	} else if (CPU_IS_040_OR_060) {
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}