linux-stable/arch/sh/mm/kmap.c
commit 157efa2904 ("sh: implement the new page table range API")
Author: Matthew Wilcox (Oracle)
Add PFN_PTE_SHIFT, update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages().  Change the PG_dcache_clean flag from being per-page
to per-folio.  Flush the entire folio containing the pages in
flush_icache_pages() for ease of implementation.

Link: https://lkml.kernel.org/r/20230802151406.3735276-25-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2023-08-24 16:20:23 -07:00
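
For context, the new PFN_PTE_SHIFT constant is what the generic set_ptes()
loop consumes when it maps several pages of one folio in a single call: each
successive pte is built by bumping the pfn encoded in the previous one.  A
simplified sketch of that generic helper follows (lazy-MMU and
page-table-check hooks omitted; on sh the series defines PFN_PTE_SHIFT as
PAGE_SHIFT):

static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
		pte_t *ptep, pte_t pte, unsigned int nr)
{
	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		/* advance the pfn embedded in the pte to the next page */
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}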

// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/sh/mm/kmap.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

static pte_t *kmap_coherent_pte;

void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = virt_to_kpte(vaddr);
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	struct folio *folio = page_folio(page);
	enum fixed_addresses idx;
	unsigned long vaddr;

	BUG_ON(!test_bit(PG_dcache_clean, &folio->flags));

	preempt_disable();
	pagefault_disable();

	idx = FIX_CMAP_END -
		(((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
		 (FIX_N_COLOURS * smp_processor_id()));

	vaddr = __fix_to_virt(idx);

	BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
	set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));

	return (void *)vaddr;
}
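
/*
 * Worked example of the colour-index arithmetic above, assuming
 * FIX_N_COLOURS is 8, PAGE_SHIFT is 12 and the caller runs on CPU 0
 * (values chosen purely for illustration):
 *
 *   addr = 0x20003000  ->  (addr >> PAGE_SHIFT) & 7  ==  3
 *   idx  = FIX_CMAP_END - (3 + 8 * 0)  ==  FIX_CMAP_END - 3
 *
 * Every user address with cache colour 3 thus reuses the same per-CPU
 * fixmap slot, keeping the temporary kernel alias congruent with the
 * user mapping on aliasing dcaches.
 */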

void kunmap_coherent(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START) {
		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
		enum fixed_addresses idx = __virt_to_fix(vaddr);

		/* XXX.. Kill this later, here for sanity at the moment.. */
		__flush_purge_region((void *)vaddr, PAGE_SIZE);

		pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
		local_flush_tlb_one(get_asid(), vaddr);
	}

	pagefault_enable();
	preempt_enable();
}
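
How the pair is typically used: a hedged sketch loosely modelled on sh's
copy_user_highpage(), mapping the source page through a colour-correct alias
only when its folio is known to be dcache-clean, and falling back to a plain
atomic map otherwise.  The helper name and the reduced PG_dcache_clean test
are illustrative only, not the exact code in arch/sh/mm/cache.c.

static void copy_page_from_user_mapping(struct page *to, struct page *from,
					unsigned long user_addr)
{
	void *vto = kmap_atomic(to);
	void *vfrom;

	if (test_bit(PG_dcache_clean, &page_folio(from)->flags)) {
		/* congruent alias: sees the user mapping's cache lines */
		vfrom = kmap_coherent(from, user_addr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	kunmap_atomic(vto);
}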