[MIPS] Do not use drop_mmu_context to flush another task's VIPT I-cache.

c-r4k.c and c-sb1.c use drop_mmu_context() to flush virtually tagged
I-caches, but this does not work for flushing another task's icache,
since we do not necessarily hold that task's ASID context.  This case
is triggered, for example, by copy_to_user_page() called from
ptrace(2).  Use an indexed flush for such cases.

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Author:    Atsushi Nemoto
Date:      2006-08-25 17:55:31 +09:00
Committer: Ralf Baechle
Commit:    f6502791d7
Parent:    a94d702049
2 changed files with 33 additions and 27 deletions
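
Both files converge on the same policy: bump the ASID only when the
mm being flushed is the one live on this CPU, and fall back to an
indexed flush otherwise.  Below is a minimal, self-contained sketch of
that policy; the struct, helpers, and printouts are stand-ins invented
for illustration, not the kernel's actual types or API.

#include <stdio.h>

/* Stand-ins for the real kernel primitives, stubbed so the control
 * flow is visible; this is a sketch of the policy, not kernel code. */
struct mm { int asid; };                 /* ~ struct mm_struct */
static struct mm *active_mm;             /* ~ current->active_mm */

static void drop_mmu_context(struct mm *mm)
{
	/* The real helper assigns a fresh ASID, so stale I-cache lines
	 * tagged with the old ASID can never hit again.  That only
	 * helps if this CPU is actually running this mm. */
	mm->asid++;
	printf("bumped ASID to %d\n", mm->asid);
}

static void indexed_icache_flush(unsigned long start, unsigned long end)
{
	/* Index ops select cache lines by index alone, with no ASID or
	 * tag comparison, so they work on any task's pages. */
	printf("index-invalidate [0x%lx, 0x%lx)\n", start, end);
}

static void flush_exec_page(struct mm *mm, unsigned long addr,
			    unsigned long size)
{
	if (mm == active_mm)
		drop_mmu_context(mm);      /* cheap, lazy invalidation */
	else
		indexed_icache_flush(addr, addr + size);
}

int main(void)
{
	struct mm tracer = { 1 }, tracee = { 2 };

	active_mm = &tracer;
	flush_exec_page(&tracer, 0x400000, 4096); /* own text: ASID bump */
	flush_exec_page(&tracee, 0x400000, 4096); /* foreign mm: index flush */
	return 0;
}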

diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -475,7 +475,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		}
 	}
 	if (exec) {
-		if (cpu_has_vtag_icache) {
+		if (cpu_has_vtag_icache && mm == current->active_mm) {
 			int cpu = smp_processor_id();
 
 			if (cpu_context(cpu, mm) != 0)
@@ -599,7 +599,7 @@ static inline void local_r4k_flush_icache_page(void *args)
 	 * We're not sure of the virtual address(es) involved here, so
 	 * we have to flush the entire I-cache.
 	 */
-	if (cpu_has_vtag_icache) {
+	if (cpu_has_vtag_icache && vma->vm_mm == current->active_mm) {
 		int cpu = smp_processor_id();
 
 		if (cpu_context(cpu, vma->vm_mm) != 0)
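
For a sense of how vma->vm_mm can differ from current->active_mm in
these paths: the commit message points at copy_to_user_page() called
from ptrace(2).  A hypothetical tracer fragment that takes roughly
that path is sketched below; the function name is invented, and only
the ptrace(2) call itself is real API.

#include <sys/ptrace.h>
#include <sys/types.h>

/* Hypothetical tracer fragment.  The kernel services the poke via
 * access_process_vm() -> copy_to_user_page(), so the flush runs with
 * current->active_mm being the tracer's mm while vma->vm_mm is the
 * tracee's -- exactly the case the guards above catch. */
static void poke_word(pid_t tracee, unsigned long addr, long word)
{
	ptrace(PTRACE_POKETEXT, tracee, (void *)addr, (void *)word);
}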

diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c
--- a/arch/mips/mm/c-sb1.c
+++ b/arch/mips/mm/c-sb1.c
@@ -154,6 +154,26 @@ static inline void __sb1_flush_icache_all(void)
 	}
 }
 
+/*
+ * Invalidate a range of the icache. The addresses are virtual, and
+ * the cache is virtually indexed and tagged. However, we don't
+ * necessarily have the right ASID context, so use index ops instead
+ * of hit ops.
+ */
+static inline void __sb1_flush_icache_range(unsigned long start,
+	unsigned long end)
+{
+	start &= ~(icache_line_size - 1);
+	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
+
+	while (start != end) {
+		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
+		start += icache_line_size;
+	}
+
+	mispredict();
+	sync();
+}
+
 /*
  * Flush the icache for a given physical page. Need to writeback the
  * dcache first, then invalidate the icache. If the page isn't
@@ -173,8 +193,11 @@ static void local_sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 	/*
 	 * Bumping the ASID is probably cheaper than the flush ...
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0)
-		drop_mmu_context(vma->vm_mm, cpu);
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(addr, addr + PAGE_SIZE);
 }
 
 #ifdef CONFIG_SMP
@@ -210,26 +233,6 @@ void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
 	__attribute__((alias("local_sb1_flush_cache_page")));
 #endif
 
-/*
- * Invalidate a range of the icache. The addresses are virtual, and
- * the cache is virtually indexed and tagged. However, we don't
- * necessarily have the right ASID context, so use index ops instead
- * of hit ops.
- */
-static inline void __sb1_flush_icache_range(unsigned long start,
-	unsigned long end)
-{
-	start &= ~(icache_line_size - 1);
-	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);
-
-	while (start != end) {
-		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
-		start += icache_line_size;
-	}
-
-	mispredict();
-	sync();
-}
-
 /*
  * Invalidate all caches on this CPU
@@ -326,9 +329,12 @@ static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
 	 * If there's a context, bump the ASID (cheaper than a flush,
 	 * since we don't know VAs!)
 	 */
-	if (cpu_context(cpu, vma->vm_mm) != 0) {
-		drop_mmu_context(vma->vm_mm, cpu);
-	}
+	if (vma->vm_mm == current->active_mm) {
+		if (cpu_context(cpu, vma->vm_mm) != 0)
+			drop_mmu_context(vma->vm_mm, cpu);
+	} else
+		__sb1_flush_icache_range(start, start + PAGE_SIZE);
+
 }
 
 #ifdef CONFIG_SMP
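
As a quick sanity check of the alignment arithmetic in
__sb1_flush_icache_range(), here is a standalone sketch.  The cache
geometry below is invented for illustration; the kernel derives
icache_line_size and icache_index_mask from the probed cache
configuration at boot.

#include <stdio.h>

/* Hypothetical geometry, e.g. an 8KB way with 32-byte lines. */
static const unsigned long icache_line_size = 32;
static const unsigned long icache_index_mask = 0x1fe0;

int main(void)
{
	unsigned long start = 0x403f15;   /* arbitrary unaligned VA */
	unsigned long end = start + 100;

	/* Same rounding as __sb1_flush_icache_range(): align start
	 * down and end up to whole cache lines. */
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	while (start != end) {
		/* An index op uses only the low (index) bits of the
		 * address, so no ASID or tag match is required. */
		printf("Index_Invalidate_I @ index %#06lx\n",
		       start & icache_index_mask);
		start += icache_line_size;
	}
	return 0;
}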