linux-stable/arch/sparc/mm/fault_32.c
Peter Xu 4064b98270 mm: allow VM_FAULT_RETRY for multiple times
The idea comes from a discussion between Linus and Andrea [1].

Before this patch we only allowed a page fault to retry once.  We achieved
this by clearing the FAULT_FLAG_ALLOW_RETRY flag when calling
handle_mm_fault() the second time.  This was mainly used to avoid
unexpected starvation of the system by looping forever over the page
fault on a single page.  However, that should hardly happen: every code
path that returns VM_FAULT_RETRY first waits for a condition to be met
(during which time it should, if possible, yield the cpu) before
VM_FAULT_RETRY is actually returned.
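
Concretely, the old arch-side pattern looked roughly like this (a sketch
of the pre-patch retry path in a typical arch fault handler, not code
taken verbatim from this file):

  if (fault & VM_FAULT_RETRY) {
          /* Old behavior: allow exactly one retry, then give up */
          flags &= ~FAULT_FLAG_ALLOW_RETRY;
          flags |= FAULT_FLAG_TRIED;
          goto retry;
  }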

This patch removes the restriction by keeping the FAULT_FLAG_ALLOW_RETRY
flag when we receive VM_FAULT_RETRY.  It means that the page fault handler
can now retry the page fault multiple times if necessary without the
need to generate another page fault event.  Meanwhile we still keep the
FAULT_FLAG_TRIED flag so the page fault handler can still identify whether
a page fault is the first attempt or not.
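
After the change, the arch-side retry path only marks FAULT_FLAG_TRIED and
leaves FAULT_FLAG_ALLOW_RETRY alone; this is the pattern now visible in
do_sparc_fault() in the file below (sketch):

  if (fault & VM_FAULT_RETRY) {
          /* ALLOW_RETRY stays set; only mark that we have tried once */
          flags |= FAULT_FLAG_TRIED;
          goto retry;
  }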

Then we'll have these combinations of fault flags (considering only the
ALLOW_RETRY and TRIED flags):

  - ALLOW_RETRY and !TRIED:  the page fault is allowed to retry, and
                             this is the first attempt

  - ALLOW_RETRY and TRIED:   the page fault is allowed to retry, and
                             this is not the first attempt

  - !ALLOW_RETRY and !TRIED: the page fault is not allowed to retry
                             at all

  - !ALLOW_RETRY and TRIED:  this combination is forbidden and should
                             never be used

In existing code we have multiple places that take special care of the
first condition above by checking (fault_flags &
FAULT_FLAG_ALLOW_RETRY).  This patch introduces a simple helper that
detects the first attempt of a page fault by checking both (fault_flags &
FAULT_FLAG_ALLOW_RETRY) and !(fault_flags & FAULT_FLAG_TRIED), because now
even the second try will have ALLOW_RETRY set, and uses that helper in
all existing special paths.  One example is __lock_page_or_retry(): we
now drop the mmap_sem only on the first attempt of the page fault and
keep it across follow-up retries, so the old locking behavior is retained.
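
A minimal sketch of that helper (the actual patch adds it to
include/linux/mm.h as fault_flag_allow_retry_first()):

  static inline bool fault_flag_allow_retry_first(unsigned int flags)
  {
          return (flags & FAULT_FLAG_ALLOW_RETRY) &&
                 !(flags & FAULT_FLAG_TRIED);
  }

Callers such as __lock_page_or_retry() use it so that the mmap_sem is
dropped only on the first attempt, as described above.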

This will be a nice enhancement for the current code [2] and at the same
time supporting material for the future userfaultfd-writeprotect work,
since in that work there will always be an explicit userfault
write-protect retry for protected pages, and if that cannot resolve the
page fault (e.g., when userfaultfd-writeprotect is used in conjunction
with swapped pages) then we'll possibly need a third retry of the page
fault.  It might also benefit other potential users with similar
requirements, such as userfault write-protection.

GUP code is not touched yet and will be covered in a follow-up patch.

Please read the thread below for more information.

[1] https://lore.kernel.org/lkml/20171102193644.GB22686@redhat.com/
[2] https://lore.kernel.org/lkml/20181230154648.GB9832@redhat.com/

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Suggested-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Brian Geffon <bgeffon@google.com>
Cc: Bobby Powers <bobbypowers@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Denis Plotnikov <dplotnikov@virtuozzo.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Martin Cracauer <cracauer@cons.org>
Cc: Marty McFadden <mcfadden8@llnl.gov>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Maya Gokhale <gokhale2@llnl.gov>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Link: http://lkml.kernel.org/r/20200220160246.9790-1-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-04-02 09:35:30 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * fault.c: Page fault handlers for the Sparc.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <asm/head.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include "mm_32.h"

int show_unhandled_signals = 1;

static void __noreturn unhandled_fault(unsigned long address,
                                       struct task_struct *tsk,
                                       struct pt_regs *regs)
{
        if ((unsigned long) address < PAGE_SIZE) {
                printk(KERN_ALERT
                       "Unable to handle kernel NULL pointer dereference\n");
        } else {
                printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
                       address);
        }
        printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
               (tsk->mm ? tsk->mm->context : tsk->active_mm->context));
        printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
               (tsk->mm ? (unsigned long) tsk->mm->pgd :
                          (unsigned long) tsk->active_mm->pgd));
        die_if_kernel("Oops", regs);
}

asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
                            unsigned long address)
{
        struct pt_regs regs;
        unsigned long g2;
        unsigned int insn;
        int i;

        i = search_extables_range(ret_pc, &g2);
        switch (i) {
        case 3:
                /* load & store will be handled by fixup */
                return 3;

        case 1:
                /* store will be handled by fixup, load will bump out */
                /* for _to_ macros */
                insn = *((unsigned int *) pc);
                if ((insn >> 21) & 1)
                        return 1;
                break;

        case 2:
                /* load will be handled by fixup, store will bump out */
                /* for _from_ macros */
                insn = *((unsigned int *) pc);
                if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
                        return 2;
                break;

        default:
                break;
        }

        memset(&regs, 0, sizeof(regs));
        regs.pc = pc;
        regs.npc = pc + 4;
        __asm__ __volatile__(
                "rd %%psr, %0\n\t"
                "nop\n\t"
                "nop\n\t"
                "nop\n" : "=r" (regs.psr));
        unhandled_fault(address, current, &regs);

        /* Not reached */
        return 0;
}

static inline void
show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
{
        if (!unhandled_signal(tsk, sig))
                return;

        if (!printk_ratelimit())
                return;

        printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
               tsk->comm, task_pid_nr(tsk), address,
               (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
               (void *)regs->u_regs[UREG_FP], code);

        print_vma_addr(KERN_CONT " in ", regs->pc);
        printk(KERN_CONT "\n");
}

static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                               unsigned long addr)
{
        if (unlikely(show_unhandled_signals))
                show_signal_msg(regs, sig, code,
                                addr, current);

        force_sig_fault(sig, code, (void __user *) addr, 0);
}

static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
{
        unsigned int insn;

        if (text_fault)
                return regs->pc;

        if (regs->psr & PSR_PS)
                insn = *(unsigned int *) regs->pc;
        else
                __get_user(insn, (unsigned int *) regs->pc);

        return safe_compute_effective_address(regs, insn);
}

static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
                                      int text_fault)
{
        unsigned long addr = compute_si_addr(regs, text_fault);

        __do_fault_siginfo(code, sig, regs, addr);
}

asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                               unsigned long address)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int fixup;
        unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int code;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

        if (text_fault)
                address = regs->pc;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
        code = SEGV_MAPERR;
        if (address >= TASK_SIZE)
                goto vmalloc_fault;

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (pagefault_disabled() || !mm)
                goto no_context;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
        down_read(&mm->mmap_sem);

        if (!from_user && address >= PAGE_OFFSET)
                goto bad_area;

        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }

        if (from_user)
                flags |= FAULT_FLAG_USER;
        if (write)
                flags |= FAULT_FLAG_WRITE;

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags);

        if (fault_signal_pending(fault, regs))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }

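        /*
         * Since FAULT_FLAG_ALLOW_RETRY is no longer cleared on retry (see
         * the commit message above), this accounting/retry block is entered
         * on every attempt rather than only the first, and VM_FAULT_RETRY
         * may be handled more than once; only FAULT_FLAG_TRIED is added on
         * the retry path below.
         */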
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
                                      1, regs, address);
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
                                      1, regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /* No need to up_read(&mm->mmap_sem) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        up_read(&mm->mmap_sem);
        return;

        /*
         * Something tried to access memory that isn't in our memory map..
         * Fix it, but check if it's kernel or user first..
         */
bad_area:
        up_read(&mm->mmap_sem);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (from_user) {
                do_fault_siginfo(code, SIGSEGV, regs, text_fault);
                return;
        }

        /* Is this in ex_table? */
no_context:
        g2 = regs->u_regs[UREG_G2];
        if (!from_user) {
                fixup = search_extables_range(regs->pc, &g2);
                /* Values below 10 are reserved for other things */
                if (fixup > 10) {
                        extern const unsigned int __memset_start[];
                        extern const unsigned int __memset_end[];
                        extern const unsigned int __csum_partial_copy_start[];
                        extern const unsigned int __csum_partial_copy_end[];

#ifdef DEBUG_EXCEPTIONS
                        printk("Exception: PC<%08lx> faddr<%08lx>\n",
                               regs->pc, address);
                        printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
                               regs->pc, fixup, g2);
#endif
                        if ((regs->pc >= (unsigned long)__memset_start &&
                             regs->pc < (unsigned long)__memset_end) ||
                            (regs->pc >= (unsigned long)__csum_partial_copy_start &&
                             regs->pc < (unsigned long)__csum_partial_copy_end)) {
                                regs->u_regs[UREG_I4] = address;
                                regs->u_regs[UREG_I5] = regs->pc;
                        }
                        regs->u_regs[UREG_G2] = g2;
                        regs->pc = fixup;
                        regs->npc = regs->pc + 4;
                        return;
                }
        }

        unhandled_fault(address, tsk, regs);
        do_exit(SIGKILL);

        /*
         * We ran out of memory, or some other thing happened to us that made
         * us unable to handle the page fault gracefully.
         */
out_of_memory:
        up_read(&mm->mmap_sem);
        if (from_user) {
                pagefault_out_of_memory();
                return;
        }
        goto no_context;

do_sigbus:
        up_read(&mm->mmap_sem);
        do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
        if (!from_user)
                goto no_context;

vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 */
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                p4d_t *p4d, *p4d_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;

                pgd = tsk->active_mm->pgd + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
                        pgd_val(*pgd) = pgd_val(*pgd_k);
                        return;
                }

                p4d = p4d_offset(pgd, address);
                pud = pud_offset(p4d, address);
                pmd = pmd_offset(pud, address);

                p4d_k = p4d_offset(pgd_k, address);
                pud_k = pud_offset(p4d_k, address);
                pmd_k = pmd_offset(pud_k, address);

                if (pmd_present(*pmd) || !pmd_present(*pmd_k))
                        goto bad_area_nosemaphore;

                *pmd = *pmd_k;
                return;
        }
}

/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        unsigned int flags = FAULT_FLAG_USER;
        int code;

        code = SEGV_MAPERR;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
        switch (handle_mm_fault(vma, address, flags)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
        }
        up_read(&mm->mmap_sem);
        return;
bad_area:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;

do_sigbus:
        up_read(&mm->mmap_sem);
        __do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
}

static void check_stack_aligned(unsigned long sp)
{
        if (sp & 0x7UL)
                force_sig(SIGILL);
}

void window_overflow_fault(void)
{
        unsigned long sp;

        sp = current_thread_info()->rwbuf_stkptrs[0];
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 1);
        force_user_fault(sp, 1);
        check_stack_aligned(sp);
}

void window_underflow_fault(unsigned long sp)
{
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);
        check_stack_aligned(sp);
}

void window_ret_fault(struct pt_regs *regs)
{
        unsigned long sp;

        sp = regs->u_regs[UREG_FP];
        if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
                force_user_fault(sp + 0x38, 0);
        force_user_fault(sp, 0);
        check_stack_aligned(sp);
}