linux-stable/arch/mips/kernel/process.c


/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
* Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>
#ifdef CONFIG_HOTPLUG_CPU
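/*
 * Entered from the generic idle loop once this CPU has been taken offline;
 * hand control over to the platform's play_dead() implementation.
 */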
void arch_cpu_idle_dead(void)
{
play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
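/*
 * Initialise the register state for a freshly exec'ed user task: drop any
 * kernel/coprocessor privileges inherited in cp0_status, discard stale FPU,
 * MSA & DSP state, then point the EPC & stack pointer at the new image.
 */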
void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
{
unsigned long status;
/* New thread loses kernel privileges. */
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
status |= KU_USER;
regs->cp0_status = status;
lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
void exit_thread(struct task_struct *tsk)
{
/*
* User threads may have allocated a delay slot emulation frame.
* If so, clean up that allocation.
*/
if (!(current->flags & PF_KTHREAD))
dsemul_thread_cleanup(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
/*
* Save any process state which is live in hardware registers to the
* parent context prior to duplication. This prevents the new child
* state becoming stale if the parent is preempted before copy_thread()
* gets a chance to save the parent's live hardware registers to the
* child context.
*/
preempt_disable();
if (is_msa_enabled())
save_msa(current);
else if (is_fpu_owner())
_save_fp(current);
save_dsp(current);
preempt_enable();
*dst = *src;
return 0;
}
/*
* Copy architecture-specific thread state
*/
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
unsigned long clone_flags = args->flags;
unsigned long usp = args->stack;
unsigned long tls = args->tls;
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
/* Put the stack after the struct pt_regs. */
childksp = (unsigned long) childregs;
p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
if (unlikely(args->fn)) {
/* kernel thread */
unsigned long status = p->thread.cp0_status;
memset(childregs, 0, sizeof(struct pt_regs));
p->thread.reg16 = (unsigned long)args->fn;
p->thread.reg17 = (unsigned long)args->fn_arg;
p->thread.reg29 = childksp;
p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000)
status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
((status & (ST0_KUC | ST0_IEC)) << 2);
#else
status |= ST0_EXL;
#endif
childregs->cp0_status = status;
return 0;
}
/* user thread */
*childregs = *regs;
childregs->regs[7] = 0; /* Clear error flag */
childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
p->thread.reg29 = (unsigned long) childregs;
p->thread.reg31 = (unsigned long) ret_from_fork;
/*
* New tasks lose permission to use the fpu. This accelerates context
* switching for most programs since they don't use the fpu.
*/
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
clear_tsk_thread_flag(p, TIF_USEDFPU);
clear_tsk_thread_flag(p, TIF_USEDMSA);
clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
#ifdef CONFIG_MIPS_MT_FPAFF
clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
#ifdef CONFIG_MIPS_FP_SUPPORT
atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
if (clone_flags & CLONE_SETTLS)
ti->tp_value = tls;
return 0;
}
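/*
 * Stack-protector canary. The guard word is exported so that modules built
 * with -fstack-protector can reference it.
 */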
#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
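/*
 * Per-function information gathered by the stack unwinder: the function's
 * address & size, its stack frame size, and the offset (in longs) within
 * the frame at which the return address is saved.
 */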
struct mips_frame_info {
void *func;
unsigned long func_size;
int frame_size;
int pc_offset;
};
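/*
 * Resolve the absolute target of a j/jal instruction: the 26-bit index is
 * shifted left by 2 and combined with the 256MB region bits of the pc.
 * E.g. pc 0x80123400 with a target field of 0x012345 yields
 * (0x80123400 & 0xf0000000) | (0x012345 << 2) == 0x80048d14.
 */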
#define J_TARGET(pc,target) \
(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* jr16 ra
* jr ra
*/
if (mm_insn_16bit(ip->word >> 16)) {
if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
ip->mm16_r5_format.rt == mm_jr16_op &&
ip->mm16_r5_format.imm == 31)
return 1;
return 0;
}
if (ip->r_format.opcode == mm_pool32a_op &&
ip->r_format.func == mm_pool32axf_op &&
((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
ip->r_format.rt == 31)
return 1;
return 0;
#else
if (ip->r_format.opcode == spec_op &&
ip->r_format.func == jr_op &&
ip->r_format.rs == 31)
return 1;
return 0;
#endif
}
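/*
 * Does this instruction save the return address ($31) to the stack? If so,
 * report via *poff the offset (in longs from $sp) at which it is stored.
 */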
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* swsp ra,offset
* swm16 reglist,offset(sp)
* swm32 reglist,offset(sp)
* sw32 ra,offset(sp)
* jradiussp - NOT SUPPORTED
*
* microMIPS is way more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
switch (ip->mm16_r5_format.opcode) {
case mm_swsp16_op:
if (ip->mm16_r5_format.rt != 31)
return 0;
*poff = ip->mm16_r5_format.imm;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
case mm_pool16c_op:
switch (ip->mm16_m_format.func) {
case mm_swm16_op:
*poff = ip->mm16_m_format.imm;
*poff += 1 + ip->mm16_m_format.rlist;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
default:
return 0;
}
default:
return 0;
}
}
switch (ip->i_format.opcode) {
case mm_sw32_op:
if (ip->i_format.rs != 29)
return 0;
if (ip->i_format.rt != 31)
return 0;
*poff = ip->i_format.simmediate / sizeof(ulong);
return 1;
case mm_pool32b_op:
switch (ip->mm_m_format.func) {
case mm_swm32_func:
if (ip->mm_m_format.rd < 0x10)
return 0;
if (ip->mm_m_format.base != 29)
return 0;
*poff = ip->mm_m_format.simmediate;
*poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
*poff /= sizeof(ulong);
return 1;
default:
return 0;
}
default:
return 0;
}
#else
/* sw / sd $ra, offset($sp) */
if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
ip->i_format.rs == 29 && ip->i_format.rt == 31) {
*poff = ip->i_format.simmediate / sizeof(ulong);
return 1;
}
#ifdef CONFIG_CPU_LOONGSON64
if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
(ip->loongson3_lswc2_format.ls == 1) &&
(ip->loongson3_lswc2_format.fr == 0) &&
(ip->loongson3_lswc2_format.base == 29)) {
if (ip->loongson3_lswc2_format.rt == 31) {
*poff = ip->loongson3_lswc2_format.offset << 1;
return 1;
}
if (ip->loongson3_lswc2_format.rq == 31) {
*poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
return 1;
}
}
#endif
return 0;
#endif
}
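/* Is this a jump instruction (j/jal/jr/jalr or a microMIPS equivalent)? */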
static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* jr16,jrc,jalr16,jalr16
* jal
* jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
* jraddiusp - NOT SUPPORTED
*
* microMIPS is kind of more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
(ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
return 1;
return 0;
}
if (ip->j_format.opcode == mm_j32_op)
return 1;
if (ip->j_format.opcode == mm_jal32_op)
return 1;
if (ip->r_format.opcode != mm_pool32a_op ||
ip->r_format.func != mm_pool32axf_op)
return 0;
return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
if (ip->j_format.opcode == j_op)
return 1;
if (ip->j_format.opcode == jal_op)
return 1;
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}
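
/*
 * Return non-zero if @ip allocates a stack frame, i.e. it is an
 * addiu/daddiu sp,sp,-imm or one of the microMIPS equivalents. On a
 * match the decoded frame size is stored in *frame_size.
 */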
static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
unsigned short tmp;
/*
* addiusp -imm
* addius5 sp,-imm
* addiu32 sp,sp,-imm
 * jraddiusp - NOT SUPPORTED
*
* microMIPS is not more fun...
*/
if (mm_insn_16bit(ip->word >> 16)) {
if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
ip->mm16_r3_format.simmediate & mm_addiusp_func) {
tmp = ip->mm_b0_format.simmediate >> 1;
tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
tmp ^= 0x100;
*frame_size = -(signed short)(tmp << 2);
return 1;
}
if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
ip->mm16_r5_format.rt == 29) {
tmp = ip->mm16_r5_format.imm >> 1;
*frame_size = -(signed short)(tmp & 0xf);
return 1;
}
return 0;
}
if (ip->mm_i_format.opcode == mm_addiu32_op &&
ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
*frame_size = -ip->i_format.simmediate;
return 1;
}
#else
/* addiu/daddiu sp,sp,-imm */
if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
return 0;
if (ip->i_format.opcode == addiu_op ||
ip->i_format.opcode == daddiu_op) {
*frame_size = -ip->i_format.simmediate;
return 1;
}
#endif
return 0;
}
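
/*
 * Scan the prologue of @info->func to determine the stack frame size and
 * the offset at which the return address is saved. Returns 0 when both
 * were found (non-leaf function), 1 when the function appears to be a
 * leaf, and -1 when the prologue could not be analysed.
 */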
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip, *ip_end;
unsigned int last_insn_size = 0;
bool saw_jump = false;
info->pc_offset = -1;
info->frame_size = 0;
ip = (void *)msk_isa16_mode((ulong)info->func);
if (!ip)
goto err;
ip_end = (void *)ip + (info->func_size ? info->func_size : 512);
while (ip < ip_end) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.word = ip->halfword[0] << 16;
last_insn_size = 2;
} else if (is_mmips) {
insn.word = ip->halfword[0] << 16 | ip->halfword[1];
last_insn_size = 4;
} else {
insn.word = ip->word;
last_insn_size = 4;
}
if (is_jr_ra_ins(ip)) {
break;
} else if (!info->frame_size) {
is_sp_move_ins(&insn, &info->frame_size);
continue;
} else if (!saw_jump && is_jump_ins(ip)) {
/*
* If we see a jump instruction, we are finished
* with the frame save.
*
* Some functions can have a shortcut return at
* the beginning of the function, so don't start
 * looking for a jump instruction until we see the
* frame setup.
*
* The RA save instruction can get put into the
* delay slot of the jump instruction, so look
* at the next instruction, too.
*/
saw_jump = true;
continue;
}
if (info->pc_offset == -1 &&
is_ra_save_ins(&insn, &info->pc_offset))
break;
if (saw_jump)
break;
}
if (info->frame_size && info->pc_offset >= 0) /* nested */
return 0;
if (info->pc_offset < 0) /* leaf */
return 1;
/* prologue seems bogus... */
err:
return -1;
}
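
/* Frame info for the scheduler entry point, filled in by frame_info_init() */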
static struct mips_frame_info schedule_mfi __read_mostly;
#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
return kallsyms_lookup_name("__schedule");
}
#else
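/*
 * Without kallsyms, scan the first few instructions of schedule() for a
 * 'j' instruction and return its target, which is expected to be the
 * real scheduler entry point (__schedule).
 */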
static unsigned long get___schedule_addr(void)
{
union mips_instruction *ip = (void *)schedule;
int max_insns = 8;
int i;
for (i = 0; i < max_insns; i++, ip++) {
if (ip->j_format.opcode == j_op)
return J_TARGET(ip, ip->j_format.target);
}
return 0;
}
#endif
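
/*
 * Locate the scheduler entry point and analyse its prologue so that
 * thread_saved_pc() and __get_wchan() know where the saved pc lives in a
 * sleeping task's scheduler frame.
 */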
static int __init frame_info_init(void)
{
unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
unsigned long ofs;
#endif
unsigned long addr;
addr = get___schedule_addr();
if (!addr)
addr = (unsigned long)schedule;
#ifdef CONFIG_KALLSYMS
kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
schedule_mfi.func = (void *)addr;
schedule_mfi.func_size = size;
get_frame_info(&schedule_mfi);
/*
 * Without schedule() frame info, the results given by
 * thread_saved_pc() and __get_wchan() are not reliable.
*/
if (schedule_mfi.pc_offset < 0)
printk("Can't analyze schedule() prologue at %p\n", schedule);
return 0;
}
arch_initcall(frame_info_init);
/*
* Return saved PC of a blocked thread.
*/
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct thread_struct *t = &tsk->thread;
/* Newborn processes are a special case */
if (t->reg31 == (unsigned long) ret_from_fork)
return t->reg31;
if (schedule_mfi.pc_offset < 0)
return 0;
return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
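/*
 * Unwind a single frame: given @pc and @*sp within @stack_page, return
 * the caller's pc and advance *sp past the current frame, handling the
 * hand-over from the top of an IRQ stack back onto the interrupted task
 * stack. Returns 0 when unwinding cannot continue.
 */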
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
unsigned long *sp,
unsigned long pc,
unsigned long *ra)
{
unsigned long low, high, irq_stack_high;
struct mips_frame_info info;
unsigned long size, ofs;
struct pt_regs *regs;
int leaf;
if (!stack_page)
return 0;
/*
* IRQ stacks start at IRQ_STACK_START
* task stacks at THREAD_SIZE - 32
*/
low = stack_page;
if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
high = stack_page + IRQ_STACK_START;
irq_stack_high = high;
} else {
high = stack_page + THREAD_SIZE - 32;
irq_stack_high = 0;
}
/*
* If we reached the top of the interrupt stack, start unwinding
* the interrupted task stack.
*/
if (unlikely(*sp == irq_stack_high)) {
unsigned long task_sp = *(unsigned long *)*sp;
/*
* Check that the pointer saved in the IRQ stack head points to
* something within the stack of the current task
*/
if (!object_is_on_stack((void *)task_sp))
return 0;
/*
 * Follow the pointer to the task's kernel stack frame where the
 * interrupted state was saved.
*/
regs = (struct pt_regs *)task_sp;
pc = regs->cp0_epc;
if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
}
return 0;
}
if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
return 0;
/*
* Return ra if an exception occurred at the first instruction
*/
if (unlikely(ofs == 0)) {
pc = *ra;
*ra = 0;
return pc;
}
info.func = (void *)(pc - ofs);
info.func_size = ofs; /* analyze from start to ofs */
leaf = get_frame_info(&info);
if (leaf < 0)
return 0;
if (*sp < low || *sp + info.frame_size > high)
return 0;
if (leaf)
/*
 * In some extreme cases, get_frame_info() can wrongly
 * consider a nested function to be a leaf one. In those
 * cases, avoid always returning the same value.
*/
pc = pc != *ra ? *ra : 0;
else
pc = ((unsigned long *)(*sp))[info.pc_offset];
*sp += info.frame_size;
*ra = 0;
return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);
/* used by show_backtrace() */
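/*
 * Select the stack page that @*sp belongs to (a CPU's IRQ stack or the
 * task's kernel stack) and delegate to unwind_stack_by_address().
 */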
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra)
{
unsigned long stack_page = 0;
int cpu;
for_each_possible_cpu(cpu) {
if (on_irq_stack(cpu, *sp)) {
stack_page = (unsigned long)irq_stack[cpu];
break;
}
}
if (!stack_page)
stack_page = (unsigned long)task_stack_page(task);
return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
/*
* __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
*/
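/*
 * Unwind the blocked task's stack, starting from the pc saved by the
 * scheduler, until we leave the scheduler functions; the resulting pc is
 * the task's wait channel.
 */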
unsigned long __get_wchan(struct task_struct *task)
{
unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
unsigned long sp;
unsigned long ra = 0;
#endif
if (!task_stack_page(task))
goto out;
pc = thread_saved_pc(task);
#ifdef CONFIG_KALLSYMS
sp = task->thread.reg29 + schedule_mfi.frame_size;
while (in_sched_functions(pc))
pc = unwind_stack(task, &sp, pc, &ra);
#endif
out:
return pc;
}
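
/*
 * Highest usable address for a user stack: start from TASK_SIZE and
 * reserve room, where applicable, for the branch delay slot emulation
 * page, the VDSO with its data and GIC user pages, cache colour
 * alignment and VDSO base randomization.
 */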
unsigned long mips_stack_top(void)
{
unsigned long top = TASK_SIZE & PAGE_MASK;
if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
/* One page for branch delay slot "emulation" */
top -= PAGE_SIZE;
}
/* Space for the VDSO, data page & GIC user page */
top -= PAGE_ALIGN(current->thread.abi->vdso->size);
top -= PAGE_SIZE;
top -= mips_gic_present() ? PAGE_SIZE : 0;
/* Space for cache colour alignment */
if (cpu_has_dc_aliases)
top -= shm_align_mask + 1;
/* Space to randomize the VDSO base */
if (current->flags & PF_RANDOMIZE)
top -= VDSO_RANDOMIZE_SIZE;
return top;
}
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
*/
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
sp -= get_random_u32_below(PAGE_SIZE);
return sp & ALMASK;
}
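
/*
 * Backtrace IPIs are sent asynchronously; backtrace_csd_busy tracks CPUs
 * with an outstanding request, and handle_backtrace() clears a CPU's bit
 * once it has dumped its state via nmi_cpu_backtrace().
 */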
static struct cpumask backtrace_csd_busy;
static void handle_backtrace(void *info)
{
nmi_cpu_backtrace(get_irq_regs());
cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
CSD_INIT(handle_backtrace, NULL);
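
/*
 * Queue an asynchronous backtrace request on each CPU in the mask using that
 * CPU's pre-allocated call_single_data_t. A CPU whose bit is still set in
 * backtrace_csd_busy has not finished handling an earlier request, so it is
 * skipped with a warning rather than having its csd reused while it may
 * still be queued.
 */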
static void raise_backtrace(cpumask_t *mask)
{
call_single_data_t *csd;
int cpu;
for_each_cpu(cpu, mask) {
/*
* If we previously sent an IPI to the target CPU & it hasn't
* cleared its bit in the busy cpumask then it didn't handle
* our previous IPI & it's not safe for us to reuse the
* call_single_data_t.
*/
if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
cpu);
continue;
}
csd = &per_cpu(backtrace_csd, cpu);
smp_call_function_single_async(cpu, csd);
}
}
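
/*
 * Arch hook for the generic nmi_backtrace machinery: hand the mask through
 * to nmi_trigger_cpumask_backtrace(), with raise_backtrace() above as the
 * callback that actually raises the IPIs.
 */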
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}
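
/*
 * Report the task's current floating point mode as a bitmask of
 * PR_FP_MODE_FR and PR_FP_MODE_FRE, derived from its TIF_32BIT_FPREGS and
 * TIF_HYBRID_FPREGS thread flags.
 */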
int mips_get_process_fp_mode(struct task_struct *task)
{
int value = 0;

if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
value |= PR_FP_MODE_FR;
if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
value |= PR_FP_MODE_FRE;

return value;
}
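
/*
 * Userland view (illustrative sketch, not part of this file): the mode
 * computed above is what prctl(PR_GET_FP_MODE) reports, and a process can
 * ask for a different mode with PR_SET_FP_MODE, e.g.:
 *
 *	int mode = prctl(PR_GET_FP_MODE);
 *
 *	if (mode >= 0 && !(mode & PR_FP_MODE_FR))
 *		prctl(PR_SET_FP_MODE, mode | PR_FP_MODE_FR);
 *
 * Hypothetical example with error handling omitted; whether a given mode is
 * accepted depends on the CPU's capabilities.
 */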
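
/*
 * No-op run on each CPU that a mode-switching process might execute on; the
 * comment in the body explains why. Judging by the long (*)(void *)
 * signature it is presumably invoked via a helper such as work_on_cpu() by
 * the FP mode switch code later in this file (assumption - the caller is
 * outside this excerpt).
 */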
static long prepare_for_fp_mode_switch(void *unused)
{
/*
* This is icky, but we use this to simply ensure that all CPUs have
* context switched, regardless of whether they were previously running
* kernel or user code. This ensures that no CPU that a mode-switching
* program may execute on keeps its FPU enabled (& in the old mode)
* throughout the mode switch.
	 */
	return 0;
}
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
	struct task_struct *t;
	struct cpumask process_cpus;
	int cpu;
	/* If nothing to change, return right away, successfully. */
	if (value == mips_get_process_fp_mode(task))
		return 0;

	/* Only accept a mode change if 64-bit FP enabled for o32. */
	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
		return -EOPNOTSUPP;

	/* And only for o32 tasks. */
	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
		return -EOPNOTSUPP;
	/* Check the value is valid */
	if (value & ~known_bits)
		return -EOPNOTSUPP;
	/* Setting FRE without FR is not supported. */
	if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
		return -EOPNOTSUPP;
	/* Avoid inadvertently triggering emulation */
	if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
	    !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
		return -EOPNOTSUPP;
	if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
		return -EOPNOTSUPP;

	/* FR = 0 not supported in MIPS R6 */
	if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
		return -EOPNOTSUPP;
	/* Indicate the new FP mode in each thread */
	for_each_thread(task, t) {
		/* Update desired FP register width */
		if (value & PR_FP_MODE_FR) {
			clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
		} else {
			set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
			clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
		}

		/* Update desired FP single layout */
		if (value & PR_FP_MODE_FRE)
			set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
		else
			clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
	}
	/*
	 * We need to ensure that all threads in the process have switched mode
	 * before returning, in order to allow userland to not worry about
	 * races. We can do this by forcing all CPUs that any thread in the
	 * process may be running on to schedule something else - in this case
	 * prepare_for_fp_mode_switch().
	 *
	 * We begin by generating a mask of all CPUs that any thread in the
	 * process may be running on.
	 */
	cpumask_clear(&process_cpus);
	for_each_thread(task, t)
		cpumask_set_cpu(task_cpu(t), &process_cpus);

	/*
	 * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
	 *
	 * The CPUs may have rescheduled already since we switched mode or
	 * generated the cpumask, but that doesn't matter. If the task in this
	 * process is scheduled out then our scheduling
	 * prepare_for_fp_mode_switch() will simply be redundant. If it's
	 * scheduled in then it will already have picked up the new FP mode
	 * whilst doing so.
	 */
	cpus_read_lock();
	for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
		work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
	cpus_read_unlock();
	return 0;
}
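/*
 * Illustrative sketch only (not part of this file): user space is expected
 * to drive the mode switch implemented above via prctl(2). Assuming the
 * usual <sys/prctl.h> definitions of PR_GET_FP_MODE, PR_SET_FP_MODE,
 * PR_FP_MODE_FR and PR_FP_MODE_FRE, a caller requesting the FR=1 register
 * model might look roughly like this:
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int request_fr1(void)
 *	{
 *		int old = prctl(PR_GET_FP_MODE);
 *
 *		if (old < 0)
 *			return -1;
 *		if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR)) {
 *			perror("PR_SET_FP_MODE");
 *			return -1;
 *		}
 *		return old;
 *	}
 *
 * An -EOPNOTSUPP return from mips_set_process_fp_mode() surfaces in user
 * space as a -1 return from prctl() with errno set to EOPNOTSUPP, for
 * example when PR_FP_MODE_FRE is requested without PR_FP_MODE_FR.
 */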
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS32_EF_R0];
	}

	uregs[MIPS32_EF_LO] = regs->lo;
	uregs[MIPS32_EF_HI] = regs->hi;
	uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
		/* k0/k1 are copied as zero. */
		if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
			uregs[i] = 0;
		else
			uregs[i] = regs->regs[i - MIPS64_EF_R0];
	}

	uregs[MIPS64_EF_LO] = regs->lo;
	uregs[MIPS64_EF_HI] = regs->hi;
	uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
	uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
	uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
	uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */