mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 00:48:50 +00:00
44e215352c
show_trace_log_lvl() provides the x86 platform-specific way to unwind a backtrace with a given log level. Unfortunately, the register dumps are not printed with the same log level - instead, KERN_DEFAULT is always used.

Arista's switches use a quite common setup with rsyslog, where only urgent messages go to the console (console_log_level=KERN_ERR) and everything else goes into /var/log/, as the console baud rate is often indecently slow (9600 bps). Backtrace dumps without registers printed have proven to be as useful as morning standups.

Furthermore, in order to introduce KERN_UNSUPPRESSED (which I believe is still the most elegant way to fix the raciness of sysrq [1]), the log level should be passed down the stack to the register-dumping functions. Besides, there is a potential use case for printing traces at the KERN_DEBUG level [2] (where the register dump shouldn't appear at a higher log level).

Add a log_lvl parameter to __show_regs(). Keep the log level that is currently used intact, so that the visible change stays separate.

[1]: https://lore.kernel.org/lkml/20190528002412.1625-1-dima@arista.com/
[2]: https://lore.kernel.org/linux-doc/20190724170249.9644-1-dima@arista.com/

Signed-off-by: Dmitry Safonov <dima@arista.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Petr Mladek <pmladek@suse.com>
Link: https://lkml.kernel.org/r/20200629144847.492794-3-dima@arista.com
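To make the interface change concrete, here is a minimal sketch of a call site under the post-patch signatures shown in the file below. show_trace_log_lvl(), __show_regs() and SHOW_REGS_ALL are the kernel names quoted in this commit; the wrapper dump_task_state() and the exact header list are illustrative assumptions, not part of the patch:

#include <linux/printk.h>       /* KERN_DEBUG log-level prefix */
#include <linux/sched.h>        /* current */
#include <asm/kdebug.h>         /* __show_regs(), enum show_regs_mode */
#include <asm/stacktrace.h>     /* show_trace_log_lvl() */

/* Hypothetical caller: the unwound trace and the register dump are
 * now emitted with the same, caller-chosen log level. */
static void dump_task_state(struct pt_regs *regs)
{
        /* Everything lands at KERN_DEBUG, so rsyslog can route it to
         * /var/log/ without spamming a slow serial console. */
        show_trace_log_lvl(current, regs, NULL, KERN_DEBUG);
        __show_regs(regs, SHOW_REGS_ALL, KERN_DEBUG);
}

Before this change, the second call had no log_lvl argument and always printed at KERN_DEFAULT, regardless of the level used for the trace.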
227 lines
6.2 KiB
C
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/mc146818rtc.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/personality.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
#include <linux/syscalls.h>

#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/desc.h>

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/vm86.h>
#include <asm/resctrl.h>
#include <asm/proto.h>

#include "process.h"

void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
                 const char *log_lvl)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned short gs;

        if (user_mode(regs))
                gs = get_user_gs(regs);
        else
                savesegment(gs, gs);

        show_ip(regs, log_lvl);

        printk("%sEAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                log_lvl, regs->ax, regs->bx, regs->cx, regs->dx);
        printk("%sESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
                log_lvl, regs->si, regs->di, regs->bp, regs->sp);
        printk("%sDS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
               log_lvl, (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags);

        if (mode != SHOW_REGS_ALL)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
        cr4 = __read_cr4();
        printk("%sCR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                log_lvl, cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if ((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))
                return;

        printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
                log_lvl, d0, d1, d2, d3);
        printk("%sDR6: %08lx DR7: %08lx\n",
                log_lvl, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        set_user_gs(regs, 0);
        regs->fs = 0;
        regs->ds = __USER_DS;
        regs->es = __USER_DS;
        regs->ss = __USER_DS;
        regs->cs = __USER_CS;
        regs->ip = new_ip;
        regs->sp = new_sp;
        regs->flags = X86_EFLAGS_IF;
}
EXPORT_SYMBOL_GPL(start_thread);


/*
 *      switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is largely a red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        if (!test_thread_flag(TIF_NEED_FPU_LOAD))
                switch_fpu_prepare(prev_fpu, cpu);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry. No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel. Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs. This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        lazy_save_gs(prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        switch_to_extra(prev_p, next_p);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated.
         */
        arch_end_context_switch(next_p);

        /*
         * Reload esp0 and cpu_current_top_of_stack. This changes
         * current_thread_info(). Refresh the SYSENTER configuration in
         * case prev or next is vm86.
         */
        update_task_stack(next_p);
        refresh_sysenter_cs(next);
        this_cpu_write(cpu_current_top_of_stack,
                       (unsigned long)task_stack_page(next_p) +
                       THREAD_SIZE);

        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                lazy_load_gs(next->gs);

        this_cpu_write(current_task, next_p);

        switch_fpu_finish(next_fpu);

        /* Load the Intel cache allocation PQR MSR. */
        resctrl_sched_in();

        return prev_p;
}

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        return do_arch_prctl_common(current, option, arg2);
}