2019-05-19 12:08:55 +00:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2016-09-19 21:04:18 +00:00
|
|
|
#include <linux/extable.h>
|
2016-12-24 19:46:01 +00:00
|
|
|
#include <linux/uaccess.h>
|
2017-02-08 17:51:35 +00:00
|
|
|
#include <linux/sched/debug.h>
|
2017-11-24 08:42:21 +00:00
|
|
|
#include <xen/xen.h>
|
2017-02-08 17:51:35 +00:00
|
|
|
|
x86/fpu: Reinitialize FPU registers if restoring FPU state fails
Userspace can change the FPU state of a task using the ptrace() or
rt_sigreturn() system calls. Because reserved bits in the FPU state can
cause the XRSTOR instruction to fail, the kernel has to carefully
validate that no reserved bits or other invalid values are being set.
Unfortunately, there have been bugs in this validation code. For
example, we were not checking that the 'xcomp_bv' field in the
xstate_header was 0. As-is, such bugs are exploitable to read the FPU
registers of other processes on the system. To do so, an attacker can
create a task, assign to it an invalid FPU state, then spin in a loop
and monitor the values of the FPU registers. Because the task's FPU
registers are not being restored, sometimes the FPU registers will have
the values from another process.
This is likely to continue to be a problem in the future because the
validation done by the CPU instructions like XRSTOR is not immediately
visible to kernel developers. Nor will invalid FPU states ever be
encountered during ordinary use --- they will only be seen during
fuzzing or exploits. There can even be reserved bits outside the
xstate_header which are easy to forget about. For example, the MXCSR
register contains reserved bits, which were not validated by the
KVM_SET_XSAVE ioctl until commit a575813bfe4b ("KVM: x86: Fix load
damaged SSEx MXCSR register").
Therefore, mitigate this class of vulnerability by restoring the FPU
registers from init_fpstate if restoring from the task's state fails.
We actually used to do this, but it was (perhaps unwisely) removed by
commit 9ccc27a5d297 ("x86/fpu: Remove error return values from
copy_kernel_to_*regs() functions"). This new patch is also a bit
different. First, it only clears the registers, not also the bad
in-memory state; this is simpler and makes it easier to make the
mitigation cover all callers of __copy_kernel_to_fpregs(). Second, it
does the register clearing in an exception handler so that no extra
instructions are added to context switches. In fact, we *remove*
instructions, since previously we were always zeroing the register
containing 'err' even if CONFIG_X86_DEBUG_FPU was disabled.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Kevin Hao <haokexin@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Halcrow <mhalcrow@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Cc: kernel-hardening@lists.openwall.com
Link: http://lkml.kernel.org/r/20170922174156.16780-4-ebiggers3@gmail.com
Link: http://lkml.kernel.org/r/20170923130016.21448-27-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-09-23 13:00:09 +00:00
|
|
|
#include <asm/fpu/internal.h>
|
2016-04-02 14:01:33 +00:00
|
|
|
#include <asm/traps.h>
|
2016-07-04 22:31:27 +00:00
|
|
|
#include <asm/kdebug.h>
|
2008-01-30 12:31:41 +00:00
|
|
|
|
2016-02-17 18:20:12 +00:00
|
|
|
/*
 * Signature of an exception-table fixup handler. The handler receives the
 * matched exception table entry, the register state at the fault, the trap
 * number, the hardware error code and the faulting address; it returns true
 * if the exception was handled.
 */
typedef bool (*ex_handler_t)(const struct exception_table_entry *,
			    struct pt_regs *, int, unsigned long,
			    unsigned long);
|
2016-02-17 18:20:12 +00:00
|
|
|
|
2012-04-21 00:12:48 +00:00
|
|
|
static inline unsigned long
|
|
|
|
ex_fixup_addr(const struct exception_table_entry *x)
|
|
|
|
{
|
|
|
|
return (unsigned long)&x->fixup + x->fixup;
|
|
|
|
}
|
2016-02-17 18:20:12 +00:00
|
|
|
static inline ex_handler_t
|
|
|
|
ex_fixup_handler(const struct exception_table_entry *x)
|
|
|
|
{
|
|
|
|
return (ex_handler_t)((unsigned long)&x->handler + x->handler);
|
|
|
|
}
|
2008-01-30 12:31:41 +00:00
|
|
|
|
2017-12-22 00:18:20 +00:00
|
|
|
/*
 * Default fixup handler: resume execution at the entry's fixup address and
 * report the exception as handled. Trap number, error code and fault
 * address are accepted but unused.
 */
__visible bool ex_handler_default(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	unsigned long resume_ip = ex_fixup_addr(fixup);

	regs->ip = resume_ip;
	return true;
}
EXPORT_SYMBOL(ex_handler_default);
|
|
|
|
|
2017-12-22 00:18:20 +00:00
|
|
|
/*
 * Like ex_handler_default(), but additionally hands the trap number back to
 * the faulting code in %ax so it can tell which exception occurred.
 */
__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
				struct pt_regs *regs, int trapnr,
				unsigned long error_code,
				unsigned long fault_addr)
{
	/* The two register writes are independent; order is irrelevant. */
	regs->ax = trapnr;
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fault);
|
|
|
|
|
x86/fpu: Reinitialize FPU registers if restoring FPU state fails
Userspace can change the FPU state of a task using the ptrace() or
rt_sigreturn() system calls. Because reserved bits in the FPU state can
cause the XRSTOR instruction to fail, the kernel has to carefully
validate that no reserved bits or other invalid values are being set.
Unfortunately, there have been bugs in this validation code. For
example, we were not checking that the 'xcomp_bv' field in the
xstate_header was 0. As-is, such bugs are exploitable to read the FPU
registers of other processes on the system. To do so, an attacker can
create a task, assign to it an invalid FPU state, then spin in a loop
and monitor the values of the FPU registers. Because the task's FPU
registers are not being restored, sometimes the FPU registers will have
the values from another process.
This is likely to continue to be a problem in the future because the
validation done by the CPU instructions like XRSTOR is not immediately
visible to kernel developers. Nor will invalid FPU states ever be
encountered during ordinary use --- they will only be seen during
fuzzing or exploits. There can even be reserved bits outside the
xstate_header which are easy to forget about. For example, the MXCSR
register contains reserved bits, which were not validated by the
KVM_SET_XSAVE ioctl until commit a575813bfe4b ("KVM: x86: Fix load
damaged SSEx MXCSR register").
Therefore, mitigate this class of vulnerability by restoring the FPU
registers from init_fpstate if restoring from the task's state fails.
We actually used to do this, but it was (perhaps unwisely) removed by
commit 9ccc27a5d297 ("x86/fpu: Remove error return values from
copy_kernel_to_*regs() functions"). This new patch is also a bit
different. First, it only clears the registers, not also the bad
in-memory state; this is simpler and makes it easier to make the
mitigation cover all callers of __copy_kernel_to_fpregs(). Second, it
does the register clearing in an exception handler so that no extra
instructions are added to context switches. In fact, we *remove*
instructions, since previously we were always zeroing the register
containing 'err' even if CONFIG_X86_DEBUG_FPU was disabled.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Kevin Hao <haokexin@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Halcrow <mhalcrow@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Cc: kernel-hardening@lists.openwall.com
Link: http://lkml.kernel.org/r/20170922174156.16780-4-ebiggers3@gmail.com
Link: http://lkml.kernel.org/r/20170923130016.21448-27-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-09-23 13:00:09 +00:00
|
|
|
/*
 * Handler for when we fail to restore a task's FPU state. We should never get
 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
 * should always be valid. However, past bugs have allowed userspace to set
 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
 * These caused XRSTOR to fail when switching to the task, leaking the FPU
 * registers of the task previously executing on the CPU. Mitigate this class
 * of vulnerability by restoring from the initial state (essentially, zeroing
 * out all the FPU registers) if we can't restore from the task's FPU state.
 */
__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
				    struct pt_regs *regs, int trapnr,
				    unsigned long error_code,
				    unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);

	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
		  (void *)instruction_pointer(regs));

	/* Load a known-good state so no stale FPU registers can leak. */
	__copy_kernel_to_fpregs(&init_fpstate, -1);
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
|
|
|
|
|
2018-08-28 20:14:18 +00:00
|
|
|
/*
 * Fixup handler for faults in user-access code. A #GP at a uaccess site is
 * suspicious (for example, a non-canonical address reached the access), so
 * warn once about it; then resume at the fixup address as usual.
 */
__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	bool gp_fault = trapnr == X86_TRAP_GP;

	WARN_ONCE(gp_fault, "General protection fault in user access. Non-canonical address?");
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_uaccess);
|
|
|
|
|
2017-12-22 00:18:20 +00:00
|
|
|
/*
 * Fixup handler for a faulting unchecked RDMSR. Warn (and dump the register
 * state the first time only), then make the instruction appear to have
 * succeeded with a result of zero.
 */
__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	/* pr_warn_once() returns true only on the first occurrence. */
	if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the read succeeded and returned 0. */
	regs->dx = 0;
	regs->ax = 0;
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
|
|
|
|
|
2017-12-22 00:18:20 +00:00
|
|
|
/*
 * Fixup handler for a faulting unchecked WRMSR. Warn once (dumping the
 * register state on the first occurrence), then make the instruction appear
 * to have succeeded.
 */
__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	/* WRMSR takes the MSR index in %ecx and the value in %edx:%eax. */
	if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, (unsigned int)regs->dx,
			 (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the write succeeded. */
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);
|
|
|
|
|
2017-12-22 00:18:20 +00:00
|
|
|
/*
 * Fixup handler that clears %fs (used for faulting FS segment loads), then
 * falls through to the default fixup.
 */
__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
				   struct pt_regs *regs, int trapnr,
				   unsigned long error_code,
				   unsigned long fault_addr)
{
	/*
	 * On CPUs with X86_BUG_NULL_SEG, load __USER_DS first — presumably
	 * because writing a null selector alone does not fully reset the
	 * cached segment state on those parts (see the bug's definition to
	 * confirm).
	 */
	if (static_cpu_has(X86_BUG_NULL_SEG))
		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
	asm volatile ("mov %0, %%fs" : : "rm" (0));
	return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);
|
|
|
|
|
2017-12-22 00:18:20 +00:00
|
|
|
/*
 * Report whether the instruction at @ip has an exception table entry whose
 * fixup handler is ex_handler_fault (i.e. the fixup expects the trap number
 * to be delivered in %ax).
 */
__visible bool ex_has_fault_handler(unsigned long ip)
{
	const struct exception_table_entry *e = search_exception_tables(ip);

	if (!e)
		return false;

	return ex_fixup_handler(e) == ex_handler_fault;
}
|
|
|
|
|
2018-08-28 20:14:19 +00:00
|
|
|
/*
 * Try to fix up an exception raised at regs->ip: look the faulting IP up in
 * the exception tables and, if an entry exists, invoke its handler.
 *
 * Returns nonzero if the exception was handled (regs possibly adjusted for
 * resumption), 0 if no fixup entry covers regs->ip.
 */
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
		    unsigned long fault_addr)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

#ifdef CONFIG_PNPBIOS
	/*
	 * Faults inside PnP BIOS code are not recoverable via the exception
	 * tables: restore the stack/IP saved before the BIOS call and jump
	 * back out, never to return here.
	 */
	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
		extern u32 pnp_bios_is_utter_crap;
		pnp_bios_is_utter_crap = 1;
		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
		__asm__ volatile(
			"movl %0, %%esp\n\t"
			"jmp *%1\n\t"
			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
		/* The asm above never falls through. */
		panic("do_trap: can't hit this");
	}
#endif

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;

	handler = ex_fixup_handler(e);
	return handler(e, regs, trapnr, error_code, fault_addr);
}
|
2012-04-19 22:24:20 +00:00
|
|
|
|
2016-04-02 14:01:34 +00:00
|
|
|
extern unsigned int early_recursion_flag;
|
|
|
|
|
2012-04-19 22:24:20 +00:00
|
|
|
/* Restricted version used during very early boot */
/*
 * Handle an exception taken before the normal trap handlers are installed.
 * Either fixes up and returns, or prints a panic message and halts forever.
 */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
	/* Ignore early NMIs. */
	if (trapnr == X86_TRAP_NMI)
		return;

	/* Recursive early faults: give up and halt rather than recurse. */
	if (early_recursion_flag > 2)
		goto halt_loop;

	/*
	 * Old CPUs leave the high bits of CS on the stack
	 * undefined. I'm not sure which CPUs do this, but at least
	 * the 486 DX works this way.
	 * Xen pv domains are not using the default __KERNEL_CS.
	 */
	if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
		goto fail;

	/*
	 * The full exception fixup machinery is available as soon as
	 * the early IDT is loaded. This means that it is the
	 * responsibility of extable users to either function correctly
	 * when handlers are invoked early or to simply avoid causing
	 * exceptions before they're ready to handle them.
	 *
	 * This is better than filtering which handlers can be used,
	 * because refusing to call a handler here is guaranteed to
	 * result in a hard-to-debug panic.
	 *
	 * Keep in mind that not all vectors actually get here. Early
	 * page faults, for example, are special.
	 */
	if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
		return;

	/* #UD may be a WARN(): let report_bug() decide. */
	if (trapnr == X86_TRAP_UD) {
		if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
			/* Skip the ud2. */
			regs->ip += LEN_UD2;
			return;
		}

		/*
		 * If this was a BUG and report_bug returns or if this
		 * was just a normal #UD, we want to continue onward and
		 * crash.
		 */
	}

fail:
	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
		     regs->orig_ax, read_cr2());

	show_regs(regs);

halt_loop:
	while (true)
		halt();
}
|