Merge branch 'x86/asm' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into for-next/asm

As agreed with Boris, merge in the 'x86/asm' branch from -tip so that we
can select the new 'ARCH_USE_SYM_ANNOTATIONS' Kconfig symbol, which is
required by the BTI kernel patches.

* 'x86/asm' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/asm: Provide a Kconfig symbol for disabling old assembly annotations
  x86/32: Remove CONFIG_DOUBLEFAULT
Will Deacon 2020-04-30 17:39:42 +01:00
commit bf60333977
12 changed files with 11 additions and 31 deletions
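
For context, a minimal illustrative sketch (not part of this commit): when an architecture selects ARCH_USE_SYM_ANNOTATIONS, the deprecated GLOBAL/ENTRY/WEAK/ENDPROC macros in <linux/linkage.h> are no longer defined, so its assembly has to use the SYM_* annotations directly. The symbol name below is hypothetical:

  /* my_func is a made-up symbol, used only to illustrate the annotation change */
  #include <linux/linkage.h>

  SYM_FUNC_START(my_func)		/* previously: ENTRY(my_func) */
  	ret
  SYM_FUNC_END(my_func)			/* previously: ENDPROC(my_func) */

x86 already uses the SYM_* style throughout (see the entry_32.S hunks below); the new Kconfig symbol simply lets other architectures, such as arm64 for the BTI series, opt in to the same restriction.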

arch/x86/Kconfig

@@ -91,6 +91,7 @@ config X86
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_USE_QUEUED_SPINLOCKS
+	select ARCH_USE_SYM_ANNOTATIONS
 	select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 	select ARCH_WANT_DEFAULT_BPF_JIT if X86_64
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT

arch/x86/Kconfig.debug

@@ -99,15 +99,6 @@ config DEBUG_WX
 	  If in doubt, say "Y".
-config DOUBLEFAULT
-	default y
-	bool "Enable doublefault exception handler" if EXPERT && X86_32
-	---help---
-	  This option allows trapping of rare doublefault exceptions that
-	  would otherwise cause a system to silently reboot. Disabling this
-	  option saves about 4k and might cause you much additional grey
-	  hair.
-
 config DEBUG_TLBFLUSH
 	bool "Set upper limit of TLB entries to flush one-by-one"
 	depends on DEBUG_KERNEL

arch/x86/entry/entry_32.S

@@ -1536,7 +1536,6 @@ SYM_CODE_START(debug)
 	jmp	common_exception
 SYM_CODE_END(debug)
-#ifdef CONFIG_DOUBLEFAULT
 SYM_CODE_START(double_fault)
 1:
 	/*
@@ -1576,7 +1575,6 @@ SYM_CODE_START(double_fault)
 	hlt
 	jmp 1b
 SYM_CODE_END(double_fault)
-#endif
 /*
  * NMI is doubly nasty. It can happen on the first instruction of

arch/x86/include/asm/doublefault.h

@@ -2,7 +2,7 @@
 #ifndef _ASM_X86_DOUBLEFAULT_H
 #define _ASM_X86_DOUBLEFAULT_H
-#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+#ifdef CONFIG_X86_32
 extern void doublefault_init_cpu_tss(void);
 #else
 static inline void doublefault_init_cpu_tss(void)

arch/x86/include/asm/traps.h

@@ -69,9 +69,7 @@ dotraplinkage void do_overflow(struct pt_regs *regs, long error_code);
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_op(struct pt_regs *regs, long error_code);
 dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code);
-#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsigned long cr2);
-#endif
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *regs, long error_code);
 dotraplinkage void do_invalid_TSS(struct pt_regs *regs, long error_code);
 dotraplinkage void do_segment_not_present(struct pt_regs *regs, long error_code);

arch/x86/kernel/Makefile

@@ -102,9 +102,7 @@ obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
 obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
 obj-y += kprobes/
 obj-$(CONFIG_MODULES) += module.o
-ifeq ($(CONFIG_X86_32),y)
-obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
-endif
+obj-$(CONFIG_X86_32) += doublefault_32.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_VM86) += vm86_32.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o

arch/x86/kernel/dumpstack_32.c

@@ -87,7 +87,6 @@ static bool in_softirq_stack(unsigned long *stack, struct stack_info *info)
 static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)
 {
-#ifdef CONFIG_DOUBLEFAULT
 	struct cpu_entry_area *cea = get_cpu_entry_area(raw_smp_processor_id());
 	struct doublefault_stack *ss = &cea->doublefault_stack;
@@ -103,9 +102,6 @@ static bool in_doublefault_stack(unsigned long *stack, struct stack_info *info)
 	info->next_sp = (unsigned long *)this_cpu_read(cpu_tss_rw.x86_tss.sp);
 	return true;
-#else
-	return false;
-#endif
 }

arch/x86/kernel/traps.c

@@ -326,7 +326,6 @@ __visible void __noreturn handle_stack_overflow(const char *message,
 }
 #endif
-#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
 /*
  * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
  *
@@ -450,7 +449,6 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code, unsign
 	die("double fault", regs, error_code);
 	panic("Machine halted.");
 }
-#endif
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {

arch/x86/mm/cpu_entry_area.c

@@ -17,7 +17,7 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
 #endif
-#if defined(CONFIG_X86_32) && defined(CONFIG_DOUBLEFAULT)
+#ifdef CONFIG_X86_32
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
 #endif
@@ -114,12 +114,10 @@ static void __init percpu_setup_exception_stacks(unsigned int cpu)
 #else
 static inline void percpu_setup_exception_stacks(unsigned int cpu)
 {
-#ifdef CONFIG_DOUBLEFAULT
 	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
 	cea_map_percpu_pages(&cea->doublefault_stack,
 			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
-#endif
 }
 #endif

include/linux/linkage.h

@@ -105,7 +105,7 @@
 /* === DEPRECATED annotations === */
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
 #ifndef GLOBAL
 /* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
 #define GLOBAL(name) \
@@ -118,10 +118,10 @@
 #define ENTRY(name) \
 	SYM_FUNC_START(name)
 #endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
 #endif /* LINKER_SCRIPT */
-#ifndef CONFIG_X86
+#ifndef CONFIG_ARCH_USE_SYM_ANNOTATIONS
 #ifndef WEAK
 /* deprecated, use SYM_FUNC_START_WEAK* */
 #define WEAK(name) \
@@ -143,7 +143,7 @@
 #define ENDPROC(name) \
 	SYM_FUNC_END(name)
 #endif
-#endif /* CONFIG_X86 */
+#endif /* CONFIG_ARCH_USE_SYM_ANNOTATIONS */
 /* === generic annotations === */

lib/Kconfig

@@ -80,6 +80,9 @@ config ARCH_USE_CMPXCHG_LOCKREF
 config ARCH_HAS_FAST_MULTIPLIER
 	bool
+config ARCH_USE_SYM_ANNOTATIONS
+	bool
+
 config INDIRECT_PIO
 	bool "Access I/O in non-MMIO mode"
 	depends on ARM64

tools/testing/selftests/wireguard/qemu/debug.config

@@ -58,7 +58,6 @@ CONFIG_RCU_EQS_DEBUG=y
 CONFIG_USER_STACKTRACE_SUPPORT=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_DOUBLEFAULT=y
 CONFIG_X86_DEBUG_FPU=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_DEBUG_PAGEALLOC=y