Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
a89dfde3dc
x86: Remove dynamic NOP selection

This ensures that a NOP is a NOP and not a random other instruction that
is also a NOP. It allows simplification of dynamic code patching that
wants to verify existing code before writing new instructions (ftrace,
jump_label, static_call, etc..).

Differentiating on NOPs is not a feature.

This pessimises 32bit (DONTCARE) and 32bit on 64bit CPUs (CARELESS).
32bit is not a performance target.

Everything x86_64 since AMD K10 (2007) and Intel IvyBridge (2012) is
fine with using NOPL (as opposed to prefix NOP). And per FEATURE_NOPL
being required for x86_64, all x86_64 CPUs can use NOPL. So stop caring
about NOPs, simplify things and get on with life.

[ The problem seems to be that some uarchs can only decode NOPL on a
  single front-end port while others have severe decode penalties for
  excessive prefixes. All modern uarchs can handle both, except Atom,
  which has prefix penalties. ]

[ Also, much doubt you can actually measure any of this on normal
  workloads. ]

After this, FEATURE_NOPL is unused except for required-features for
x86_64. FEATURE_K8 is only used for PTI.

[ bp: Kernel build measurements showed ~0.3s slowdown on Sandybridge
  which is hardly a slowdown. Get rid of X86_FEATURE_K7, while at it. ]

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Acked-by: Alexei Starovoitov <alexei.starovoitov@gmail.com> # bpf
Acked-by: Linus Torvalds <torvalds@linuxfoundation.org>
Link: https://lkml.kernel.org/r/20210312115749.065275711@infradead.org
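For reference, a minimal userspace sketch (not part of the patch) of the two 5-byte sequences a jump_label site toggles between after this change. The nop bytes are the standard NOPL encoding that the kernel's x86_nops[5] resolves to, and gen_jmp32() is a toy stand-in for the kernel's text_gen_insn():

#include <stdio.h>
#include <string.h>

/* 5-byte NOPL: nopl 0x0(%rax,%rax,1) -- the single 5-byte NOP after this commit */
static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };

/* JMP32 is 0xe9 followed by a 32-bit displacement relative to the next insn */
static void gen_jmp32(unsigned char insn[5], unsigned long addr, unsigned long dest)
{
	int rel = (int)(dest - (addr + 5));

	insn[0] = 0xe9;
	memcpy(&insn[1], &rel, 4);
}

int main(void)
{
	unsigned char jmp[5];
	int i;

	gen_jmp32(jmp, 0x1000, 0x1080);

	printf("nop5:");
	for (i = 0; i < 5; i++)
		printf(" %02x", nop5[i]);
	printf("\njmp: ");
	for (i = 0; i < 5; i++)
		printf(" %02x", jmp[i]);
	printf("\n");
	return 0;
}

Patching a site is then a matter of overwriting one of these 5-byte sequences with the other, which is exactly what the file below does.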
137 lines
3.4 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>

static void bug_at(const void *ip, int line)
{
	/*
	 * The location is not an op that we were expecting.
	 * Something went wrong. Crash the box, as something could be
	 * corrupting the kernel.
	 */
	pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
	BUG();
}

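/*
 * Compute the instruction to install at @entry and sanity-check what is
 * currently there: enabling (JUMP_LABEL_JMP) expects to find the 5-byte
 * NOP, disabling expects the JMP32 about to be removed. A mismatch means
 * the kernel text is corrupted, so bug_at() crashes the box.
 */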
static const void *
__jump_label_set_jump_code(struct jump_entry *entry, enum jump_label_type type)
{
	const void *expect, *code;
	const void *addr, *dest;
	int line;

	addr = (void *)jump_entry_code(entry);
	dest = (void *)jump_entry_target(entry);

	code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);

	if (type == JUMP_LABEL_JMP) {
		expect = x86_nops[5]; line = __LINE__;
	} else {
		expect = code; line = __LINE__;
	}

	if (memcmp(addr, expect, JUMP_LABEL_NOP_SIZE))
		bug_at(addr, line);

	if (type == JUMP_LABEL_NOP)
		code = x86_nops[5];

	return code;
}

static inline void __jump_label_transform(struct jump_entry *entry,
					  enum jump_label_type type,
					  int init)
{
	const void *opcode = __jump_label_set_jump_code(entry, type);

	/*
	 * As long as only a single processor is running and the code is still
	 * not marked as RO, text_poke_early() can be used; checking that
	 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
	 * SYSTEM_SCHEDULING before other CPUs are awakened and before the
	 * code is write-protected.
	 *
	 * At the time the change is being done, just ignore whether we
	 * are doing a nop -> jump or jump -> nop transition, and assume
	 * that a nop is always the 'currently valid' instruction.
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), opcode,
				JUMP_LABEL_NOP_SIZE);
		return;
	}

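	/*
	 * Live patching: text_poke_bp() goes through the INT3 breakpoint
	 * dance (install INT3, sync cores, write the tail, then the first
	 * byte) so other CPUs never execute a half-written instruction.
	 */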
	text_poke_bp((void *)jump_entry_code(entry), opcode, JUMP_LABEL_NOP_SIZE, NULL);
}

static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}

void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}

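/*
 * Batching interface: queue patches under text_mutex via text_poke_queue()
 * and flush them in one go with arch_jump_label_transform_apply(), which
 * amortizes the cost of the CPU synchronization that live patching needs.
 * During early boot this falls back to patching each entry immediately.
 */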
bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	const void *opcode;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fall back to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	mutex_lock(&text_mutex);
	opcode = __jump_label_set_jump_code(entry, type);
	text_poke_queue((void *)jump_entry_code(entry),
			opcode, JUMP_LABEL_NOP_SIZE, NULL);
	mutex_unlock(&text_mutex);
	return true;
}

void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}

static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
						       enum jump_label_type type)
{
	if (jlstate == JL_STATE_UPDATE)
		jump_label_transform(entry, type, 1);
}
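
For illustration, a hedged sketch of how a caller might drive the queue/apply interface above. The function example_update_batch() and the entry range are hypothetical, not the actual generic kernel/jump_label.c code; the retry-after-apply pattern follows the API contract (queueing may report failure), even though this x86 implementation always returns true because text_poke_queue() flushes internally when its vector fills up:

/* Illustrative only: queue a batch of jump entries, then flush once. */
static void example_update_batch(struct jump_entry *first,
				 struct jump_entry *last)
{
	struct jump_entry *entry;

	for (entry = first; entry <= last; entry++) {
		/* Queue the patch; on failure, flush the queue and retry. */
		if (!arch_jump_label_transform_queue(entry, JUMP_LABEL_JMP)) {
			arch_jump_label_transform_apply();
			arch_jump_label_transform_queue(entry, JUMP_LABEL_JMP);
		}
	}

	/* Flush whatever is still queued in one synchronization pass. */
	arch_jump_label_transform_apply();
}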