x86/ftrace: Clean up ftrace_regs_caller

When ftrace_regs_caller was created, it was designed to preserve flags as
much as possible as it needed to act just like a breakpoint triggered on the
same location. However, the design is overcomplicated, as it treats all
operations as if they modified flags — but push, mov and lea do not modify
flags. This means the code can be simplified by allowing flags to be
stored further down the stack.

Making ftrace_regs_caller simpler will also be useful in implementing fentry
logic.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20170316135328.36123c3e@gandalf.local.home
Link: http://lkml.kernel.org/r/20170323143445.917292592@goodmis.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
This commit is contained in:
Steven Rostedt (VMware) 2017-03-23 10:33:51 -04:00 committed by Thomas Gleixner
parent e6928e58d4
commit ff04b440d2

View file

@@ -54,23 +54,27 @@ WEAK(ftrace_stub)
END(ftrace_caller) END(ftrace_caller)
ENTRY(ftrace_regs_caller) ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
/* /*
* i386 does not save SS and ESP when coming from kernel. * i386 does not save SS and ESP when coming from kernel.
* Instead, to get sp, &regs->sp is used (see ptrace.h). * Instead, to get sp, &regs->sp is used (see ptrace.h).
* Unfortunately, that means eflags must be at the same location * Unfortunately, that means eflags must be at the same location
* as the current return ip is. We move the return ip into the * as the current return ip is. We move the return ip into the
* ip location, and move flags into the return ip location. * regs->ip location, and move flags into the return ip location.
*/ */
pushl 4(%esp) /* save return ip into ip slot */ pushl $__KERNEL_CS
pushl 4(%esp) /* Save the return ip */
pushl $0 /* Load 0 into orig_ax */ pushl $0 /* Load 0 into orig_ax */
pushl %gs pushl %gs
pushl %fs pushl %fs
pushl %es pushl %es
pushl %ds pushl %ds
pushl %eax pushl %eax
/* Get flags and place them into the return ip slot */
pushf
popl %eax
movl %eax, 8*4(%esp)
pushl %ebp pushl %ebp
pushl %edi pushl %edi
pushl %esi pushl %esi
@@ -78,11 +82,6 @@ ENTRY(ftrace_regs_caller)
pushl %ecx pushl %ecx
pushl %ebx pushl %ebx
movl 13*4(%esp), %eax /* Get the saved flags */
movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
/* clobbering return ip */
movl $__KERNEL_CS, 13*4(%esp)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */ movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
@@ -93,10 +92,14 @@ GLOBAL(ftrace_regs_call)
call ftrace_stub call ftrace_stub
addl $4, %esp /* Skip pt_regs */ addl $4, %esp /* Skip pt_regs */
movl 14*4(%esp), %eax /* Move flags back into cs */
movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ /* restore flags */
movl 12*4(%esp), %eax /* Get return ip from regs->ip */ push 14*4(%esp)
movl %eax, 14*4(%esp) /* Put return ip back for ret */ popf
/* Move return ip back to its original location */
movl 12*4(%esp), %eax
movl %eax, 14*4(%esp)
popl %ebx popl %ebx
popl %ecx popl %ecx
@@ -109,12 +112,11 @@ GLOBAL(ftrace_regs_call)
popl %es popl %es
popl %fs popl %fs
popl %gs popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
jmp .Lftrace_ret
popf /* use lea to not affect flags */
jmp ftrace_stub lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
jmp .Lftrace_ret
#else /* ! CONFIG_DYNAMIC_FTRACE */ #else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount) ENTRY(mcount)