author     Linus Torvalds <torvalds@linux-foundation.org>   2019-05-15 16:05:47 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-05-15 16:05:47 -0700
commit     d2d8b146043ae7e250aef1fb312971f6f479d487
tree       22db8758a5aa0bc850ba8f83fe57b1f679924d0a /arch/x86/kernel
parent     2bbacd1a92788ee334c7e92b765ea16ebab68dfe
parent     693713cbdb3a4bda5a8a678c31f06560bbb14657
Merge tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:
"The major changes in this tracing update includes:
- Removal of non-DYNAMIC_FTRACE from 32bit x86
- Removal of mcount support from x86
- Emulating a call from int3 on x86_64, which fixes live kernel patching (a sketch of the idea follows this message)
- Consolidated Tracing Error logs file
Minor updates:
- Removal of klp_check_compiler_support()
- kdb ftrace dumping output changes
- Accessing and creating ftrace instances from inside the kernel
- Clean up of #define if macro
- Introduction of TRACE_EVENT_NOP() to disable trace events based on
config options (a usage sketch follows the commit list below)
And other minor fixes and clean ups"
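
For the int3 call-emulation item above: the x86_64 patches in this series add small helpers to <asm/text-patching.h> that the rewritten ftrace_int3_handler() (see the ftrace.c hunk in the diff below) uses to make a breakpoint behave like the call or jmp it temporarily covers. What follows is a minimal sketch of those helpers as the series describes them, not a verbatim copy of the header; treat the constants and function bodies as an approximation and check the tree rather than this rendering. The enabling design point is the gap the x86_64 int3 entry path now leaves below the saved pt_regs ("x86_64: Add gap to int3 to allow for call emulation"), which is what lets the handler push a synthesized return address onto the interrupted stack.

/*
 * Hedged sketch of the int3 emulation helpers used by the new
 * ftrace_int3_handler(). The names come from <asm/text-patching.h>;
 * the bodies below illustrate the idea rather than quote the header.
 */
#include <linux/ptrace.h>

#define INT3_INSN_SIZE	1	/* int3 is a one-byte opcode (0xcc) */
#define CALL_INSN_SIZE	5	/* e8 <rel32> */

static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	/* Resume execution at 'ip' instead of after the breakpoint. */
	regs->ip = ip;
}

static inline void int3_emulate_push(struct pt_regs *regs, unsigned long val)
{
	/*
	 * The x86_64 int3 entry code reserves a gap below the saved
	 * pt_regs, so the handler may grow the interrupted stack by
	 * one word and store a value there.
	 */
	regs->sp -= sizeof(unsigned long);
	*(unsigned long *)regs->sp = val;
}

static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
{
	/* Push the return address of the patched call site, then "jump". */
	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
	int3_emulate_jmp(regs, func);
}
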
* tag 'trace-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (44 commits)
x86: Hide the int3_emulate_call/jmp functions from UML
livepatch: Remove klp_check_compiler_support()
ftrace/x86: Remove mcount support
ftrace/x86_32: Remove support for non DYNAMIC_FTRACE
tracing: Simplify "if" macro code
tracing: Fix documentation about disabling options using trace_options
tracing: Replace kzalloc with kcalloc
tracing: Fix partial reading of trace event's id file
tracing: Allow RCU to run between postponed startup tests
tracing: Fix white space issues in parse_pred() function
tracing: Eliminate const char[] auto variables
ring-buffer: Fix mispelling of Calculate
tracing: probeevent: Fix to make the type of $comm string
tracing: probeevent: Do not accumulate on ret variable
tracing: uprobes: Re-enable $comm support for uprobe events
ftrace/x86_64: Emulate call function while updating in breakpoint handler
x86_64: Allow breakpoints to emulate call instructions
x86_64: Add gap to int3 to allow for call emulation
tracing: kdb: Allow ftdump to skip all but the last few entries
tracing: Add trace_total_entries() / trace_total_entries_cpu()
...
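
One of the minor items above is TRACE_EVENT_NOP(), which lets an event be compiled out when its config option is disabled while its trace_*() call sites keep building. Below is a hedged sketch of the usual idiom; CONFIG_FOO_TRACE, the foo_bar event and its fields are made up for illustration, only TRACE_EVENT()/TRACE_EVENT_NOP() come from the kernel, and the usual trace-header boilerplate (TRACE_SYSTEM, CREATE_TRACE_POINTS, include guards) is omitted.

/* Hypothetical trace header fragment, e.g. include/trace/events/foo.h. */
#include <linux/tracepoint.h>

#ifdef CONFIG_FOO_TRACE
# define FOO_TRACE_EVENT	TRACE_EVENT	/* real event, visible in tracefs */
#else
# define FOO_TRACE_EVENT	TRACE_EVENT_NOP	/* stub: callers compile, no event is created */
#endif

FOO_TRACE_EVENT(foo_bar,

	TP_PROTO(int value),

	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(int, value)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value=%d", __entry->value)
);

Kernel code then calls trace_foo_bar(value) unconditionally; with CONFIG_FOO_TRACE off, the call compiles away.
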
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/ftrace.c     |  32
-rw-r--r--  arch/x86/kernel/ftrace_32.S  |  75
-rw-r--r--  arch/x86/kernel/ftrace_64.S  |  28
3 files changed, 33 insertions(+), 102 deletions(-)
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 0caf8122d680..0927bb158ffc 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -29,6 +29,7 @@
 #include <asm/kprobes.h>
 #include <asm/ftrace.h>
 #include <asm/nops.h>
+#include <asm/text-patching.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
@@ -231,6 +232,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
 }
 
 static unsigned long ftrace_update_func;
+static unsigned long ftrace_update_func_call;
 
 static int update_ftrace_func(unsigned long ip, void *new)
 {
@@ -259,6 +261,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	unsigned char *new;
 	int ret;
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	new = ftrace_call_replace(ip, (unsigned long)func);
 
 	ret = update_ftrace_func(ip, new);
@@ -294,13 +298,28 @@ int ftrace_int3_handler(struct pt_regs *regs)
 	if (WARN_ON_ONCE(!regs))
 		return 0;
 
-	ip = regs->ip - 1;
-	if (!ftrace_location(ip) && !is_ftrace_caller(ip))
-		return 0;
+	ip = regs->ip - INT3_INSN_SIZE;
 
-	regs->ip += MCOUNT_INSN_SIZE - 1;
+#ifdef CONFIG_X86_64
+	if (ftrace_location(ip)) {
+		int3_emulate_call(regs, (unsigned long)ftrace_regs_caller);
+		return 1;
+	} else if (is_ftrace_caller(ip)) {
+		if (!ftrace_update_func_call) {
+			int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+			return 1;
+		}
+		int3_emulate_call(regs, ftrace_update_func_call);
+		return 1;
+	}
+#else
+	if (ftrace_location(ip) || is_ftrace_caller(ip)) {
+		int3_emulate_jmp(regs, ip + CALL_INSN_SIZE);
+		return 1;
+	}
+#endif
 
-	return 1;
+	return 0;
 }
 NOKPROBE_SYMBOL(ftrace_int3_handler);
 
@@ -865,6 +884,8 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
 
 	func = ftrace_ops_get_func(ops);
 
+	ftrace_update_func_call = (unsigned long)func;
+
 	/* Do a safe modify in case the trampoline is executing */
 	new = ftrace_call_replace(ip, (unsigned long)func);
 	ret = update_ftrace_func(ip, new);
@@ -966,6 +987,7 @@ static int ftrace_mod_jmp(unsigned long ip, void *func)
 {
 	unsigned char *new;
 
+	ftrace_update_func_call = 0UL;
 	new = ftrace_jmp_replace(ip, (unsigned long)func);
 
 	return update_ftrace_func(ip, new);
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index 4c8440de3355..2ba914a34b06 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -10,22 +10,10 @@
 #include <asm/ftrace.h>
 #include <asm/nospec-branch.h>
 
-#ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook	mcount
-EXPORT_SYMBOL(mcount)
-#endif
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
-#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
-# define USING_FRAME_POINTER
-#endif
 
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 # define MCOUNT_FRAME			1	/* using frame = true  */
 #else
 # define MCOUNT_FRAME			0	/* using frame = false */
@@ -37,8 +25,7 @@ END(function_hook)
 
 ENTRY(ftrace_caller)
 
-#ifdef USING_FRAME_POINTER
-# ifdef CC_USING_FENTRY
+#ifdef CONFIG_FRAME_POINTER
 	/*
 	 * Frame pointers are of ip followed by bp.
 	 * Since fentry is an immediate jump, we are left with
@@ -49,7 +36,7 @@ ENTRY(ftrace_caller)
 	pushl	%ebp
 	movl	%esp, %ebp
 	pushl	2*4(%esp)	/* function ip */
-# endif
+
 	/* For mcount, the function ip is directly above */
 	pushl	%ebp
 	movl	%esp, %ebp
@@ -59,7 +46,7 @@ ENTRY(ftrace_caller)
 	pushl	%edx
 	pushl	$0	/* Pass NULL as regs pointer */
 
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 	/* Load parent ebp into edx */
 	movl	4*4(%esp), %edx
 #else
@@ -82,13 +69,11 @@ ftrace_call:
 	popl	%edx
 	popl	%ecx
 	popl	%eax
-#ifdef USING_FRAME_POINTER
+#ifdef CONFIG_FRAME_POINTER
 	popl	%ebp
-# ifdef CC_USING_FENTRY
 	addl	$4,%esp		/* skip function ip */
 	popl	%ebp		/* this is the orig bp */
 	addl	$4, %esp	/* skip parent ip */
-# endif
 #endif
 .Lftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -133,11 +118,7 @@ ENTRY(ftrace_regs_caller)
 
 	movl	12*4(%esp), %eax	/* Load ip (1st parameter) */
 	subl	$MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
-#ifdef CC_USING_FENTRY
 	movl	15*4(%esp), %edx	/* Load parent ip (2nd parameter) */
-#else
-	movl	0x4(%ebp), %edx		/* Load parent ip (2nd parameter) */
-#endif
 	movl	function_trace_op, %ecx	/* Save ftrace_pos in 3rd parameter */
 	pushl	%esp			/* Save pt_regs as 4th parameter */
 
@@ -170,43 +151,6 @@ GLOBAL(ftrace_regs_call)
 	lea	3*4(%esp), %esp			/* Skip orig_ax, ip and cs */
 	jmp	.Lftrace_ret
 
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
-	cmpl	$__PAGE_OFFSET, %esp
-	jb	ftrace_stub			/* Paging not enabled yet? */
-
-	cmpl	$ftrace_stub, ftrace_trace_function
-	jnz	.Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	cmpl	$ftrace_stub, ftrace_graph_return
-	jnz	ftrace_graph_caller
-
-	cmpl	$ftrace_graph_entry_stub, ftrace_graph_entry
-	jnz	ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-	ret
-
-	/* taken from glibc */
-.Ltrace:
-	pushl	%eax
-	pushl	%ecx
-	pushl	%edx
-	movl	0xc(%esp), %eax
-	movl	0x4(%ebp), %edx
-	subl	$MCOUNT_INSN_SIZE, %eax
-
-	movl	ftrace_trace_function, %ecx
-	CALL_NOSPEC %ecx
-
-	popl	%edx
-	popl	%ecx
-	popl	%eax
-	jmp	ftrace_stub
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
@@ -215,13 +159,8 @@ ENTRY(ftrace_graph_caller)
 	pushl	%edx
 	movl	3*4(%esp), %eax
 	/* Even with frame pointers, fentry doesn't have one here */
-#ifdef CC_USING_FENTRY
 	lea	4*4(%esp), %edx
 	movl	$0, %ecx
-#else
-	lea	0x4(%ebp), %edx
-	movl	(%ebp), %ecx
-#endif
 	subl	$MCOUNT_INSN_SIZE, %eax
 	call	prepare_ftrace_return
 	popl	%edx
@@ -234,11 +173,7 @@ END(ftrace_graph_caller)
 return_to_handler:
 	pushl	%eax
 	pushl	%edx
-#ifdef CC_USING_FENTRY
 	movl	$0, %eax
-#else
-	movl	%ebp, %eax
-#endif
 	call	ftrace_return_to_handler
 	movl	%eax, %ecx
 	popl	%edx
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 75f2b36b41a6..10eb2760ef2c 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -13,22 +13,12 @@
 	.code64
 	.section .entry.text, "ax"
 
-#ifdef CC_USING_FENTRY
 # define function_hook	__fentry__
 EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook	mcount
-EXPORT_SYMBOL(mcount)
-#endif
 
 #ifdef CONFIG_FRAME_POINTER
-# ifdef CC_USING_FENTRY
 /* Save parent and function stack frames (rip and rbp) */
 # define MCOUNT_FRAME_SIZE	(8+16*2)
-# else
-/* Save just function stack frame (rip and rbp) */
-# define MCOUNT_FRAME_SIZE	(8+16)
-# endif
 #else
 /* No need to save a stack frame */
 # define MCOUNT_FRAME_SIZE	0
@@ -75,17 +65,13 @@ EXPORT_SYMBOL(mcount)
 	 * fentry is called before the stack frame is set up, where as mcount
 	 * is called afterward.
 	 */
-#ifdef CC_USING_FENTRY
+
 	/* Save the parent pointer (skip orig rbp and our return address) */
 	pushq \added+8*2(%rsp)
 	pushq %rbp
 	movq %rsp, %rbp
 	/* Save the return address (now skip orig rbp, rbp and parent) */
 	pushq \added+8*3(%rsp)
-#else
-	/* Can't assume that rip is before this (unless added was zero) */
-	pushq \added+8(%rsp)
-#endif
 	pushq %rbp
 	movq %rsp, %rbp
 #endif /* CONFIG_FRAME_POINTER */
@@ -113,12 +99,7 @@ EXPORT_SYMBOL(mcount)
 	movq %rdx, RBP(%rsp)
 
 	/* Copy the parent address into %rsi (second parameter) */
-#ifdef CC_USING_FENTRY
 	movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
-#else
-	/* %rdx contains original %rbp */
-	movq 8(%rdx), %rsi
-#endif
 
 	/* Move RIP to its proper location */
 	movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
@@ -303,15 +284,8 @@ ENTRY(ftrace_graph_caller)
 	/* Saves rbp into %rdx and fills first parameter */
 	save_mcount_regs
 
-#ifdef CC_USING_FENTRY
 	leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
 	movq $0, %rdx	/* No framepointers needed */
-#else
-	/* Save address of the return address of traced function */
-	leaq 8(%rdx), %rsi
-	/* ftrace does sanity checks against frame pointers */
-	movq (%rdx), %rdx
-#endif
 	call	prepare_ftrace_return
 
 	restore_mcount_regs