commit c05f3642f4304dd081876e77a68555b6aba4483f
tree 915baf10c3518d162c00e62b5d855cf92282ed79
parent 0200fbdd431519d730b5d399a12840ec832b27cc
parent dda93b45389f025fd3422d22cc31cc1ea6040305
author Linus Torvalds <torvalds@linux-foundation.org> 2018-10-23 13:32:18 +0100
committer Linus Torvalds <torvalds@linux-foundation.org> 2018-10-23 13:32:18 +0100
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"The main updates in this cycle were:
- Lots of perf tooling changes too voluminous to list (big perf trace
and perf stat improvements, lots of libtraceevent reorganization,
etc.), so I'll list the authors and refer to the changelog for
details:
Benjamin Peterson, Jérémie Galarneau, Kim Phillips, Peter
Zijlstra, Ravi Bangoria, Sangwon Hong, Sean V Kelley, Steven
Rostedt, Thomas Gleixner, Ding Xiang, Eduardo Habkost, Thomas
Richter, Andi Kleen, Sanskriti Sharma, Adrian Hunter, Tzvetomir
Stoyanov, Arnaldo Carvalho de Melo, Jiri Olsa.
... with the bulk of the changes written by Jiri Olsa, Tzvetomir
Stoyanov and Arnaldo Carvalho de Melo.
- Continued intel_rdt work with a focus on playing well with perf
events. This also imported some non-perf RDT work due to
dependencies. (Reinette Chatre)
- Implement counter freezing for Arch Perfmon v4 (Skylake and newer).
This speeds up the PMI handler by avoiding unnecessary MSR writes and
makes it more accurate; a sketch of the idea follows this message.
(Andi Kleen)
- kprobes cleanups and simplification (Masami Hiramatsu)
- Intel Goldmont PMU updates (Kan Liang)
- ... plus misc other fixes and updates"
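For readers unfamiliar with the mechanism, here is a rough user-space model
of why counter freezing removes work from the PMI handler. This is not the
kernel's handler: the helper names and the printed MSR labels are invented
for the demo, and the one hardware fact assumed is that Arch Perfmon v4 can
freeze the counters itself when a PMI is raised and unfreeze them when the
overflow status is cleared, so the software disable/re-enable writes become
unnecessary.

	#include <stdio.h>

	static int msr_writes;	/* counts simulated wrmsr operations */

	/* Stand-in for a real MSR write; only tallies and logs. */
	static void wrmsr(const char *name)
	{
		msr_writes++;
		printf("  wrmsr(%s)\n", name);
	}

	/* Pre-v4 handler: software must stop the counters, then restart them. */
	static void pmi_handler_legacy(void)
	{
		wrmsr("GLOBAL_CTRL = 0");	/* disable all counters */
		printf("  handle overflow status\n");
		wrmsr("GLOBAL_STATUS_RESET");	/* ack the overflow */
		wrmsr("GLOBAL_CTRL = mask");	/* re-enable counters */
	}

	/*
	 * v4 handler with counter freezing: the PMU froze the counters when
	 * the PMI was raised, and clearing the status unfreezes them, so the
	 * disable/enable writes are gone.
	 */
	static void pmi_handler_freeze(void)
	{
		printf("  handle overflow status\n");
		wrmsr("GLOBAL_STATUS_RESET");	/* ack doubles as unfreeze */
	}

	int main(void)
	{
		printf("legacy:\n");
		pmi_handler_legacy();
		printf("MSR writes: %d\n\n", msr_writes);

		msr_writes = 0;
		printf("v4 counter freezing:\n");
		pmi_handler_freeze();
		printf("MSR writes: %d\n", msr_writes);
		return 0;
	}

The legacy path pays two extra MSR writes on every PMI, and the counters
keep counting the handler itself while it runs; with freezing, both the
extra writes and that measurement skew go away.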
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (155 commits)
kprobes/x86: Use preempt_enable() in optimized_callback()
x86/intel_rdt: Prevent pseudo-locking from using stale pointers
kprobes, x86/ptrace.h: Make regs_get_kernel_stack_nth() not fault on bad stack
perf/x86/intel: Export mem events only if there's PEBS support
x86/cpu: Drop pointless static qualifier in punit_dev_state_show()
x86/intel_rdt: Fix initial allocation to consider CDP
x86/intel_rdt: CBM overlap should also check for overlap with CDP peer
x86/intel_rdt: Introduce utility to obtain CDP peer
tools lib traceevent, perf tools: Move struct tep_handler definition in a local header file
tools lib traceevent: Separate out tep_strerror() for strerror_r() issues
perf python: More portable way to make CFLAGS work with clang
perf python: Make clang_has_option() work on Python 3
perf tools: Free temporary 'sys' string in read_event_files()
perf tools: Avoid double free in read_event_file()
perf tools: Free 'printk' string in parse_ftrace_printk()
perf tools: Cleanup trace-event-info 'tdata' leak
perf strbuf: Match va_{add,copy} with va_end
perf test: S390 does not support watchpoints in test 22
perf auxtrace: Include missing asm/bitsperlong.h to get BITS_PER_LONG
tools include: Adopt linux/bits.h
...
Diffstat (limited to 'tools/perf/util/thread-stack.c')
-rw-r--r--	tools/perf/util/thread-stack.c | 51 +++++++++++++++++++++++++++++++++++++++------------
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
index dd17d6a38d3a..c091635bf7dc 100644
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -36,6 +36,7 @@
  * @branch_count: the branch count when the entry was created
  * @cp: call path
  * @no_call: a 'call' was not seen
+ * @trace_end: a 'call' but trace ended
  */
 struct thread_stack_entry {
 	u64 ret_addr;
@@ -44,6 +45,7 @@ struct thread_stack_entry {
 	u64 branch_count;
 	struct call_path *cp;
 	bool no_call;
+	bool trace_end;
 };
 
 /**
@@ -112,7 +114,8 @@ static struct thread_stack *thread_stack__new(struct thread *thread,
 	return ts;
 }
 
-static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
+static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
+			      bool trace_end)
 {
 	int err = 0;
 
@@ -124,6 +127,7 @@ static int thread_stack__push(struct thread_stack *ts, u64 ret_addr)
 		}
 	}
 
+	ts->stack[ts->cnt].trace_end = trace_end;
 	ts->stack[ts->cnt++].ret_addr = ret_addr;
 
 	return err;
@@ -150,6 +154,18 @@ static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
 	}
 }
 
+static void thread_stack__pop_trace_end(struct thread_stack *ts)
+{
+	size_t i;
+
+	for (i = ts->cnt; i; ) {
+		if (ts->stack[--i].trace_end)
+			ts->cnt = i;
+		else
+			return;
+	}
+}
+
 static bool thread_stack__in_kernel(struct thread_stack *ts)
 {
 	if (!ts->cnt)
@@ -254,10 +270,19 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 		ret_addr = from_ip + insn_len;
 		if (ret_addr == to_ip)
 			return 0; /* Zero-length calls are excluded */
-		return thread_stack__push(thread->ts, ret_addr);
-	} else if (flags & PERF_IP_FLAG_RETURN) {
-		if (!from_ip)
-			return 0;
+		return thread_stack__push(thread->ts, ret_addr,
+					  flags & PERF_IP_FLAG_TRACE_END);
+	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
+		/*
+		 * If the caller did not change the trace number (which would
+		 * have flushed the stack) then try to make sense of the stack.
+		 * Possibly, tracing began after returning to the current
+		 * address, so try to pop that. Also, do not expect a call made
+		 * when the trace ended, to return, so pop that.
+		 */
+		thread_stack__pop(thread->ts, to_ip);
+		thread_stack__pop_trace_end(thread->ts);
+	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
 		thread_stack__pop(thread->ts, to_ip);
 	}
 
@@ -332,7 +357,7 @@ void call_return_processor__free(struct call_return_processor *crp)
 
 static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
 				 u64 timestamp, u64 ref, struct call_path *cp,
-				 bool no_call)
+				 bool no_call, bool trace_end)
 {
 	struct thread_stack_entry *tse;
 	int err;
@@ -350,6 +375,7 @@ static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
 	tse->branch_count = ts->branch_count;
 	tse->cp = cp;
 	tse->no_call = no_call;
+	tse->trace_end = trace_end;
 
 	return 0;
 }
@@ -423,7 +449,7 @@ static int thread_stack__bottom(struct thread *thread, struct thread_stack *ts,
 		return -ENOMEM;
 
 	return thread_stack__push_cp(thread->ts, ip, sample->time, ref, cp,
-				     true);
+				     true, false);
 }
 
 static int thread_stack__no_call_return(struct thread *thread,
@@ -455,7 +481,7 @@ static int thread_stack__no_call_return(struct thread *thread,
 			if (!cp)
 				return -ENOMEM;
 			return thread_stack__push_cp(ts, 0, sample->time, ref,
-						     cp, true);
+						     cp, true, false);
 		}
 	} else if (thread_stack__in_kernel(ts) && sample->ip < ks) {
 		/* Return to userspace, so pop all kernel addresses */
@@ -480,7 +506,7 @@
 			return -ENOMEM;
 
 		err = thread_stack__push_cp(ts, sample->addr, sample->time, ref, cp,
-					    true);
+					    true, false);
 		if (err)
 			return err;
 
@@ -500,7 +526,7 @@ static int thread_stack__trace_begin(struct thread *thread,
 
 	/* Pop trace end */
 	tse = &ts->stack[ts->cnt - 1];
-	if (tse->cp->sym == NULL && tse->cp->ip == 0) {
+	if (tse->trace_end) {
 		err = thread_stack__call_return(thread, ts, --ts->cnt,
 						timestamp, ref, false);
 		if (err)
@@ -529,7 +555,7 @@ static int thread_stack__trace_end(struct thread_stack *ts,
 	ret_addr = sample->ip + sample->insn_len;
 
 	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
-				     false);
+				     false, true);
 }
 
 int thread_stack__process(struct thread *thread, struct comm *comm,
@@ -579,6 +605,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 	ts->last_time = sample->time;
 
 	if (sample->flags & PERF_IP_FLAG_CALL) {
+		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
 		struct call_path_root *cpr = ts->crp->cpr;
 		struct call_path *cp;
 		u64 ret_addr;
@@ -596,7 +623,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 		if (!cp)
 			return -ENOMEM;
 		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
-					    cp, false);
+					    cp, false, trace_end);
 	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
 		if (!sample->ip || !sample->addr)
 			return 0;
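To see the new trace_end bookkeeping in isolation, here is a stripped-down,
compilable model of the tagged stack entries and of
thread_stack__pop_trace_end() from the diff above. The fixed-size array, the
demo addresses, and main() are invented for illustration; only the pop loop
mirrors the real function.

	#include <stdbool.h>
	#include <stdio.h>

	#define STACK_SZ 16

	struct entry {
		unsigned long long ret_addr;
		bool trace_end;	/* pushed for a call seen with TRACE_END */
	};

	static struct entry stack[STACK_SZ];
	static size_t cnt;

	static void push(unsigned long long ret_addr, bool trace_end)
	{
		stack[cnt].trace_end = trace_end;
		stack[cnt++].ret_addr = ret_addr;
	}

	/* Mirrors thread_stack__pop_trace_end(): trim tagged entries off the top. */
	static void pop_trace_end(void)
	{
		size_t i;

		for (i = cnt; i; ) {
			if (stack[--i].trace_end)
				cnt = i;
			else
				return;
		}
	}

	int main(void)
	{
		push(0x1000, false);	/* ordinary call, expected to return */
		push(0x2000, true);	/* call seen together with TRACE_END */
		push(0x3000, true);	/* another trace-end call */

		printf("before trace begin: depth %zu\n", cnt);
		pop_trace_end();	/* trace began again: drop tagged tops */
		printf("after  trace begin: depth %zu\n", cnt);	/* 1 remains */
		return 0;
	}

Walking down from the top means only consecutive trace-end entries are
discarded: the ordinary call underneath is kept, since a later return may
still match it.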