linux-stable/arch/powerpc/kernel/perf_callchain.c
Ingo Molnar cdd6c482c9 perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!

In the past few months the perfcounters subsystem has grown out of its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, and analysis facility.

Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and
less appropriate.

All in all, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI-compatible fashion)

The word 'event' is also a bit shorter than 'counter' - which makes
it slightly more convenient to write/handle as well.

Thanks go to Stephane Eranian, who first observed this misnomer and
suggested a rename.

User-space tooling and ABI compatibility are not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)

This patch has been generated via the following script:

  FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')

  sed -i \
    -e 's/PERF_EVENT_/PERF_RECORD_/g' \
    -e 's/PERF_COUNTER/PERF_EVENT/g' \
    -e 's/perf_counter/perf_event/g' \
    -e 's/nb_counters/nb_events/g' \
    -e 's/swcounter/swevent/g' \
    -e 's/tpcounter_event/tp_event/g' \
    $FILES

  for N in $(find . -name perf_counter.[ch]); do
    M=$(echo $N | sed 's/perf_counter/perf_event/g')
    mv $N $M
  done

  FILES=$(find . -name perf_event.*)

  sed -i \
    -e 's/COUNTER_MASK/REG_MASK/g' \
    -e 's/COUNTER/EVENT/g' \
    -e 's/\<event\>/event_id/g' \
    -e 's/counter/event/g' \
    -e 's/Counter/Event/g' \
    $FILES

... to keep it as correct as possible. This script can also be
used by anyone who has pending perfcounters patches - it converts
a Linux kernel tree over to the new naming. We tried to time this
change to the point where the number of pending patches is
smallest: the end of the merge window.

Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.

( NOTE: 'counters' are still the proper terminology when we deal
  with hardware registers - and these sed scripts are a bit
  over-eager in renaming them. I've undone some of that, but
  in case there's something left where 'counter' would be
  better than 'event' we can undo that on an individual basis
  instead of touching an otherwise nicely automated patch. )

Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-09-21 14:28:04 +02:00

/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#endif

/*
 * Store another value in a callchain_entry.
 */
static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        unsigned int nr = entry->nr;

        if (nr < PERF_MAX_STACK_DEPTH) {
                entry->ip[nr] = ip;
                entry->nr = nr + 1;
        }
}

/*
 * Is sp valid as the address of the next kernel stack frame after prev_sp?
 * The next frame may be in a different stack area but should not go
 * back down in the same stack area.
 */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
        if (sp & 0xf)
                return 0;               /* must be 16-byte aligned */
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return 0;
        if (sp >= prev_sp + STACK_FRAME_OVERHEAD)
                return 1;
        /*
         * sp could decrease when we jump off an interrupt stack
         * back to the regular process stack.
         */
        if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
                return 1;
        return 0;
}

static void perf_callchain_kernel(struct pt_regs *regs,
                                  struct perf_callchain_entry *entry)
{
        unsigned long sp, next_sp;
        unsigned long next_ip;
        unsigned long lr;
        long level = 0;
        unsigned long *fp;

        lr = regs->link;
        sp = regs->gpr[1];
        callchain_store(entry, PERF_CONTEXT_KERNEL);
        callchain_store(entry, regs->nip);

        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return;

        for (;;) {
                fp = (unsigned long *) sp;
                next_sp = fp[0];

                if (next_sp == sp + STACK_INT_FRAME_SIZE &&
                    fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        /*
                         * This looks like an interrupt frame for an
                         * interrupt that occurred in the kernel
                         */
                        regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
                        next_ip = regs->nip;
                        lr = regs->link;
                        level = 0;
                        callchain_store(entry, PERF_CONTEXT_KERNEL);

                } else {
                        if (level == 0)
                                next_ip = lr;
                        else
                                next_ip = fp[STACK_FRAME_LR_SAVE];

                        /*
                         * We can't tell which of the first two addresses
                         * we get are valid, but we can filter out the
                         * obviously bogus ones here.  We replace them
                         * with 0 rather than removing them entirely so
                         * that userspace can tell which is which.
                         */
                        if ((level == 1 && next_ip == lr) ||
                            (level <= 1 && !kernel_text_address(next_ip)))
                                next_ip = 0;

                        ++level;
                }

                callchain_store(entry, next_ip);
                if (!valid_next_sp(next_sp, sp))
                        return;
                sp = next_sp;
        }
}

#ifdef CONFIG_PPC64

#ifdef CONFIG_HUGETLB_PAGE
#define is_huge_psize(pagesize) (HPAGE_SHIFT && mmu_huge_psizes[pagesize])
#else
#define is_huge_psize(pagesize) 0
#endif

/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
        pgd_t *pgdir;
        pte_t *ptep, pte;
        int pagesize;
        unsigned long addr = (unsigned long) ptr;
        unsigned long offset;
        unsigned long pfn;
        void *kaddr;

        pgdir = current->mm->pgd;
        if (!pgdir)
                return -EFAULT;

        pagesize = get_slice_psize(current->mm, addr);

        /* align address to page boundary */
        offset = addr & ((1ul << mmu_psize_defs[pagesize].shift) - 1);
        addr -= offset;

        if (is_huge_psize(pagesize))
                ptep = huge_pte_offset(current->mm, addr);
        else
                ptep = find_linux_pte(pgdir, addr);

        if (ptep == NULL)
                return -EFAULT;
        pte = *ptep;
        if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
                return -EFAULT;
        pfn = pte_pfn(pte);
        if (!page_is_ram(pfn))
                return -EFAULT;

        /* no highmem to worry about here */
        kaddr = pfn_to_kaddr(pfn);
        memcpy(ret, kaddr + offset, nb);
        return 0;
}

static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
            ((unsigned long)ptr & 7))
                return -EFAULT;

        if (!__get_user_inatomic(*ret, ptr))
                return 0;

        return read_user_stack_slow(ptr, ret, 8);
}

static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;

        if (!__get_user_inatomic(*ret, ptr))
                return 0;

        return read_user_stack_slow(ptr, ret, 4);
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
        if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
                return 0;
        return 1;
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
        char            dummy[__SIGNAL_FRAMESIZE];
        struct ucontext uc;
        unsigned long   unused[2];
        unsigned int    tramp[6];
        struct siginfo  *pinfo;
        void            *puc;
        struct siginfo  info;
        char            abigap[288];
};

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
        if (nip == fp + offsetof(struct signal_frame_64, tramp))
                return 1;
        if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
                return 1;
        return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
        struct signal_frame_64 __user *sf;
        unsigned long pinfo, puc;

        sf = (struct signal_frame_64 __user *) sp;
        if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
            read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
                return 0;
        return pinfo == (unsigned long) &sf->info &&
                puc == (unsigned long) &sf->uc;
}

static void perf_callchain_user_64(struct pt_regs *regs,
                                   struct perf_callchain_entry *entry)
{
        unsigned long sp, next_sp;
        unsigned long next_ip;
        unsigned long lr;
        long level = 0;
        struct signal_frame_64 __user *sigframe;
        unsigned long __user *fp, *uregs;

        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, next_ip);

        for (;;) {
                fp = (unsigned long __user *) sp;
                if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
                        return;
                if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
                        return;

                /*
                 * Note: the next_sp - sp >= signal frame size check
                 * is true when next_sp < sp, which can happen when
                 * transitioning from an alternate signal stack to the
                 * normal stack.
                 */
                if (next_sp - sp >= sizeof(struct signal_frame_64) &&
                    (is_sigreturn_64_address(next_ip, sp) ||
                     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
                    sane_signal_64_frame(sp)) {
                        /*
                         * This looks like a signal frame
                         */
                        sigframe = (struct signal_frame_64 __user *) sp;
                        uregs = sigframe->uc.uc_mcontext.gp_regs;
                        if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
                            read_user_stack_64(&uregs[PT_LNK], &lr) ||
                            read_user_stack_64(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
                        callchain_store(entry, PERF_CONTEXT_USER);
                        callchain_store(entry, next_ip);
                        continue;
                }

                if (level == 0)
                        next_ip = lr;
                callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
}

static inline int current_is_64bit(void)
{
        /*
         * We can't use test_thread_flag() here because we may be on an
         * interrupt stack, and the thread flags don't get copied over
         * from the thread_info on the main stack to the interrupt stack.
         */
        return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}

#else /* CONFIG_PPC64 */

/*
 * On 32-bit we just access the address and let hash_page create a
 * HPTE if necessary, so there is no need to fall back to reading
 * the page tables.  Since this is called at interrupt level,
 * do_page_fault() won't treat a DSI as a page fault.
 */
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
        if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
            ((unsigned long)ptr & 3))
                return -EFAULT;

        return __get_user_inatomic(*ret, ptr);
}

static inline void perf_callchain_user_64(struct pt_regs *regs,
                                          struct perf_callchain_entry *entry)
{
}

static inline int current_is_64bit(void)
{
        return 0;
}

static inline int valid_user_sp(unsigned long sp, int is_64)
{
        if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
                return 0;
        return 1;
}

#define __SIGNAL_FRAMESIZE32    __SIGNAL_FRAMESIZE
#define sigcontext32            sigcontext
#define mcontext32              mcontext
#define ucontext32              ucontext
#define compat_siginfo_t        struct siginfo

#endif /* CONFIG_PPC64 */

/*
 * Layout for non-RT signal frames
 */
struct signal_frame_32 {
        char                    dummy[__SIGNAL_FRAMESIZE32];
        struct sigcontext32     sctx;
        struct mcontext32       mctx;
        int                     abigap[56];
};

/*
 * Layout for RT signal frames
 */
struct rt_signal_frame_32 {
        char                    dummy[__SIGNAL_FRAMESIZE32 + 16];
        compat_siginfo_t        info;
        struct ucontext32       uc;
        int                     abigap[56];
};

static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
        if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
                return 1;
        if (vdso32_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso32_sigtramp)
                return 1;
        return 0;
}

static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
        if (nip == fp + offsetof(struct rt_signal_frame_32,
                                 uc.uc_mcontext.mc_pad))
                return 1;
        if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
            nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
                return 1;
        return 0;
}

static int sane_signal_32_frame(unsigned int sp)
{
        struct signal_frame_32 __user *sf;
        unsigned int regs;

        sf = (struct signal_frame_32 __user *) (unsigned long) sp;
        if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
                return 0;
        return regs == (unsigned long) &sf->mctx;
}

static int sane_rt_signal_32_frame(unsigned int sp)
{
        struct rt_signal_frame_32 __user *sf;
        unsigned int regs;

        sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
        if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
                return 0;
        return regs == (unsigned long) &sf->uc.uc_mcontext;
}

static unsigned int __user *signal_frame_32_regs(unsigned int sp,
                                unsigned int next_sp, unsigned int next_ip)
{
        struct mcontext32 __user *mctx = NULL;
        struct signal_frame_32 __user *sf;
        struct rt_signal_frame_32 __user *rt_sf;

        /*
         * Note: the next_sp - sp >= signal frame size check
         * is true when next_sp < sp, for example, when
         * transitioning from an alternate signal stack to the
         * normal stack.
         */
        if (next_sp - sp >= sizeof(struct signal_frame_32) &&
            is_sigreturn_32_address(next_ip, sp) &&
            sane_signal_32_frame(sp)) {
                sf = (struct signal_frame_32 __user *) (unsigned long) sp;
                mctx = &sf->mctx;
        }

        if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
            is_rt_sigreturn_32_address(next_ip, sp) &&
            sane_rt_signal_32_frame(sp)) {
                rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
                mctx = &rt_sf->uc.uc_mcontext;
        }

        if (!mctx)
                return NULL;
        return mctx->mc_gregs;
}

static void perf_callchain_user_32(struct pt_regs *regs,
                                   struct perf_callchain_entry *entry)
{
        unsigned int sp, next_sp;
        unsigned int next_ip;
        unsigned int lr;
        long level = 0;
        unsigned int __user *fp, *uregs;

        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, next_ip);

        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                fp = (unsigned int __user *) (unsigned long) sp;
                if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
                        return;
                if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
                        return;

                uregs = signal_frame_32_regs(sp, next_sp, next_ip);
                if (!uregs && level <= 1)
                        uregs = signal_frame_32_regs(sp, next_sp, lr);
                if (uregs) {
                        /*
                         * This looks like a signal frame, so restart
                         * the stack trace with the values in it.
                         */
                        if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
                            read_user_stack_32(&uregs[PT_LNK], &lr) ||
                            read_user_stack_32(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
                        callchain_store(entry, PERF_CONTEXT_USER);
                        callchain_store(entry, next_ip);
                        continue;
                }

                if (level == 0)
                        next_ip = lr;
                callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
}

/*
 * Since we can't get PMU interrupts inside a PMU interrupt handler,
 * we don't need separate irq and nmi entries here.
 */
static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry = &__get_cpu_var(callchain);

        entry->nr = 0;

        if (current->pid == 0)          /* idle task? */
                return entry;

        if (!user_mode(regs)) {
                perf_callchain_kernel(regs, entry);
                if (current->mm)
                        regs = task_pt_regs(current);
                else
                        regs = NULL;
        }

        if (regs) {
                if (current_is_64bit())
                        perf_callchain_user_64(regs, entry);
                else
                        perf_callchain_user_32(regs, entry);
        }

        return entry;
}