sh: oprofile: Use perf-events oprofile backend

Now that we've got a generic perf-events-based oprofile backend, we might
as well make use of it, seeing as SH doesn't do anything special with its
oprofile backend. Also introduce a new CONFIG_HW_PERF_EVENTS symbol so
that we can fall back to using the timer interrupt for oprofile if the
CPU doesn't support perf events.

Also, to avoid a section mismatch warning, we need to annotate
oprofile_arch_exit() with an __exit marker.

Signed-off-by: Matt Fleming <matt@console-pimps.org>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Robert Richter <robert.richter@amd.com>
Author: Matt Fleming, 2010-09-10 20:36:23 +01:00 (committed by Robert Richter)
parent 3d90a00763
commit 86c8c04792
4 changed files with 41 additions and 126 deletions
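
For context on the fallback mentioned in the commit message: the SH code does not select the timer mode itself, it simply returns -ENODEV when hardware counters are unavailable, and the oprofile core then falls back to its generic timer interrupt backend (timer_int.o, already listed in DRIVER_OBJS below). A simplified sketch of that control flow, assuming the oprofile_timer_init() helper from drivers/oprofile/timer_int.c; this is illustrative, not the literal drivers/oprofile code:

/*
 * Simplified sketch of the oprofile core's init-time fallback. The real
 * logic lives in drivers/oprofile/oprof.c and differs in detail.
 */
static int __init oprofile_init_sketch(struct oprofile_operations *ops)
{
	int err;

	err = oprofile_arch_init(ops);	/* -ENODEV without CONFIG_HW_PERF_EVENTS */
	if (err < 0) {
		printk(KERN_INFO "oprofile: using timer interrupt.\n");
		err = oprofile_timer_init(ops);	/* timer-based sampling */
	}

	return err;
}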

arch/sh/Kconfig

@@ -249,6 +249,11 @@ config ARCH_SHMOBILE
 	select PM
 	select PM_RUNTIME
 
+config CPU_HAS_PMU
+	depends on CPU_SH4 || CPU_SH4A
+	default y
+	bool
+
 if SUPERH32
 
 choice
@@ -738,6 +743,14 @@ config GUSA_RB
 	  LLSC, this should be more efficient than the other alternative of
 	  disabling interrupts around the atomic sequence.
 
+config HW_PERF_EVENTS
+	bool "Enable hardware performance counter support for perf events"
+	depends on PERF_EVENTS && CPU_HAS_PMU
+	default y
+	help
+	  Enable hardware performance counter support for perf events. If
+	  disabled, perf events will use software events only.
+
 source "drivers/sh/Kconfig"
 
 endmenu

arch/sh/oprofile/Makefile

@@ -6,4 +6,8 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
+ifeq ($(CONFIG_HW_PERF_EVENTS),y)
+DRIVER_OBJS += $(addprefix ../../../drivers/oprofile/, oprofile_perf.o)
+endif
+
 oprofile-y	:= $(DRIVER_OBJS) common.o backtrace.o

arch/sh/oprofile/common.c

@@ -17,114 +17,45 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/smp.h>
+#include <linux/perf_event.h>
 #include <asm/processor.h>
-#include "op_impl.h"
-
-static struct op_sh_model *model;
-
-static struct op_counter_config ctr[20];
 
+#ifdef CONFIG_HW_PERF_EVENTS
 extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
 
-static int op_sh_setup(void)
+char *op_name_from_perf_id(void)
 {
-	/* Pre-compute the values to stuff in the hardware registers.  */
-	model->reg_setup(ctr);
+	const char *pmu;
+	char buf[20];
+	int size;
 
-	/* Configure the registers on all cpus.  */
-	on_each_cpu(model->cpu_setup, NULL, 1);
+	pmu = perf_pmu_name();
+	if (!pmu)
+		return NULL;
 
-	return 0;
-}
+	size = snprintf(buf, sizeof(buf), "sh/%s", pmu);
+	if (size > -1 && size < sizeof(buf))
+		return buf;
 
-static int op_sh_create_files(struct super_block *sb, struct dentry *root)
-{
-	int i, ret = 0;
-
-	for (i = 0; i < model->num_counters; i++) {
-		struct dentry *dir;
-		char buf[4];
-
-		snprintf(buf, sizeof(buf), "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
-
-		ret |= oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		ret |= oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		ret |= oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		ret |= oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-
-		if (model->create_files)
-			ret |= model->create_files(sb, dir);
-		else
-			ret |= oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
-
-		/* Dummy entries */
-		ret |= oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
-	}
-
-	return ret;
-}
-
-static int op_sh_start(void)
-{
-	/* Enable performance monitoring for all counters.  */
-	on_each_cpu(model->cpu_start, NULL, 1);
-
-	return 0;
-}
-
-static void op_sh_stop(void)
-{
-	/* Disable performance monitoring for all counters.  */
-	on_each_cpu(model->cpu_stop, NULL, 1);
+	return NULL;
 }
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
-	struct op_sh_model *lmodel = NULL;
-	int ret;
-
-	/*
-	 * Always assign the backtrace op. If the counter initialization
-	 * fails, we fall back to the timer which will still make use of
-	 * this.
-	 */
 	ops->backtrace = sh_backtrace;
 
-	/*
-	 * XXX
-	 *
-	 * All of the SH7750/SH-4A counters have been converted to perf,
-	 * this infrastructure hook is left for other users until they've
-	 * had a chance to convert over, at which point all of this
-	 * will be deleted.
-	 */
-	if (!lmodel)
-		return -ENODEV;
-	if (!(current_cpu_data.flags & CPU_HAS_PERF_COUNTER))
-		return -ENODEV;
-
-	ret = lmodel->init();
-	if (unlikely(ret != 0))
-		return ret;
-
-	model = lmodel;
-
-	ops->setup		= op_sh_setup;
-	ops->create_files	= op_sh_create_files;
-	ops->start		= op_sh_start;
-	ops->stop		= op_sh_stop;
-	ops->cpu_type		= lmodel->cpu_type;
-
-	printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
-	       lmodel->cpu_type);
-
-	return 0;
+	return oprofile_perf_init(ops);
 }
 
-void oprofile_arch_exit(void)
+void __exit oprofile_arch_exit(void)
 {
-	if (model && model->exit)
-		model->exit();
+	oprofile_perf_exit();
 }
+#else
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+	pr_info("oprofile: hardware counters not available\n");
+	return -ENODEV;
+}
+void __exit oprofile_arch_exit(void) {}
+#endif /* CONFIG_HW_PERF_EVENTS */
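
The reason common.c collapses to the thin wrappers above is that the generic backend added by the parent commit (drivers/oprofile/oprofile_perf.c) now owns the counter setup and oprofilefs plumbing; the architecture only supplies the backtrace hook and a cpu_type name via op_name_from_perf_id(). Roughly, and only as an illustrative sketch of the backend's contract rather than its actual code:

/*
 * Illustrative sketch only: the real oprofile_perf_init() lives in
 * drivers/oprofile/oprofile_perf.c. The point is that the generic code
 * fills in the oprofile_operations hooks that op_sh_*() used to provide,
 * and derives cpu_type from the arch's op_name_from_perf_id().
 */
int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	/* allocate per-cpu perf_event slots, register notifiers, ... */

	ops->cpu_type = op_name_from_perf_id();	/* e.g. "sh/<pmu-name>" */
	if (!ops->cpu_type)
		return -ENODEV;

	/* ops->create_files/setup/start/stop now point at the backend's
	 * perf-events based implementations instead of op_sh_*(). */
	pr_info("oprofile: using %s\n", ops->cpu_type);

	return 0;
}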

arch/sh/oprofile/op_impl.h (deleted)

@@ -1,33 +0,0 @@
-#ifndef __OP_IMPL_H
-#define __OP_IMPL_H
-
-/* Per-counter configuration as set via oprofilefs.  */
-struct op_counter_config {
-	unsigned long enabled;
-	unsigned long event;
-
-	unsigned long count;
-
-	/* Dummy values for userspace tool compliance */
-	unsigned long kernel;
-	unsigned long user;
-	unsigned long unit_mask;
-};
-
-/* Per-architecture configury and hooks.  */
-struct op_sh_model {
-	void (*reg_setup)(struct op_counter_config *);
-	int (*create_files)(struct super_block *sb, struct dentry *dir);
-	void (*cpu_setup)(void *dummy);
-	int (*init)(void);
-	void (*exit)(void);
-	void (*cpu_start)(void *args);
-	void (*cpu_stop)(void *args);
-	char *cpu_type;
-	unsigned char num_counters;
-};
-
-/* arch/sh/oprofile/common.c */
-extern void sh_backtrace(struct pt_regs * const regs, unsigned int depth);
-
-#endif /* __OP_IMPL_H */