perf/x86: Change x86_pmu::limit_period signature

In preparation for making it a static_call, change the signature.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20220829101321.573713839@infradead.org
Author: Peter Zijlstra, 2022-05-10 21:28:25 +02:00
commit 28f0f3c44b (parent e577bb17a1)
4 changed files with 20 additions and 22 deletions
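For context on the motivation: a static_call site whose target may be unset is invoked through static_call_cond(), which requires the callee to return void; that is why the clamped period is now passed back through an s64 * in/out argument instead of being returned. A minimal sketch of how the eventual conversion typically looks, following the existing x86_pmu static-call pattern (names here are assumptions; the actual follow-up patch may differ):

/* Sketch only; assumes the usual x86_pmu static_call naming and conversion pattern. */
DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);

static void x86_pmu_static_call_update(void)
{
        static_call_update(x86_pmu_limit_period, x86_pmu.limit_period);
}

/* Call site: an unset callback degrades to a no-op, no NULL check needed. */
static_call_cond(x86_pmu_limit_period)(event, &left);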

arch/x86/events/amd/core.c

@@ -1224,16 +1224,14 @@ static ssize_t amd_event_sysfs_show(char *page, u64 config)
 	return x86_event_sysfs_show(page, config, event);
 }
 
-static u64 amd_pmu_limit_period(struct perf_event *event, u64 left)
+static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
 {
 	/*
 	 * Decrease period by the depth of the BRS feature to get the last N
 	 * taken branches and approximate the desired period
 	 */
-	if (has_branch_stack(event) && left > x86_pmu.lbr_nr)
-		left -= x86_pmu.lbr_nr;
-
-	return left;
+	if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
+		*left -= x86_pmu.lbr_nr;
 }
 
 static __initconst const struct x86_pmu amd_pmu = {

arch/x86/events/core.c

@@ -621,8 +621,9 @@ int x86_pmu_hw_config(struct perf_event *event)
 	event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
 
 	if (event->attr.sample_period && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, event->attr.sample_period) >
-				event->attr.sample_period)
+		s64 left = event->attr.sample_period;
+		x86_pmu.limit_period(event, &left);
+		if (left > event->attr.sample_period)
 			return -EINVAL;
 	}
 
@@ -1396,9 +1397,9 @@ int x86_perf_event_set_period(struct perf_event *event)
 		left = x86_pmu.max_period;
 
 	if (x86_pmu.limit_period)
-		left = x86_pmu.limit_period(event, left);
+		x86_pmu.limit_period(event, &left);
 
-	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
+	this_cpu_write(pmc_prev_left[idx], left);
 
 	/*
 	 * The hw event starts counting from this event offset,
@@ -2677,7 +2678,9 @@ static int x86_pmu_check_period(struct perf_event *event, u64 value)
 		return -EINVAL;
 
 	if (value && x86_pmu.limit_period) {
-		if (x86_pmu.limit_period(event, value) > value)
+		s64 left = value;
+		x86_pmu.limit_period(event, &left);
+		if (left > value)
 			return -EINVAL;
 	}
 

arch/x86/events/intel/core.c

@@ -4344,28 +4344,25 @@ static u8 adl_get_hybrid_cpu_type(void)
  * Therefore the effective (average) period matches the requested period,
  * despite coarser hardware granularity.
  */
-static u64 bdw_limit_period(struct perf_event *event, u64 left)
+static void bdw_limit_period(struct perf_event *event, s64 *left)
 {
 	if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
 			X86_CONFIG(.event=0xc0, .umask=0x01)) {
-		if (left < 128)
-			left = 128;
-		left &= ~0x3fULL;
+		if (*left < 128)
+			*left = 128;
+		*left &= ~0x3fULL;
 	}
-	return left;
 }
 
-static u64 nhm_limit_period(struct perf_event *event, u64 left)
+static void nhm_limit_period(struct perf_event *event, s64 *left)
 {
-	return max(left, 32ULL);
+	*left = max(*left, 32LL);
 }
 
-static u64 spr_limit_period(struct perf_event *event, u64 left)
+static void spr_limit_period(struct perf_event *event, s64 *left)
 {
 	if (event->attr.precise_ip == 3)
-		return max(left, 128ULL);
-
-	return left;
+		*left = max(*left, 128LL);
 }
 
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
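One detail worth calling out in the hunk above: because the period is now manipulated through an s64 pointer, the max() constants change from 32ULL/128ULL to 32LL/128LL. The kernel's max() macro type-checks its arguments, so comparing an s64 against an unsigned long long constant would trip the distinct-types warning. Illustrative snippet only, not part of the patch:

s64 left = 20;
left = max(left, 32LL);		/* OK: both operands are signed 64-bit */
/* left = max(left, 32ULL);	   would warn: s64 vs unsigned long long */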

arch/x86/events/perf_event.h

@@ -781,7 +781,7 @@ struct x86_pmu {
 	struct event_constraint *event_constraints;
 	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
-	u64		(*limit_period)(struct perf_event *event, u64 l);
+	void		(*limit_period)(struct perf_event *event, s64 *l);
 
 	/* PMI handler bits */
 	unsigned int	late_ack		:1,