From eec399dd862762b9594df3659f15839a4e12f17a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 7 Feb 2020 13:38:54 +0100
Subject: [PATCH] x86/vdso: Move VDSO clocksource state tracking to callback

All architectures which use the generic VDSO code have their own storage
for the VDSO clock mode. That's pointless and just requires duplicate code.

X86 abuses the function which retrieves the architecture-specific clock
mode storage to mark the clocksource as used in the VDSO. That's silly
because this is invoked on every tick when the VDSO data is updated.

Move this functionality to the clocksource::enable() callback so it gets
invoked once when the clocksource is installed. This allows the clock
mode storage to be made generic.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com> (Hyper-V parts)
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com> (VDSO parts)
Acked-by: Juergen Gross <jgross@suse.com> (Xen parts)
Link: https://lkml.kernel.org/r/20200207124402.934519777@linutronix.de
---
 arch/x86/entry/vdso/vma.c            |  4 ++++
 arch/x86/include/asm/clocksource.h   | 12 +++++++++++
 arch/x86/include/asm/mshyperv.h      |  2 ++
 arch/x86/include/asm/vdso/vsyscall.h | 10 +--------
 arch/x86/include/asm/vgtod.h         |  6 ------
 arch/x86/kernel/kvmclock.c           |  7 ++++++
 arch/x86/kernel/tsc.c                | 32 +++++++++++++++++-----------
 arch/x86/xen/time.c                  | 17 ++++++++++-----
 drivers/clocksource/hyperv_timer.c   |  7 ++++++
 9 files changed, 65 insertions(+), 32 deletions(-)

diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index c1b8496b5606..cce3e809f17e 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -38,6 +38,8 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
 }
 #undef EMIT_VVAR
 
+unsigned int vclocks_used __read_mostly;
+
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
 #endif
@@ -445,6 +447,8 @@ __setup("vdso=", vdso_setup);
 
 static int __init init_vdso(void)
 {
+	BUILD_BUG_ON(VCLOCK_MAX >= 32);
+
 	init_vdso_image(&vdso_image_64);
 
 #ifdef CONFIG_X86_X32_ABI
diff --git a/arch/x86/include/asm/clocksource.h b/arch/x86/include/asm/clocksource.h
index dc4cfc888d6d..2450d6e02a5d 100644
--- a/arch/x86/include/asm/clocksource.h
+++ b/arch/x86/include/asm/clocksource.h
@@ -14,4 +14,16 @@ struct arch_clocksource_data {
 	int vclock_mode;
 };
 
+extern unsigned int vclocks_used;
+
+static inline bool vclock_was_used(int vclock)
+{
+	return READ_ONCE(vclocks_used) & (1U << vclock);
+}
+
+static inline void vclocks_set_used(unsigned int which)
+{
+	WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << which));
+}
+
 #endif /* _ASM_X86_CLOCKSOURCE_H */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 6b79515abb82..f7cbc01f128e 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -47,6 +47,8 @@ typedef int (*hyperv_fill_flush_list_func)(
 		wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
 #define hv_set_clocksource_vdso(val) \
 	((val).archdata.vclock_mode = VCLOCK_HVCLOCK)
+#define hv_enable_vdso_clocksource() \
+	vclocks_set_used(VCLOCK_HVCLOCK);
 #define hv_get_raw_timer() rdtsc_ordered()
 
 void hyperv_callback_vector(void);
diff --git a/arch/x86/include/asm/vdso/vsyscall.h b/arch/x86/include/asm/vdso/vsyscall.h
index 0026ab2123ce..01f5733d14d6 100644
--- a/arch/x86/include/asm/vdso/vsyscall.h
+++ b/arch/x86/include/asm/vdso/vsyscall.h
@@ -10,8 +10,6 @@
 #include <asm/vgtod.h>
 #include <asm/vvar.h>
 
-int vclocks_used __read_mostly;
-
 DEFINE_VVAR(struct vdso_data, _vdso_data);
 /*
  * Update the vDSO data page to keep in sync with kernel timekeeping.
@@ -26,13 +24,7 @@ struct vdso_data *__x86_get_k_vdso_data(void)
 static __always_inline
 int __x86_get_clock_mode(struct timekeeper *tk)
 {
-	int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
-
-	/* Mark the new vclock used. */
-	BUILD_BUG_ON(VCLOCK_MAX >= 32);
-	WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode));
-
-	return vclock_mode;
+	return tk->tkr_mono.clock->archdata.vclock_mode;
 }
 #define __arch_get_clock_mode __x86_get_clock_mode
 
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index a2638c6124ed..fc8e4cd342cc 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -15,10 +15,4 @@ typedef u64 gtod_long_t;
 typedef unsigned long gtod_long_t;
 #endif
 
-extern int vclocks_used;
-static inline bool vclock_was_used(int vclock)
-{
-	return READ_ONCE(vclocks_used) & (1 << vclock);
-}
-
 #endif /* _ASM_X86_VGTOD_H */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 904494b924c1..33f2cac25f13 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -159,12 +159,19 @@ bool kvm_check_and_clear_guest_paused(void)
 	return ret;
 }
 
+static int kvm_cs_enable(struct clocksource *cs)
+{
+	vclocks_set_used(VCLOCK_PVCLOCK);
+	return 0;
+}
+
 struct clocksource kvm_clock = {
 	.name	= "kvm-clock",
 	.read	= kvm_clock_get_cycles,
 	.rating	= 400,
 	.mask	= CLOCKSOURCE_MASK(64),
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+	.enable	= kvm_cs_enable,
 };
 EXPORT_SYMBOL_GPL(kvm_clock);
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 7e322e2daaf5..742da141a30a 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1108,17 +1108,24 @@ static void tsc_cs_tick_stable(struct clocksource *cs)
 		sched_clock_tick_stable();
 }
 
+static int tsc_cs_enable(struct clocksource *cs)
+{
+	vclocks_set_used(VCLOCK_TSC);
+	return 0;
+}
+
 /*
  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
  */
 static struct clocksource clocksource_tsc_early = {
-	.name                   = "tsc-early",
-	.rating                 = 299,
-	.read                   = read_tsc,
-	.mask                   = CLOCKSOURCE_MASK(64),
-	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
+	.name			= "tsc-early",
+	.rating			= 299,
+	.read			= read_tsc,
+	.mask			= CLOCKSOURCE_MASK(64),
+	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
-	.archdata               = { .vclock_mode = VCLOCK_TSC },
+	.archdata		= { .vclock_mode = VCLOCK_TSC },
+	.enable			= tsc_cs_enable,
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
 	.tick_stable		= tsc_cs_tick_stable,
@@ -1131,14 +1138,15 @@ static struct clocksource clocksource_tsc_early = {
  * been found good.
  */
 static struct clocksource clocksource_tsc = {
-	.name                   = "tsc",
-	.rating                 = 300,
-	.read                   = read_tsc,
-	.mask                   = CLOCKSOURCE_MASK(64),
-	.flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
+	.name			= "tsc",
+	.rating			= 300,
+	.read			= read_tsc,
+	.mask			= CLOCKSOURCE_MASK(64),
+	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_VALID_FOR_HRES |
 				  CLOCK_SOURCE_MUST_VERIFY,
-	.archdata               = { .vclock_mode = VCLOCK_TSC },
+	.archdata		= { .vclock_mode = VCLOCK_TSC },
+	.enable			= tsc_cs_enable,
 	.resume			= tsc_resume,
 	.mark_unstable		= tsc_cs_mark_unstable,
 	.tick_stable		= tsc_cs_tick_stable,
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index befbdd8b17f0..5d1568ff19ea 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -145,12 +145,19 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
 	.notifier_call = xen_pvclock_gtod_notify,
 };
 
+static int xen_cs_enable(struct clocksource *cs)
+{
+	vclocks_set_used(VCLOCK_PVCLOCK);
+	return 0;
+}
+
 static struct clocksource xen_clocksource __read_mostly = {
-	.name = "xen",
-	.rating = 400,
-	.read = xen_clocksource_get_cycles,
-	.mask = ~0,
-	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	.name			= "xen",
+	.rating			= 400,
+	.read			= xen_clocksource_get_cycles,
+	.mask			= CLOCKSOURCE_MASK(64),
+	.flags			= CLOCK_SOURCE_IS_CONTINUOUS,
+	.enable			= xen_cs_enable,
 };
 
 /*
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 9d808d595ca8..a86859ccc61b 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -369,6 +369,12 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
 	hv_set_reference_tsc(tsc_msr);
 }
 
+static int hv_cs_enable(struct clocksource *cs)
+{
+	hv_enable_vdso_clocksource();
+	return 0;
+}
+
 static struct clocksource hyperv_cs_tsc = {
 	.name	= "hyperv_clocksource_tsc_page",
 	.rating	= 250,
@@ -377,6 +383,7 @@ static struct clocksource hyperv_cs_tsc = {
 	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
 	.suspend= suspend_hv_clock_tsc,
 	.resume	= resume_hv_clock_tsc,
+	.enable = hv_cs_enable,
 };
 
 static u64 notrace read_hv_clock_msr(void)
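
Note: the sketch below is not part of the patch; it only illustrates the pattern the patch applies to each driver (kvm-clock, Xen, Hyper-V, TSC), shown for a hypothetical x86 paravirt clocksource. All example_* names are invented; vclocks_set_used(), VCLOCK_PVCLOCK and struct arch_clocksource_data are the x86 helpers and types seen in the diff above, and the .archdata member assumes CONFIG_ARCH_CLOCKSOURCE_DATA (as on pre-5.6 x86).

/*
 * Illustrative sketch only: mark the vDSO clock mode as used from the
 * clocksource::enable() callback instead of from the per-tick vDSO
 * update path.
 */
#include <linux/clocksource.h>
#include <asm/clocksource.h>	/* vclocks_set_used(), VCLOCK_* */

static u64 example_cs_read(struct clocksource *cs)
{
	return 0;	/* a real driver reads its hardware counter here */
}

static int example_cs_enable(struct clocksource *cs)
{
	/* Runs once when the clocksource is installed, not on every tick. */
	vclocks_set_used(VCLOCK_PVCLOCK);
	return 0;
}

static struct clocksource example_clocksource = {
	.name		= "example",
	.rating		= 200,
	.read		= example_cs_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.enable		= example_cs_enable,
	.archdata	= { .vclock_mode = VCLOCK_PVCLOCK },
};

With this shape, the timekeeping core invokes ->enable() when it switches to the clocksource, so the used-bit bookkeeping happens once there, and the vDSO update path (__x86_get_clock_mode() above) only has to report tk->tkr_mono.clock->archdata.vclock_mode.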