linux-stable/include/linux/sched/clock.h
Ingo Molnar 5689810360 sched/headers: Move scheduler clock interfaces to <linux/sched/clock.h>
Move the sched_clock interfaces into a separate header file, to reduce
the size of sched.h.

Include <linux/sched/clock.h> in all files that made use of one of the
sched_clock() family of functions.

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2017-03-02 08:42:42 +01:00

#ifndef _LINUX_SCHED_CLOCK_H
#define _LINUX_SCHED_CLOCK_H

#include <linux/sched.h>

/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs; its use (which you should not attempt) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
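/*
 * Usage sketch: in generic code, prefer the interfaces below to raw
 * sched_clock() when timing a short section on one CPU. do_work() is a
 * hypothetical placeholder:
 *
 *	u64 t0 = local_clock();
 *	do_work();
 *	pr_info("took %llu ns\n", local_clock() - t0);
 */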

/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
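/*
 * Without CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, sched_clock() is assumed
 * stable across CPUs, so the state-management hooks below are no-ops
 * and cpu_clock()/local_clock() forward straight to sched_clock().
 */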
static inline void sched_clock_init_late(void)
{
}

static inline void sched_clock_tick(void)
{
}

static inline void clear_sched_clock_stable(void)
{
}

static inline void sched_clock_idle_sleep_event(void)
{
}

static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}

static inline u64 cpu_clock(int cpu)
{
	return sched_clock();
}

static inline u64 local_clock(void)
{
	return sched_clock();
}
#else
extern void sched_clock_init_late(void);

/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
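/*
 * The tick and idle event hooks above give the unstable-clock code in
 * kernel/sched/clock.c a chance to resync its per-CPU state, e.g. after
 * idle periods during which the underlying clock may have stopped.
 */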

/*
 * As outlined in clock.c, provides a fast, high resolution, nanosecond
 * time source that is monotonic per cpu argument and has bounded drift
 * between cpus.
 *
 * ######################### BIG FAT WARNING ##########################
 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
 * # go backwards !!                                                  #
 * ####################################################################
 */
static inline u64 cpu_clock(int cpu)
{
	return sched_clock_cpu(cpu);
}

static inline u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}
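/*
 * Sketch of the warning above: only compare timestamps taken with the
 * same CPU argument; the delta below is meaningful, while subtracting
 * a cpu_clock(j) reading for j != i would not be. do_work() and
 * this_cpu are hypothetical:
 *
 *	int this_cpu = raw_smp_processor_id();
 *	u64 t0 = cpu_clock(this_cpu);
 *	do_work();
 *	u64 delta = cpu_clock(this_cpu) - t0;
 */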
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An interface for runtime opt-in to IRQ time accounting, based off of
 * sched_clock. The opt-in is explicit so that architectures with slow
 * sched_clock() implementations do not pay the accounting overhead.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
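/*
 * Opt-in sketch: an architecture with a fast, stable sched_clock()
 * might enable IRQ time accounting from its clock setup code.
 * my_arch_clocksource_init() and register_my_clocksource() are
 * hypothetical:
 *
 *	static void __init my_arch_clocksource_init(void)
 *	{
 *		register_my_clocksource();
 *		enable_sched_clock_irqtime();
 *	}
 */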
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif

#endif /* _LINUX_SCHED_CLOCK_H */