From cebde6d681aa45f96111cfcffc1544cf2a0454ff Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 5 Jan 2015 11:18:10 +0100
Subject: [PATCH] sched/core: Validate rq_clock*() serialization

rq->clock{,_task} are serialized by rq->lock, verify this.

One immediate fail is the usage in scale_rt_capacity(), so 'annotate'
that for now, there's more 'funny' there. Maybe change rq->lock into a
raw_seqlock_t?

(Only 32-bit is affected)

Signed-off-by: Peter Zijlstra (Intel)
Link: http://lkml.kernel.org/r/20150105103554.361872747@infradead.org
Cc: Linus Torvalds
Cc: umgwanakikbuti@gmail.com
Signed-off-by: Ingo Molnar
---
 kernel/sched/fair.c  | 2 +-
 kernel/sched/sched.h | 7 +++++++
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2a0b302e51de..50ff90289293 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5948,8 +5948,8 @@ static unsigned long scale_rt_capacity(int cpu)
 	 */
 	age_stamp = ACCESS_ONCE(rq->age_stamp);
 	avg = ACCESS_ONCE(rq->rt_avg);
+	delta = __rq_clock_broken(rq) - age_stamp;
 
-	delta = rq_clock(rq) - age_stamp;
 	if (unlikely(delta < 0))
 		delta = 0;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9a2a45c970e7..bd2373273a9e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -687,13 +687,20 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 #define raw_rq()		raw_cpu_ptr(&runqueues)
 
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+	return ACCESS_ONCE(rq->clock);
+}
+
 static inline u64 rq_clock(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock;
 }
 
 static inline u64 rq_clock_task(struct rq *rq)
 {
+	lockdep_assert_held(&rq->lock);
 	return rq->clock_task;
 }
 