cputime: Consolidate vtime handling on context switch

The architectures that implement virtual cputime accounting all
flush a task's cputime when it is descheduled, and some of them
also perform initial setup so that the next task's cputime can
be accounted.

Each of these architectures currently places its own hooks in its
context switch callbacks and provides its own no-op fallback for
the case where accounting is disabled.

Consolidate this by introducing a new account_switch_vtime()
callback, called from generic code right after a context switch,
which these architectures must implement to flush the previous
task's cputime and initialize the cputime-related state of the
next task.
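
For orientation, the pieces this patch adds can be read together as the
condensed sketch below. It is assembled from the hunks in this commit
(the kernel_stat.h-style declaration, the scheduler call site, and the
account_process_tick-based implementation); it is illustrative only,
not a standalone compilation unit, and the grouping into header,
scheduler and arch sides is an editorial reading of the diff.

/* Generic header side: a single hook, with a no-op fallback when
 * CONFIG_VIRT_CPU_ACCOUNTING is not set, so generic code can call it
 * unconditionally. */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_switch_vtime(struct task_struct *prev);
#else
static inline void account_switch_vtime(struct task_struct *prev) { }
#endif

/* Scheduler side: finish_task_switch() invokes the hook right after the
 * context switch, just before finish_arch_switch(prev), so architectures
 * no longer need their own hooks in their switch_to() paths. */

/* Arch side, e.g. the variant shown below that reuses the tick path:
 * flush the outgoing task's pending system and process time. */
void account_switch_vtime(struct task_struct *prev)
{
	account_system_vtime(prev);
	account_process_tick(prev, 0);
}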

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>

Author: Frederic Weisbecker
Date:   2012-06-18 17:54:14 +02:00
parent 73fbec6044
commit baa36046d0
9 changed files with 17 additions and 23 deletions

@@ -30,13 +30,6 @@ extern struct task_struct *ia64_switch_to (void *next_task);
extern void ia64_save_extra (struct task_struct *task);
extern void ia64_load_extra (struct task_struct *task);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next);
# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n)
#else
# define IA64_ACCOUNT_ON_SWITCH(p,n)
#endif
#ifdef CONFIG_PERFMON
DECLARE_PER_CPU(unsigned long, pfm_syst_info);
# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
@@ -49,7 +42,6 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct
|| PERFMON_IS_SYSWIDE())
#define __switch_to(prev,next,last) do { \
IA64_ACCOUNT_ON_SWITCH(prev, next); \
if (IA64_HAS_EXTRA_STATE(prev)) \
ia64_save_extra(prev); \
if (IA64_HAS_EXTRA_STATE(next)) \

@@ -88,10 +88,10 @@ extern cputime_t cycle_to_cputime(u64 cyc);
* accumulated times to the current process, and to prepare accounting on
* the next process.
*/
void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
void account_switch_vtime(struct task_struct *prev)
{
struct thread_info *pi = task_thread_info(prev);
struct thread_info *ni = task_thread_info(next);
struct thread_info *ni = task_thread_info(current);
cputime_t delta_stime, delta_utime;
__u64 now;

@@ -197,12 +197,6 @@ struct cpu_usage {
DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
#if defined(CONFIG_VIRT_CPU_ACCOUNTING)
#define account_process_vtime(tsk) account_process_tick(tsk, 0)
#else
#define account_process_vtime(tsk) do { } while (0)
#endif
extern void secondary_cpu_time_init(void);
DECLARE_PER_CPU(u64, decrementers_next_tb);

@@ -514,9 +514,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
local_irq_save(flags);
account_system_vtime(current);
account_process_vtime(current);
/*
* We can't take a PMU exception inside _switch() since there is a
* window where the kernel stack SLB and the kernel stack are out

@@ -366,6 +366,12 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
account_user_time(tsk, utime, utimescaled);
}
void account_switch_vtime(struct task_struct *prev)
{
account_system_vtime(prev);
account_process_tick(prev, 0);
}
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#endif

@@ -89,12 +89,10 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
#define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \
account_vtime(prev, current); \
} while (0)
#endif /* __ASM_SWITCH_TO_H */

@@ -99,7 +99,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
return virt_timer_forward(user + system);
}
void account_vtime(struct task_struct *prev, struct task_struct *next)
void account_switch_vtime(struct task_struct *prev)
{
struct thread_info *ti;
@@ -107,7 +107,7 @@ void account_vtime(struct task_struct *prev, struct task_struct *next)
ti = task_thread_info(prev);
ti->user_timer = S390_lowcore.user_timer;
ti->system_timer = S390_lowcore.system_timer;
ti = task_thread_info(next);
ti = task_thread_info(current);
S390_lowcore.user_timer = ti->user_timer;
S390_lowcore.system_timer = ti->system_timer;
}

@@ -130,4 +130,10 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_switch_vtime(struct task_struct *prev);
#else
static inline void account_switch_vtime(struct task_struct *prev) { }
#endif
#endif /* _LINUX_KERNEL_STAT_H */

@@ -1796,6 +1796,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
* Manfred Spraul <manfred@colorfullife.com>
*/
prev_state = prev->state;
account_switch_vtime(prev);
finish_arch_switch(prev);
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
local_irq_disable();
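
Read together with the ia64 and s390 hunks above, the per-arch side of
the new contract has a common shape: flush whatever the outgoing task
has accumulated, then prime the per-CPU/per-thread state for the task
that is now running (which is why the callback only takes prev; the
incoming task is simply current). A minimal hypothetical skeleton of
that shape is sketched below; arch_vtime_now() and the vtime_snap field
are invented names for illustration only, not real kernel API.

/*
 * Hypothetical arch-side skeleton of account_switch_vtime(), modeled on
 * the ia64/s390 pattern above. arch_vtime_now() and ->vtime_snap are
 * illustrative names and do not exist in the kernel.
 */
void account_switch_vtime(struct task_struct *prev)
{
	struct thread_info *pi = task_thread_info(prev);
	struct thread_info *ni = task_thread_info(current);	/* "next" is current here */
	__u64 now = arch_vtime_now();	/* read the per-CPU virtual time source */

	/* Flush the time the outgoing task accumulated since its last
	 * snapshot into the generic accounting (how it is split between
	 * system and user time is arch-specific). */
	account_system_time(prev, 0, now - pi->vtime_snap, now - pi->vtime_snap);

	/* Start the incoming task's accounting window from this point. */
	ni->vtime_snap = now;
}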