Currently, whether generic vtime is running or not, the idle cputime is fetched from the nohz accounting.
However generic vtime already does its own idle cputime accounting. Only the kernel stat accessors are not plugged to support it. Read the idle generic vtime cputime when it's running, this will allow to later more clearly split nohz and vtime cputime accounting. Signed-off-by: Frederic Weisbecker <[email protected]> --- include/linux/vtime.h | 9 +++++++-- kernel/sched/cputime.c | 38 +++++++++++++++++++++++++++++--------- kernel/time/tick-sched.c | 12 +++++++++--- 3 files changed, 45 insertions(+), 14 deletions(-) diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 29dd5b91dd7d..336875bea767 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -10,7 +10,6 @@ */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING extern void vtime_account_kernel(struct task_struct *tsk); -extern void vtime_account_idle(struct task_struct *tsk); #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN @@ -27,7 +26,13 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { } static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { } #endif +static inline bool vtime_generic_enabled_cpu(int cpu) +{ + return context_tracking_enabled_cpu(cpu); +} + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE +extern void vtime_account_idle(struct task_struct *tsk); extern void vtime_account_irq(struct task_struct *tsk, unsigned int offset); extern void vtime_account_softirq(struct task_struct *tsk); extern void vtime_account_hardirq(struct task_struct *tsk); @@ -74,7 +79,7 @@ static inline bool vtime_accounting_enabled(void) static inline bool vtime_accounting_enabled_cpu(int cpu) { - return context_tracking_enabled_cpu(cpu); + return vtime_generic_enabled_cpu(cpu); } static inline bool vtime_accounting_enabled_this_cpu(void) diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 5dcb0f2e01bc..5613838d0307 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -759,9 +759,9 @@ void vtime_guest_exit(struct task_struct *tsk) } 
EXPORT_SYMBOL_GPL(vtime_guest_exit); -void vtime_account_idle(struct task_struct *tsk) +static void __vtime_account_idle(struct vtime *vtime) { - account_idle_time(get_vtime_delta(&tsk->vtime)); + account_idle_time(get_vtime_delta(vtime)); } void vtime_task_switch_generic(struct task_struct *prev) @@ -770,7 +770,7 @@ void vtime_task_switch_generic(struct task_struct *prev) write_seqcount_begin(&vtime->seqcount); if (vtime->state == VTIME_IDLE) - vtime_account_idle(prev); + __vtime_account_idle(vtime); else __vtime_account_kernel(prev, vtime); vtime->state = VTIME_INACTIVE; @@ -912,6 +912,7 @@ static int kcpustat_field_vtime(u64 *cpustat, int cpu, u64 *val) { struct vtime *vtime = &tsk->vtime; + struct rq *rq = cpu_rq(cpu); unsigned int seq; do { @@ -953,6 +954,14 @@ static int kcpustat_field_vtime(u64 *cpustat, if (state == VTIME_GUEST && task_nice(tsk) > 0) *val += vtime->gtime + vtime_delta(vtime); break; + case CPUTIME_IDLE: + if (state == VTIME_IDLE && !atomic_read(&rq->nr_iowait)) + *val += vtime_delta(vtime); + break; + case CPUTIME_IOWAIT: + if (state == VTIME_IDLE && atomic_read(&rq->nr_iowait) > 0) + *val += vtime_delta(vtime); + break; default: break; } @@ -1015,8 +1024,8 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst, *dst = *src; cpustat = dst->cpustat; - /* Task is sleeping, dead or idle, nothing to add */ - if (state < VTIME_SYS) + /* Task is sleeping or dead, nothing to add */ + if (state < VTIME_IDLE) continue; delta = vtime_delta(vtime); @@ -1025,15 +1034,17 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst, * Task runs either in user (including guest) or kernel space, * add pending nohz time to the right place. 
*/ - if (state == VTIME_SYS) { + switch (vtime->state) { + case VTIME_SYS: cpustat[CPUTIME_SYSTEM] += vtime->stime + delta; - } else if (state == VTIME_USER) { + break; + case VTIME_USER: if (task_nice(tsk) > 0) cpustat[CPUTIME_NICE] += vtime->utime + delta; else cpustat[CPUTIME_USER] += vtime->utime + delta; - } else { - WARN_ON_ONCE(state != VTIME_GUEST); + break; + case VTIME_GUEST: if (task_nice(tsk) > 0) { cpustat[CPUTIME_GUEST_NICE] += vtime->gtime + delta; cpustat[CPUTIME_NICE] += vtime->gtime + delta; @@ -1041,6 +1052,15 @@ static int kcpustat_cpu_fetch_vtime(struct kernel_cpustat *dst, cpustat[CPUTIME_GUEST] += vtime->gtime + delta; cpustat[CPUTIME_USER] += vtime->gtime + delta; } + break; + case VTIME_IDLE: + if (atomic_read(&cpu_rq(cpu)->nr_iowait) > 0) + cpustat[CPUTIME_IOWAIT] += delta; + else + cpustat[CPUTIME_IDLE] += delta; + break; + default: + WARN_ON_ONCE(1); } } while (read_seqcount_retry(&vtime->seqcount, seq)); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 8ddf74e705d3..9632066aea4d 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -774,9 +774,10 @@ static void tick_nohz_start_idle(struct tick_sched *ts) sched_clock_idle_sleep_event(); } -static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, +static u64 get_cpu_sleep_time_us(int cpu, enum cpu_usage_stat idx, ktime_t *sleeptime, bool compute_delta, u64 *last_update_time) { + struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); ktime_t now, idle; unsigned int seq; @@ -787,6 +788,11 @@ static u64 get_cpu_sleep_time_us(struct tick_sched *ts, ktime_t *sleeptime, if (last_update_time) *last_update_time = ktime_to_us(now); + if (vtime_generic_enabled_cpu(cpu)) { + idle = kcpustat_field(idx, cpu); + return ktime_to_us(idle); + } + do { seq = read_seqcount_begin(&ts->idle_sleeptime_seq); @@ -824,7 +830,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); - return 
get_cpu_sleep_time_us(ts, &ts->idle_sleeptime, + return get_cpu_sleep_time_us(cpu, CPUTIME_IDLE, &ts->idle_sleeptime, !nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); @@ -850,7 +856,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time) { struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); - return get_cpu_sleep_time_us(ts, &ts->iowait_sleeptime, + return get_cpu_sleep_time_us(cpu, CPUTIME_IOWAIT, &ts->iowait_sleeptime, nr_iowait_cpu(cpu), last_update_time); } EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us); -- 2.51.1
