sched/cputime: Complete nsec conversion of tick based accounting
This is the final step toward tick based cputime conversion. Now that
the whole cputime accounting engine accounts in nsecs, we can convert
the very source of the cputime to account in nsecs.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-26-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fb8b049c98
commit 2b1f967d80
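The change itself is mechanical: the tick is handed to the accounting core directly as TICK_NSEC nanoseconds rather than being built as a cputime_t and converted at the last moment. A condensed before/after of the tick path, pulled from the account_process_tick() hunk below (an orientation sketch only, with the user/system dispatch omitted; it is not part of the patch):

        /* Before: the tick starts life as a cputime_t and is converted late */
        old_cputime = cputime_one_jiffy;
        steal = steal_account_process_time(ULONG_MAX);
        if (steal >= old_cputime)
                return;
        old_cputime -= steal;
        cputime = cputime_to_nsecs(old_cputime);
        account_user_time(p, cputime);

        /* After: the tick is expressed in nanoseconds from the start */
        cputime = TICK_NSEC;
        steal = steal_account_process_time(ULONG_MAX);
        if (steal >= cputime)
                return;
        cputime -= steal;
        account_user_time(p, cputime);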
@@ -75,14 +75,13 @@ void irqtime_account_irq(struct task_struct *curr)
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
-static cputime_t irqtime_tick_accounted(cputime_t maxtime)
+static u64 irqtime_tick_accounted(u64 maxtime)
 {
         struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
-        cputime_t delta;
+        u64 delta;
 
-        delta = nsecs_to_cputime(irqtime->tick_delta);
-        delta = min(delta, maxtime);
-        irqtime->tick_delta -= cputime_to_nsecs(delta);
+        delta = min(irqtime->tick_delta, maxtime);
+        irqtime->tick_delta -= delta;
 
         return delta;
 }
@@ -91,7 +90,7 @@ static cputime_t irqtime_tick_accounted(cputime_t maxtime)
 
 #define sched_clock_irqtime (0)
 
-static cputime_t irqtime_tick_accounted(cputime_t dummy)
+static u64 irqtime_tick_accounted(u64 dummy)
 {
         return 0;
 }
@@ -234,22 +233,19 @@ void account_idle_time(u64 cputime)
  * ticks are not redelivered later. Due to that, this function may on
  * occasion account more time than the calling functions think elapsed.
  */
-static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
+static __always_inline u64 steal_account_process_time(u64 maxtime)
 {
 #ifdef CONFIG_PARAVIRT
         if (static_key_false(&paravirt_steal_enabled)) {
-                cputime_t steal_cputime;
-                u64 steal, rounded;
+                u64 steal;
 
                 steal = paravirt_steal_clock(smp_processor_id());
                 steal -= this_rq()->prev_steal_time;
+                steal = min(steal, maxtime);
+                account_steal_time(steal);
+                this_rq()->prev_steal_time += steal;
 
-                steal_cputime = min(nsecs_to_cputime(steal), maxtime);
-                rounded = cputime_to_nsecs(steal_cputime);
-                account_steal_time(rounded);
-                this_rq()->prev_steal_time += rounded;
-
-                return steal_cputime;
+                return steal;
         }
 #endif
         return 0;
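For reference, this is how steal_account_process_time() reads after the hunk above, reconstructed from the diff (the explanatory comments are added here and are not in the kernel source). With both the paravirt steal clock and the accounting side in nanoseconds, the cputime_t round-trip through nsecs_to_cputime()/cputime_to_nsecs() and the separate rounded value disappear; the clamped delta is accounted and returned as-is:

static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal;

                /* Steal time elapsed since it was last folded in, in nsecs */
                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;

                /* Clamp to the window the caller is willing to account */
                steal = min(steal, maxtime);
                account_steal_time(steal);
                this_rq()->prev_steal_time += steal;

                return steal;
        }
#endif
        return 0;
}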
@@ -258,9 +254,9 @@ static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 /*
  * Account how much elapsed time was spent in steal, irq, or softirq time.
  */
-static inline cputime_t account_other_time(cputime_t max)
+static inline u64 account_other_time(u64 max)
 {
-        cputime_t accounted;
+        u64 accounted;
 
         /* Shall be converted to a lockdep-enabled lightweight check */
         WARN_ON_ONCE(!irqs_disabled());
@@ -364,9 +360,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                          struct rq *rq, int ticks)
 {
-        u64 old_cputime = (__force u64) cputime_one_jiffy * ticks;
-        cputime_t other;
-        u64 cputime;
+        u64 other, cputime = TICK_NSEC * ticks;
 
         /*
          * When returning from idle, many ticks can get accounted at
@@ -376,11 +370,10 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
          * other time can exceed ticks occasionally.
          */
         other = account_other_time(ULONG_MAX);
-        if (other >= old_cputime)
+        if (other >= cputime)
                 return;
 
-        old_cputime -= other;
-        cputime = cputime_to_nsecs(old_cputime);
+        cputime -= other;
 
         if (this_cpu_ksoftirqd() == p) {
                 /*
@@ -477,8 +470,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-        cputime_t old_cputime, steal;
-        u64 cputime;
+        u64 cputime, steal;
         struct rq *rq = this_rq();
 
         if (vtime_accounting_cpu_enabled())
@@ -489,14 +481,13 @@ void account_process_tick(struct task_struct *p, int user_tick)
                 return;
         }
 
-        old_cputime = cputime_one_jiffy;
+        cputime = TICK_NSEC;
         steal = steal_account_process_time(ULONG_MAX);
 
-        if (steal >= old_cputime)
+        if (steal >= cputime)
                 return;
 
-        old_cputime -= steal;
-        cputime = cputime_to_nsecs(old_cputime);
+        cputime -= steal;
 
         if (user_tick)
                 account_user_time(p, cputime);
@@ -520,7 +511,7 @@ void account_idle_ticks(unsigned long ticks)
         }
 
         cputime = ticks * TICK_NSEC;
-        steal = cputime_to_nsecs(steal_account_process_time(ULONG_MAX));
+        steal = steal_account_process_time(ULONG_MAX);
 
         if (steal >= cputime)
                 return;
@@ -741,6 +732,7 @@ void vtime_account_user(struct task_struct *tsk)
         write_seqcount_begin(&tsk->vtime_seqcount);
         tsk->vtime_snap_whence = VTIME_SYS;
         if (vtime_delta(tsk)) {
+                u64 nsecs;
                 delta_cpu = get_vtime_delta(tsk);
                 account_user_time(tsk, cputime_to_nsecs(delta_cpu));
         }