Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this (fairly busy) cycle were:

   - There was a class of scheduler bugs related to forgetting to update
     the rq-clock timestamp which can cause weird and hard to debug
     problems, so there's a new debug facility for this: which uncovered
     a whole lot of bugs which convinced us that we want to keep the
     debug facility.

     (Peter Zijlstra, Matt Fleming)

   - Various cputime related updates: eliminate cputime and use u64
     nanoseconds directly, simplify and improve the arch interfaces,
     implement delayed accounting more widely, etc. - (Frederic
     Weisbecker)

   - Move code around for better structure plus cleanups (Ingo Molnar)

   - Move IO schedule accounting deeper into the scheduler plus related
     changes to improve the situation (Tejun Heo)

   - ... plus a round of sched/rt and sched/deadline fixes, plus other
     fixes, updates and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (85 commits)
  sched/core: Remove unlikely() annotation from sched_move_task()
  sched/autogroup: Rename auto_group.[ch] to autogroup.[ch]
  sched/topology: Split out scheduler topology code from core.c into topology.c
  sched/core: Remove unnecessary #include headers
  sched/rq_clock: Consolidate the ordering of the rq_clock methods
  delayacct: Include <uapi/linux/taskstats.h>
  sched/core: Clean up comments
  sched/rt: Show the 'sched_rr_timeslice' SCHED_RR timeslice tuning knob in milliseconds
  sched/clock: Add dummy clear_sched_clock_stable() stub function
  sched/cputime: Remove generic asm headers
  sched/cputime: Remove unused nsec_to_cputime()
  s390, sched/cputime: Remove unused cputime definitions
  powerpc, sched/cputime: Remove unused cputime definitions
  s390, sched/cputime: Make arch_cpu_idle_time() to return nsecs
  ia64, sched/cputime: Remove unused cputime definitions
  ia64: Convert vtime to use nsec units directly
  ia64, sched/cputime: Move the nsecs based cputime headers to the last arch using it
  sched/cputime: Remove jiffies based cputime
  sched/cputime, vtime: Return nsecs instead of cputime_t to account
  sched/cputime: Complete nsec conversion of tick based accounting
  ...
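The heart of the cputime series is replacing the arch-defined cputime_t unit with plain u64 nanoseconds, so call sites use generic nsec helpers instead of per-arch conversion macros. A minimal userspace sketch of the nanoseconds-to-jiffies conversion the hunks below switch to (HZ here is an assumed tick rate; the kernel's real nsecs_to_jiffies() also handles rounding cases this sketch ignores):

#include <stdint.h>
#include <stdio.h>

#define HZ 100                          /* assumed tick rate, illustration only */
#define NSEC_PER_SEC 1000000000ULL

/* Sketch of nsecs_to_jiffies(): nanoseconds -> scheduler ticks. */
static unsigned long nsecs_to_jiffies_sketch(uint64_t n)
{
	return n / (NSEC_PER_SEC / HZ);
}

int main(void)
{
	uint64_t utime_ns = 2500000000ULL;	/* 2.5s of CPU time in nsecs */

	/* Old code went cputime_t -> jiffies via per-arch factors;
	 * new code just divides a nanosecond count by the tick period. */
	printf("%lu jiffies\n", nsecs_to_jiffies_sketch(utime_ns));	/* 250 */
	return 0;
}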
commit 828cad8ea0
@@ -408,6 +408,11 @@ CONTENTS
  * the new scheduling related syscalls that manipulate it, i.e.,
    sched_setattr() and sched_getattr() are implemented.
 
+ For debugging purposes, the leftover runtime and absolute deadline of a
+ SCHED_DEADLINE task can be retrieved through /proc/<pid>/sched (entries
+ dl.runtime and dl.deadline, both values in ns). A programmatic way to
+ retrieve these values from production code is under discussion.
+
 
 4.3 Default behavior
 ---------------------
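The new documentation points debuggers at /proc/<pid>/sched. A hedged sketch of reading the two entries from userspace (the dl.runtime/dl.deadline field names come from the hunk above; the exact field spacing in that file is an assumption of this sketch):

#include <stdio.h>

/* Scan /proc/<pid>/sched for the dl.runtime / dl.deadline entries
 * (both in nanoseconds) described in the documentation hunk above. */
static int read_dl_params(int pid, long long *runtime, long long *deadline)
{
	char path[64], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/sched", pid);
	f = fopen(path, "r");
	if (!f)
		return -1;

	*runtime = *deadline = -1;
	while (fgets(line, sizeof(line), f)) {
		/* A space in the format matches any run of whitespace. */
		if (sscanf(line, " dl.runtime : %lld", runtime) == 1)
			continue;
		sscanf(line, " dl.deadline : %lld", deadline);
	}
	fclose(f);
	return 0;
}

int main(void)
{
	long long runtime, deadline;

	if (read_dl_params(1, &runtime, &deadline) == 0)
		printf("dl.runtime=%lld ns dl.deadline=%lld ns\n",
		       runtime, deadline);
	return 0;
}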
@@ -476,6 +481,7 @@ CONTENTS
 
 Still missing:
 
+ - programmatic way to retrieve current runtime and absolute deadline
  - refinements to deadline inheritance, especially regarding the possibility
    of retaining bandwidth isolation among non-interacting tasks. This is
    being studied from both theoretical and practical points of view, and
@@ -158,11 +158,11 @@ as its prone to starvation without deadline scheduling.
 Consider two sibling groups A and B; both have 50% bandwidth, but A's
 period is twice the length of B's.
 
-* group A: period=100000us, runtime=10000us
-	- this runs for 0.01s once every 0.1s
+* group A: period=100000us, runtime=50000us
+	- this runs for 0.05s once every 0.1s
 
-* group B: period= 50000us, runtime=10000us
-	- this runs for 0.01s twice every 0.1s (or once every 0.05 sec).
+* group B: period= 50000us, runtime=25000us
+	- this runs for 0.025s twice every 0.1s (or once every 0.05 sec).
 
 This means that currently a while (1) loop in A will run for the full period of
 B and can starve B's tasks (assuming they are of lower priority) for a whole
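These period/runtime pairs map onto the cpu controller's cpu.rt_period_us and cpu.rt_runtime_us files described elsewhere in sched-rt-group.txt. A hedged sketch of configuring group A from userspace (the cgroup-v1 mount point is an assumption):

#include <stdio.h>

/* Write one value into a cgroup-v1 cpu controller file.
 * /sys/fs/cgroup/cpu is an assumed mount point. */
static int write_cg(const char *group, const char *file, long val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/cgroup/cpu/%s/%s", group, file);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	/* Group A from the example: 50ms of RT runtime per 100ms period. */
	write_cg("A", "cpu.rt_period_us", 100000);
	write_cg("A", "cpu.rt_runtime_us", 50000);
	return 0;
}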
@@ -1,7 +1,6 @@
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += export.h
 generic-y += irq_work.h
@@ -1145,7 +1145,7 @@ struct rusage32 {
 SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 {
 	struct rusage32 r;
-	cputime_t utime, stime;
+	u64 utime, stime;
 	unsigned long utime_jiffies, stime_jiffies;
 
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
@@ -1155,16 +1155,16 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 	switch (who) {
 	case RUSAGE_SELF:
 		task_cputime(current, &utime, &stime);
-		utime_jiffies = cputime_to_jiffies(utime);
-		stime_jiffies = cputime_to_jiffies(stime);
+		utime_jiffies = nsecs_to_jiffies(utime);
+		stime_jiffies = nsecs_to_jiffies(stime);
 		jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
 		jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
 		r.ru_minflt = current->min_flt;
 		r.ru_majflt = current->maj_flt;
 		break;
 	case RUSAGE_CHILDREN:
-		utime_jiffies = cputime_to_jiffies(current->signal->cutime);
-		stime_jiffies = cputime_to_jiffies(current->signal->cstime);
+		utime_jiffies = nsecs_to_jiffies(current->signal->cutime);
+		stime_jiffies = nsecs_to_jiffies(current->signal->cstime);
 		jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
 		jiffies_to_timeval32(stime_jiffies, &r.ru_stime);
 		r.ru_minflt = current->signal->cmin_flt;
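Note that osf_getrusage() still round-trips nanoseconds through jiffies before jiffies_to_timeval32(), so sub-tick precision is dropped. A hedged sketch of the direct nanoseconds-to-timeval split that helpers like ns_to_timeval() perform (plain C; not the kernel implementation, which also handles negative remainders):

#include <stdio.h>

#define NSEC_PER_SEC  1000000000LL
#define NSEC_PER_USEC 1000LL

struct timeval_ish { long long tv_sec, tv_usec; };

/* Split a nanosecond count into seconds + microseconds,
 * the same shape as the kernel's ns_to_timeval(). */
static struct timeval_ish ns_to_tv(long long ns)
{
	struct timeval_ish tv;

	tv.tv_sec = ns / NSEC_PER_SEC;
	tv.tv_usec = (ns % NSEC_PER_SEC) / NSEC_PER_USEC;
	return tv;
}

int main(void)
{
	struct timeval_ish tv = ns_to_tv(2500000000LL);	/* 2.5 s */

	printf("%lld s %lld us\n", tv.tv_sec, tv.tv_usec);	/* 2 s 500000 us */
	return 0;
}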
@@ -2,7 +2,6 @@ generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
@@ -2,7 +2,6 @@
 
 generic-y += bitsperlong.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += early_ioremap.h
 generic-y += emergency-restart.h
@@ -1,6 +1,5 @@
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += delay.h
 generic-y += div64.h
 generic-y += dma.h
@@ -1,6 +1,5 @@
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += delay.h
 generic-y += device.h
 generic-y += div64.h
@@ -2,7 +2,6 @@
 generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
@@ -5,7 +5,6 @@ generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
@@ -4,7 +4,6 @@ generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
 generic-y += cmpxchg.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += errno.h
@@ -1,6 +1,5 @@
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
@@ -5,7 +5,6 @@ generic-y += bugs.h
 generic-y += cacheflush.h
 generic-y += checksum.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += delay.h
 generic-y += device.h
@@ -6,7 +6,6 @@ generic-y += barrier.h
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
@@ -18,11 +18,7 @@
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-# include <asm-generic/cputime.h>
-#else
-# include <asm/processor.h>
-# include <asm-generic/cputime_nsecs.h>
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 extern void arch_vtime_task_switch(struct task_struct *tsk);
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
@@ -27,6 +27,12 @@ struct thread_info {
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+	__u64 utime;
+	__u64 stime;
+	__u64 gtime;
+	__u64 hardirq_time;
+	__u64 softirq_time;
+	__u64 idle_time;
 	__u64 ac_stamp;
 	__u64 ac_leave;
 	__u64 ac_stime;
@@ -1031,7 +1031,7 @@ GLOBAL_ENTRY(ia64_native_sched_clock)
 END(ia64_native_sched_clock)
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-GLOBAL_ENTRY(cycle_to_cputime)
+GLOBAL_ENTRY(cycle_to_nsec)
 	alloc r16=ar.pfs,1,0,0,0
 	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
 	;;
@@ -1047,7 +1047,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
 	;;
 	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
 	br.ret.sptk.many rp
-END(cycle_to_cputime)
+END(cycle_to_nsec)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_IA64_BRL_EMU
@@ -619,6 +619,8 @@ setup_arch (char **cmdline_p)
 	check_sal_cache_flush();
 #endif
 	paging_init();
+
+	clear_sched_clock_stable();
 }
 
 /*
@@ -21,6 +21,7 @@
 #include <linux/timex.h>
 #include <linux/timekeeper_internal.h>
 #include <linux/platform_device.h>
+#include <linux/cputime.h>
 
 #include <asm/machvec.h>
 #include <asm/delay.h>
@@ -59,18 +60,43 @@ static struct clocksource *itc_clocksource;
 
 #include <linux/kernel_stat.h>
 
-extern cputime_t cycle_to_cputime(u64 cyc);
+extern u64 cycle_to_nsec(u64 cyc);
 
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
-	cputime_t delta_utime;
 	struct thread_info *ti = task_thread_info(tsk);
+	u64 delta;
 
-	if (ti->ac_utime) {
-		delta_utime = cycle_to_cputime(ti->ac_utime);
-		account_user_time(tsk, delta_utime);
-		ti->ac_utime = 0;
+	if (ti->utime)
+		account_user_time(tsk, cycle_to_nsec(ti->utime));
+
+	if (ti->gtime)
+		account_guest_time(tsk, cycle_to_nsec(ti->gtime));
+
+	if (ti->idle_time)
+		account_idle_time(cycle_to_nsec(ti->idle_time));
+
+	if (ti->stime) {
+		delta = cycle_to_nsec(ti->stime);
+		account_system_index_time(tsk, delta, CPUTIME_SYSTEM);
 	}
+
+	if (ti->hardirq_time) {
+		delta = cycle_to_nsec(ti->hardirq_time);
+		account_system_index_time(tsk, delta, CPUTIME_IRQ);
+	}
+
+	if (ti->softirq_time) {
+		delta = cycle_to_nsec(ti->softirq_time);
+		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
+	}
+
+	ti->utime = 0;
+	ti->gtime = 0;
+	ti->idle_time = 0;
+	ti->stime = 0;
+	ti->hardirq_time = 0;
+	ti->softirq_time = 0;
 }
 
 /*
@@ -83,7 +109,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
 	struct thread_info *pi = task_thread_info(prev);
 	struct thread_info *ni = task_thread_info(current);
 
-	pi->ac_stamp = ni->ac_stamp;
+	ni->ac_stamp = pi->ac_stamp;
 	ni->ac_stime = ni->ac_utime = 0;
 }
 
@@ -91,18 +117,15 @@ void arch_vtime_task_switch(struct task_struct *prev)
  * Account time for a transition between system, hard irq or soft irq state.
  * Note that this function is called with interrupts enabled.
  */
-static cputime_t vtime_delta(struct task_struct *tsk)
+static __u64 vtime_delta(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	cputime_t delta_stime;
-	__u64 now;
+	__u64 now, delta_stime;
 
 	WARN_ON_ONCE(!irqs_disabled());
 
 	now = ia64_get_itc();
-	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
-	ti->ac_stime = 0;
+	delta_stime = now - ti->ac_stamp;
 	ti->ac_stamp = now;
 
 	return delta_stime;
@@ -110,15 +133,25 @@ static cputime_t vtime_delta(struct task_struct *tsk)
 
 void vtime_account_system(struct task_struct *tsk)
 {
-	cputime_t delta = vtime_delta(tsk);
+	struct thread_info *ti = task_thread_info(tsk);
+	__u64 stime = vtime_delta(tsk);
 
-	account_system_time(tsk, 0, delta);
+	if ((tsk->flags & PF_VCPU) && !irq_count())
+		ti->gtime += stime;
+	else if (hardirq_count())
+		ti->hardirq_time += stime;
+	else if (in_serving_softirq())
+		ti->softirq_time += stime;
+	else
+		ti->stime += stime;
 }
 EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-	account_idle_time(vtime_delta(tsk));
+	struct thread_info *ti = task_thread_info(tsk);
+
+	ti->idle_time += vtime_delta(tsk);
 }
 
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
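The ia64 conversion above follows a pattern repeated across arches in this merge: hot paths only accumulate raw cycle deltas into per-thread counters, and a single vtime_flush() converts the totals and hands them to the generic accounting core. A hedged, self-contained sketch of that accumulate-then-flush shape (the struct, the 2-cycles-per-ns ratio, and the printf stand-ins are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* Illustrative per-thread counters, mirroring the thread_info fields
 * added above (raw timestamp-counter cycles, not nanoseconds). */
struct vtime_counters {
	uint64_t stime, idle_time;	/* accumulated cycle deltas */
	uint64_t last_stamp;		/* last counter snapshot */
};

/* Assumed fixed ratio for the sketch; ia64 computes this per CPU. */
static uint64_t cycle_to_nsec(uint64_t cyc) { return cyc / 2; }

/* Hot path: bank the raw delta only -- no unit conversion here. */
static void vtime_account(struct vtime_counters *vt, uint64_t now, int idle)
{
	uint64_t delta = now - vt->last_stamp;

	vt->last_stamp = now;
	if (idle)
		vt->idle_time += delta;
	else
		vt->stime += delta;
}

/* Cold path: convert once and flush the totals (printf stands in for
 * account_system_index_time() / account_idle_time()). */
static void vtime_flush(struct vtime_counters *vt)
{
	if (vt->stime)
		printf("system: %llu ns\n",
		       (unsigned long long)cycle_to_nsec(vt->stime));
	if (vt->idle_time)
		printf("idle:   %llu ns\n",
		       (unsigned long long)cycle_to_nsec(vt->idle_time));
	vt->stime = vt->idle_time = 0;
}

int main(void)
{
	struct vtime_counters vt = { .last_stamp = 1000 };

	vtime_account(&vt, 5000, 0);	/* 4000 cycles of system time */
	vtime_account(&vt, 9000, 1);	/* 4000 cycles of idle time */
	vtime_flush(&vt);		/* prints 2000 ns each */
	return 0;
}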
@@ -1,6 +1,5 @@
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += kvm_para.h
@@ -1,7 +1,6 @@
 generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += emergency-restart.h
 generic-y += errno.h
@@ -2,7 +2,6 @@ generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += dma.h
@@ -1,7 +1,6 @@
 
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += exec.h
 generic-y += irq_work.h
@@ -1,7 +1,6 @@
 # MIPS headers
 generic-$(CONFIG_GENERIC_CSUM) += checksum.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
@@ -99,15 +99,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-	unsigned long jiffies = cputime_to_jiffies(cputime);
-
-	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
-	value->tv_sec = jiffies / HZ;
-}
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 #include "../../../fs/binfmt_elf.c"
@@ -102,15 +102,7 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
 #undef TASK_SIZE
 #define TASK_SIZE TASK_SIZE32
 
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-	unsigned long jiffies = cputime_to_jiffies(cputime);
-
-	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
-	value->tv_sec = jiffies / HZ;
-}
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 #include "../../../fs/binfmt_elf.c"
@@ -1,7 +1,6 @@
 
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += exec.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
@@ -6,7 +6,6 @@ generic-y += bitsperlong.h
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
@@ -12,7 +12,6 @@ generic-y += checksum.h
 generic-y += clkdev.h
 generic-y += cmpxchg-local.h
 generic-y += cmpxchg.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
@@ -2,7 +2,6 @@
 generic-y += auxvec.h
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += emergency-restart.h
@@ -91,14 +91,7 @@ struct elf_prpsinfo32
 	current->thread.map_base = DEFAULT_MAP_BASE32; \
 	current->thread.task_size = DEFAULT_TASK_SIZE32 \
 
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
-cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
-{
-	unsigned long jiffies = cputime_to_jiffies(cputime);
-	value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
-	value->tv_sec = jiffies / HZ;
-}
+#undef ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 #include "../../../fs/binfmt_elf.c"
@@ -36,6 +36,7 @@
 #undef PCI_DEBUG
 #include <linux/proc_fs.h>
 #include <linux/export.h>
+#include <linux/sched.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
@@ -176,6 +177,7 @@ void __init setup_arch(char **cmdline_p)
 	conswitchp = &dummy_con;	/* we use do_take_over_console() later ! */
 #endif
 
+	clear_sched_clock_stable();
 }
 
 /*
@@ -12,9 +12,17 @@
 
 /* Stuff for accurate time accounting */
 struct cpu_accounting_data {
-	unsigned long user_time;	/* accumulated usermode TB ticks */
-	unsigned long system_time;	/* accumulated system TB ticks */
-	unsigned long user_time_scaled;	/* accumulated usermode SPURR ticks */
+	/* Accumulated cputime values to flush on ticks */
+	unsigned long utime;
+	unsigned long stime;
+	unsigned long utime_scaled;
+	unsigned long stime_scaled;
+	unsigned long gtime;
+	unsigned long hardirq_time;
+	unsigned long softirq_time;
+	unsigned long steal_time;
+	unsigned long idle_time;
+	/* Internal counters */
 	unsigned long starttime;	/* TB value snapshot */
 	unsigned long starttime_user;	/* TB value on exit to usermode */
 	unsigned long startspurr;	/* SPURR value snapshot */
@@ -16,12 +16,7 @@
 #ifndef __POWERPC_CPUTIME_H
 #define __POWERPC_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm-generic/cputime.h>
-#ifdef __KERNEL__
-static inline void setup_cputime_one_jiffy(void) { }
-#endif
-#else
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 
 #include <linux/types.h>
 #include <linux/time.h>
@@ -36,65 +31,6 @@ typedef u64 __nocast cputime64_t;
 #define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
 
 #ifdef __KERNEL__
-
-/*
- * One jiffy in timebase units computed during initialization
- */
-extern cputime_t cputime_one_jiffy;
-
-/*
- * Convert cputime <-> jiffies
- */
-extern u64 __cputime_jiffies_factor;
-
-static inline unsigned long cputime_to_jiffies(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
-}
-
-static inline cputime_t jiffies_to_cputime(const unsigned long jif)
-{
-	u64 ct;
-	unsigned long sec;
-
-	/* have to be a little careful about overflow */
-	ct = jif % HZ;
-	sec = jif / HZ;
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, HZ);
-	}
-	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return (__force cputime_t) ct;
-}
-
-static inline void setup_cputime_one_jiffy(void)
-{
-	cputime_one_jiffy = jiffies_to_cputime(1);
-}
-
-static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
-{
-	u64 ct;
-	u64 sec = jif;
-
-	/* have to be a little careful about overflow */
-	ct = do_div(sec, HZ);
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, HZ);
-	}
-	if (sec)
-		ct += (u64) sec * tb_ticks_per_sec;
-	return (__force cputime64_t) ct;
-}
-
-static inline u64 cputime64_to_jiffies64(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_jiffies_factor);
-}
-
 /*
  * Convert cputime <-> microseconds
  */
@@ -105,117 +41,6 @@ static inline unsigned long cputime_to_usecs(const cputime_t ct)
 	return mulhdu((__force u64) ct, __cputime_usec_factor);
 }
 
-static inline cputime_t usecs_to_cputime(const unsigned long us)
-{
-	u64 ct;
-	unsigned long sec;
-
-	/* have to be a little careful about overflow */
-	ct = us % 1000000;
-	sec = us / 1000000;
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, 1000000);
-	}
-	if (sec)
-		ct += (cputime_t) sec * tb_ticks_per_sec;
-	return (__force cputime_t) ct;
-}
-
-#define usecs_to_cputime64(us)	usecs_to_cputime(us)
-
-/*
- * Convert cputime <-> seconds
- */
-extern u64 __cputime_sec_factor;
-
-static inline unsigned long cputime_to_secs(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_sec_factor);
-}
-
-static inline cputime_t secs_to_cputime(const unsigned long sec)
-{
-	return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> timespec
- */
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
-{
-	u64 x = (__force u64) ct;
-	unsigned int frac;
-
-	frac = do_div(x, tb_ticks_per_sec);
-	p->tv_sec = x;
-	x = (u64) frac * 1000000000;
-	do_div(x, tb_ticks_per_sec);
-	p->tv_nsec = x;
-}
-
-static inline cputime_t timespec_to_cputime(const struct timespec *p)
-{
-	u64 ct;
-
-	ct = (u64) p->tv_nsec * tb_ticks_per_sec;
-	do_div(ct, 1000000000);
-	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> timeval
- */
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
-{
-	u64 x = (__force u64) ct;
-	unsigned int frac;
-
-	frac = do_div(x, tb_ticks_per_sec);
-	p->tv_sec = x;
-	x = (u64) frac * 1000000;
-	do_div(x, tb_ticks_per_sec);
-	p->tv_usec = x;
-}
-
-static inline cputime_t timeval_to_cputime(const struct timeval *p)
-{
-	u64 ct;
-
-	ct = (u64) p->tv_usec * tb_ticks_per_sec;
-	do_div(ct, 1000000);
-	return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
-}
-
-/*
- * Convert cputime <-> clock_t (units of 1/USER_HZ seconds)
- */
-extern u64 __cputime_clockt_factor;
-
-static inline unsigned long cputime_to_clock_t(const cputime_t ct)
-{
-	return mulhdu((__force u64) ct, __cputime_clockt_factor);
-}
-
-static inline cputime_t clock_t_to_cputime(const unsigned long clk)
-{
-	u64 ct;
-	unsigned long sec;
-
-	/* have to be a little careful about overflow */
-	ct = clk % USER_HZ;
-	sec = clk / USER_HZ;
-	if (ct) {
-		ct *= tb_ticks_per_sec;
-		do_div(ct, USER_HZ);
-	}
-	if (sec)
-		ct += (u64) sec * tb_ticks_per_sec;
-	return (__force cputime_t) ct;
-}
-
-#define cputime64_to_clock_t(ct)	cputime_to_clock_t((cputime_t)(ct))
-
 /*
  * PPC64 uses PACA which is task independent for storing accounting data while
  * PPC32 uses struct thread_info, therefore at task switch the accounting data
@@ -187,7 +187,6 @@ struct paca_struct {
 
 	/* Stuff for accurate time accounting */
 	struct cpu_accounting_data accounting;
-	u64 stolen_time;		/* TB ticks taken by hypervisor */
 	u64 dtl_ridx;			/* read index in dispatch log */
 	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
@@ -249,9 +249,9 @@ int main(void)
 	DEFINE(ACCOUNT_STARTTIME_USER,
 	       offsetof(struct paca_struct, accounting.starttime_user));
 	DEFINE(ACCOUNT_USER_TIME,
-	       offsetof(struct paca_struct, accounting.user_time));
+	       offsetof(struct paca_struct, accounting.utime));
 	DEFINE(ACCOUNT_SYSTEM_TIME,
-	       offsetof(struct paca_struct, accounting.system_time));
+	       offsetof(struct paca_struct, accounting.stime));
 	DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
 	DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
 	DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
@@ -262,9 +262,9 @@ int main(void)
 	DEFINE(ACCOUNT_STARTTIME_USER,
 	       offsetof(struct thread_info, accounting.starttime_user));
 	DEFINE(ACCOUNT_USER_TIME,
-	       offsetof(struct thread_info, accounting.user_time));
+	       offsetof(struct thread_info, accounting.utime));
 	DEFINE(ACCOUNT_SYSTEM_TIME,
-	       offsetof(struct thread_info, accounting.system_time));
+	       offsetof(struct thread_info, accounting.stime));
 #endif
 #endif /* CONFIG_PPC64 */
 
@@ -57,6 +57,7 @@
 #include <linux/clk-provider.h>
 #include <linux/suspend.h>
 #include <linux/rtc.h>
+#include <linux/cputime.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -72,7 +73,6 @@
 #include <asm/smp.h>
 #include <asm/vdso_datapage.h>
 #include <asm/firmware.h>
-#include <asm/cputime.h>
 #include <asm/asm-prototypes.h>
 
 /* powerpc clocksource/clockevent code */
@@ -152,20 +152,11 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
- * Factors for converting from cputime_t (timebase ticks) to
- * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
- * These are all stored as 0.64 fixed-point binary fractions.
+ * Factor for converting from cputime_t (timebase ticks) to
+ * microseconds. This is stored as 0.64 fixed-point binary fraction.
  */
-u64 __cputime_jiffies_factor;
-EXPORT_SYMBOL(__cputime_jiffies_factor);
 u64 __cputime_usec_factor;
 EXPORT_SYMBOL(__cputime_usec_factor);
-u64 __cputime_sec_factor;
-EXPORT_SYMBOL(__cputime_sec_factor);
-u64 __cputime_clockt_factor;
-EXPORT_SYMBOL(__cputime_clockt_factor);
-
-cputime_t cputime_one_jiffy;
 
 #ifdef CONFIG_PPC_SPLPAR
 void (*dtl_consumer)(struct dtl_entry *, u64);
@@ -181,14 +172,8 @@ static void calc_cputime_factors(void)
 {
 	struct div_result res;
 
-	div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
-	__cputime_jiffies_factor = res.result_low;
 	div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
 	__cputime_usec_factor = res.result_low;
-	div128_by_32(1, 0, tb_ticks_per_sec, &res);
-	__cputime_sec_factor = res.result_low;
-	div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
-	__cputime_clockt_factor = res.result_low;
 }
 
 /*
@@ -271,25 +256,19 @@ void accumulate_stolen_time(void)
 
 	sst = scan_dispatch_log(acct->starttime_user);
 	ust = scan_dispatch_log(acct->starttime);
-	acct->system_time -= sst;
-	acct->user_time -= ust;
-	local_paca->stolen_time += ust + sst;
+	acct->stime -= sst;
+	acct->utime -= ust;
+	acct->steal_time += ust + sst;
 
 	local_paca->soft_enabled = save_soft_enabled;
 }
 
 static inline u64 calculate_stolen_time(u64 stop_tb)
 {
-	u64 stolen = 0;
-
-	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
-		stolen = scan_dispatch_log(stop_tb);
-		get_paca()->accounting.system_time -= stolen;
-	}
-
-	stolen += get_paca()->stolen_time;
-	get_paca()->stolen_time = 0;
-	return stolen;
+	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx))
+		return scan_dispatch_log(stop_tb);
+
+	return 0;
 }
 
 #else /* CONFIG_PPC_SPLPAR */
@@ -305,28 +284,27 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
  * or soft irq state.
  */
 static unsigned long vtime_delta(struct task_struct *tsk,
-				 unsigned long *sys_scaled,
-				 unsigned long *stolen)
+				 unsigned long *stime_scaled,
+				 unsigned long *steal_time)
 {
 	unsigned long now, nowscaled, deltascaled;
-	unsigned long udelta, delta, user_scaled;
+	unsigned long stime;
+	unsigned long utime, utime_scaled;
 	struct cpu_accounting_data *acct = get_accounting(tsk);
 
 	WARN_ON_ONCE(!irqs_disabled());
 
 	now = mftb();
 	nowscaled = read_spurr(now);
-	acct->system_time += now - acct->starttime;
+	stime = now - acct->starttime;
 	acct->starttime = now;
 	deltascaled = nowscaled - acct->startspurr;
 	acct->startspurr = nowscaled;
 
-	*stolen = calculate_stolen_time(now);
+	*steal_time = calculate_stolen_time(now);
 
-	delta = acct->system_time;
-	acct->system_time = 0;
-	udelta = acct->user_time - acct->utime_sspurr;
-	acct->utime_sspurr = acct->user_time;
+	utime = acct->utime - acct->utime_sspurr;
+	acct->utime_sspurr = acct->utime;
 
 	/*
 	 * Because we don't read the SPURR on every kernel entry/exit,
@@ -338,62 +316,105 @@ static unsigned long vtime_delta(struct task_struct *tsk,
 	 * the user ticks get saved up in paca->user_time_scaled to be
 	 * used by account_process_tick.
	 */
-	*sys_scaled = delta;
-	user_scaled = udelta;
-	if (deltascaled != delta + udelta) {
-		if (udelta) {
-			*sys_scaled = deltascaled * delta / (delta + udelta);
-			user_scaled = deltascaled - *sys_scaled;
+	*stime_scaled = stime;
+	utime_scaled = utime;
+	if (deltascaled != stime + utime) {
+		if (utime) {
+			*stime_scaled = deltascaled * stime / (stime + utime);
+			utime_scaled = deltascaled - *stime_scaled;
 		} else {
-			*sys_scaled = deltascaled;
+			*stime_scaled = deltascaled;
 		}
 	}
-	acct->user_time_scaled += user_scaled;
+	acct->utime_scaled += utime_scaled;
 
-	return delta;
+	return stime;
 }
 
 void vtime_account_system(struct task_struct *tsk)
 {
-	unsigned long delta, sys_scaled, stolen;
+	unsigned long stime, stime_scaled, steal_time;
+	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	delta = vtime_delta(tsk, &sys_scaled, &stolen);
-	account_system_time(tsk, 0, delta);
-	tsk->stimescaled += sys_scaled;
-	if (stolen)
-		account_steal_time(stolen);
+	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+
+	stime -= min(stime, steal_time);
+	acct->steal_time += steal_time;
+
+	if ((tsk->flags & PF_VCPU) && !irq_count()) {
+		acct->gtime += stime;
+		acct->utime_scaled += stime_scaled;
+	} else {
+		if (hardirq_count())
+			acct->hardirq_time += stime;
+		else if (in_serving_softirq())
+			acct->softirq_time += stime;
+		else
+			acct->stime += stime;
+
+		acct->stime_scaled += stime_scaled;
+	}
 }
 EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-	unsigned long delta, sys_scaled, stolen;
+	unsigned long stime, stime_scaled, steal_time;
+	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	delta = vtime_delta(tsk, &sys_scaled, &stolen);
-	account_idle_time(delta + stolen);
+	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+	acct->idle_time += stime + steal_time;
 }
 
 /*
- * Transfer the user time accumulated in the paca
- * by the exception entry and exit code to the generic
- * process user time records.
+ * Account the whole cputime accumulated in the paca
 * Must be called with interrupts disabled.
 * Assumes that vtime_account_system/idle() has been called
 * recently (i.e. since the last entry from usermode) so that
 * get_paca()->user_time_scaled is up to date.
 */
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
-	cputime_t utime, utimescaled;
 	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	utime = acct->user_time;
-	utimescaled = acct->user_time_scaled;
-	acct->user_time = 0;
-	acct->user_time_scaled = 0;
+	if (acct->utime)
+		account_user_time(tsk, cputime_to_nsecs(acct->utime));
+
+	if (acct->utime_scaled)
+		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
+
+	if (acct->gtime)
+		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
+
+	if (acct->steal_time)
+		account_steal_time(cputime_to_nsecs(acct->steal_time));
+
+	if (acct->idle_time)
+		account_idle_time(cputime_to_nsecs(acct->idle_time));
+
+	if (acct->stime)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
+					  CPUTIME_SYSTEM);
+	if (acct->stime_scaled)
+		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
+
+	if (acct->hardirq_time)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
+					  CPUTIME_IRQ);
+	if (acct->softirq_time)
+		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
					  CPUTIME_SOFTIRQ);
+
+	acct->utime = 0;
+	acct->utime_scaled = 0;
 	acct->utime_sspurr = 0;
-	account_user_time(tsk, utime);
-	tsk->utimescaled += utimescaled;
+	acct->gtime = 0;
+	acct->steal_time = 0;
+	acct->idle_time = 0;
+	acct->stime = 0;
+	acct->stime_scaled = 0;
+	acct->hardirq_time = 0;
+	acct->softirq_time = 0;
 }
 
 #ifdef CONFIG_PPC32
@@ -407,8 +428,7 @@ void arch_vtime_task_switch(struct task_struct *prev)
 	struct cpu_accounting_data *acct = get_accounting(current);
 
 	acct->starttime = get_accounting(prev)->starttime;
-	acct->system_time = 0;
-	acct->user_time = 0;
+	acct->startspurr = get_accounting(prev)->startspurr;
 }
 #endif /* CONFIG_PPC32 */
 
@@ -1018,7 +1038,6 @@ void __init time_init(void)
 	tb_ticks_per_sec = ppc_tb_freq;
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	calc_cputime_factors();
-	setup_cputime_one_jiffy();
 
 	/*
	 * Compute scale factor for sched_clock.
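calc_cputime_factors() above stores a 2^64-scaled fraction so that a tick-to-microsecond conversion is a single high-part multiply (mulhdu), with no division on the hot path. A hedged sketch of the same 0.64 fixed-point trick using compiler 128-bit arithmetic (the timebase frequency is an assumed example value; the kernel uses div128_by_32 instead of __int128):

#include <stdint.h>
#include <stdio.h>

/* factor = 2^64 * 1000000 / tb_ticks_per_sec, so that
 * usecs = high64(ticks * factor)  -- one multiply, no division. */
int main(void)
{
	const uint64_t tb_ticks_per_sec = 512000000;	/* assumed 512 MHz timebase */
	uint64_t factor = (uint64_t)((((unsigned __int128)1000000) << 64)
				     / tb_ticks_per_sec);

	uint64_t ticks = 1024000000;	/* 2 seconds worth of timebase ticks */
	uint64_t usecs = (uint64_t)(((unsigned __int128)ticks * factor) >> 64);

	printf("%llu usecs\n", (unsigned long long)usecs);	/* 2000000 */
	return 0;
}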
@@ -2287,14 +2287,14 @@ static void dump_one_paca(int cpu)
 	DUMP(p, subcore_sibling_mask, "x");
 #endif
 
-	DUMP(p, accounting.user_time, "llx");
-	DUMP(p, accounting.system_time, "llx");
-	DUMP(p, accounting.user_time_scaled, "llx");
+	DUMP(p, accounting.utime, "llx");
+	DUMP(p, accounting.stime, "llx");
+	DUMP(p, accounting.utime_scaled, "llx");
 	DUMP(p, accounting.starttime, "llx");
 	DUMP(p, accounting.starttime_user, "llx");
 	DUMP(p, accounting.startspurr, "llx");
 	DUMP(p, accounting.utime_sspurr, "llx");
-	DUMP(p, stolen_time, "llx");
+	DUMP(p, accounting.steal_time, "llx");
 #undef DUMP
 
 	catch_memory_errors = 0;
@@ -113,21 +113,21 @@ static void appldata_get_os_data(void *data)
 	j = 0;
 	for_each_online_cpu(i) {
 		os_data->os_cpu[j].per_cpu_user =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
 		os_data->os_cpu[j].per_cpu_nice =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
 		os_data->os_cpu[j].per_cpu_system =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
 		os_data->os_cpu[j].per_cpu_idle =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
 		os_data->os_cpu[j].per_cpu_irq =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
 		os_data->os_cpu[j].per_cpu_softirq =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
 		os_data->os_cpu[j].per_cpu_iowait =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
 		os_data->os_cpu[j].per_cpu_steal =
-			cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
+			nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
 		os_data->os_cpu[j].cpu_id = i;
 		j++;
 	}
@@ -25,33 +25,6 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
 	return n / base;
 }
 
-#define cputime_one_jiffy		jiffies_to_cputime(1)
-
-/*
- * Convert cputime to jiffies and back.
- */
-static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
-{
-	return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
-}
-
-static inline cputime_t jiffies_to_cputime(const unsigned int jif)
-{
-	return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
-}
-
-static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
-{
-	unsigned long long jif = (__force unsigned long long) cputime;
-	do_div(jif, CPUTIME_PER_SEC / HZ);
-	return jif;
-}
-
-static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
-{
-	return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
-}
-
 /*
  * Convert cputime to microseconds and back.
  */
@@ -60,88 +33,8 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
 	return (__force unsigned long long) cputime >> 12;
 }
 
-static inline cputime_t usecs_to_cputime(const unsigned int m)
-{
-	return (__force cputime_t)(m * CPUTIME_PER_USEC);
-}
-
-#define usecs_to_cputime64(m)		usecs_to_cputime(m)
-
-/*
- * Convert cputime to milliseconds and back.
- */
-static inline unsigned int cputime_to_secs(const cputime_t cputime)
-{
-	return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
-}
-
-static inline cputime_t secs_to_cputime(const unsigned int s)
-{
-	return (__force cputime_t)(s * CPUTIME_PER_SEC);
-}
-
-/*
- * Convert cputime to timespec and back.
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *value)
-{
-	unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
-	return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
-}
-
-static inline void cputime_to_timespec(const cputime_t cputime,
-				       struct timespec *value)
-{
-	unsigned long long __cputime = (__force unsigned long long) cputime;
-	value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
-	value->tv_sec = __cputime / CPUTIME_PER_SEC;
-}
-
-/*
- * Convert cputime to timeval and back.
- * Since cputime and timeval have the same resolution (microseconds)
- * this is easy.
- */
-static inline cputime_t timeval_to_cputime(const struct timeval *value)
-{
-	unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
-	return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
-}
-
-static inline void cputime_to_timeval(const cputime_t cputime,
-				      struct timeval *value)
-{
-	unsigned long long __cputime = (__force unsigned long long) cputime;
-	value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
-	value->tv_sec = __cputime / CPUTIME_PER_SEC;
-}
-
-/*
- * Convert cputime to clock and back.
- */
-static inline clock_t cputime_to_clock_t(cputime_t cputime)
-{
-	unsigned long long clock = (__force unsigned long long) cputime;
-	do_div(clock, CPUTIME_PER_SEC / USER_HZ);
-	return clock;
-}
-
-static inline cputime_t clock_t_to_cputime(unsigned long x)
-{
-	return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
-}
-
-/*
- * Convert cputime64 to clock.
- */
-static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
-{
-	unsigned long long clock = (__force unsigned long long) cputime;
-	do_div(clock, CPUTIME_PER_SEC / USER_HZ);
-	return clock;
-}
-
-cputime64_t arch_cpu_idle_time(int cpu);
+u64 arch_cpu_idle_time(int cpu);
 
 #define arch_idle_time(cpu) arch_cpu_idle_time(cpu)
 
@@ -85,53 +85,56 @@ struct lowcore {
 	__u64	mcck_enter_timer;		/* 0x02c0 */
 	__u64	exit_timer;			/* 0x02c8 */
 	__u64	user_timer;			/* 0x02d0 */
-	__u64	system_timer;			/* 0x02d8 */
-	__u64	steal_timer;			/* 0x02e0 */
-	__u64	last_update_timer;		/* 0x02e8 */
-	__u64	last_update_clock;		/* 0x02f0 */
-	__u64	int_clock;			/* 0x02f8 */
-	__u64	mcck_clock;			/* 0x0300 */
-	__u64	clock_comparator;		/* 0x0308 */
+	__u64	guest_timer;			/* 0x02d8 */
+	__u64	system_timer;			/* 0x02e0 */
+	__u64	hardirq_timer;			/* 0x02e8 */
+	__u64	softirq_timer;			/* 0x02f0 */
+	__u64	steal_timer;			/* 0x02f8 */
+	__u64	last_update_timer;		/* 0x0300 */
+	__u64	last_update_clock;		/* 0x0308 */
+	__u64	int_clock;			/* 0x0310 */
+	__u64	mcck_clock;			/* 0x0318 */
+	__u64	clock_comparator;		/* 0x0320 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x0310 */
-	__u8	pad_0x318[0x320-0x318];		/* 0x0318 */
-	__u64	kernel_stack;			/* 0x0320 */
+	__u64	current_task;			/* 0x0328 */
+	__u8	pad_0x318[0x320-0x318];		/* 0x0330 */
+	__u64	kernel_stack;			/* 0x0338 */
 
 	/* Interrupt, panic and restart stack. */
-	__u64	async_stack;			/* 0x0328 */
-	__u64	panic_stack;			/* 0x0330 */
-	__u64	restart_stack;			/* 0x0338 */
+	__u64	async_stack;			/* 0x0340 */
+	__u64	panic_stack;			/* 0x0348 */
+	__u64	restart_stack;			/* 0x0350 */
 
 	/* Restart function and parameter. */
-	__u64	restart_fn;			/* 0x0340 */
-	__u64	restart_data;			/* 0x0348 */
-	__u64	restart_source;			/* 0x0350 */
+	__u64	restart_fn;			/* 0x0358 */
+	__u64	restart_data;			/* 0x0360 */
+	__u64	restart_source;			/* 0x0368 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0358 */
-	__u64	user_asce;			/* 0x0360 */
+	__u64	kernel_asce;			/* 0x0370 */
+	__u64	user_asce;			/* 0x0378 */
 
 	/*
	 * The lpp and current_pid fields form a
	 * 64-bit value that is set as program
	 * parameter with the LPP instruction.
	 */
-	__u32	lpp;				/* 0x0368 */
-	__u32	current_pid;			/* 0x036c */
+	__u32	lpp;				/* 0x0380 */
+	__u32	current_pid;			/* 0x0384 */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0370 */
-	__u32	softirq_pending;		/* 0x0374 */
-	__u64	percpu_offset;			/* 0x0378 */
-	__u64	vdso_per_cpu_data;		/* 0x0380 */
-	__u64	machine_flags;			/* 0x0388 */
-	__u32	preempt_count;			/* 0x0390 */
-	__u8	pad_0x0394[0x0398-0x0394];	/* 0x0394 */
-	__u64	gmap;				/* 0x0398 */
-	__u32	spinlock_lockval;		/* 0x03a0 */
-	__u32	fpu_flags;			/* 0x03a4 */
-	__u8	pad_0x03a8[0x0400-0x03a8];	/* 0x03a8 */
+	__u32	cpu_nr;				/* 0x0388 */
+	__u32	softirq_pending;		/* 0x038c */
+	__u64	percpu_offset;			/* 0x0390 */
+	__u64	vdso_per_cpu_data;		/* 0x0398 */
+	__u64	machine_flags;			/* 0x03a0 */
+	__u32	preempt_count;			/* 0x03a8 */
+	__u8	pad_0x03ac[0x03b0-0x03ac];	/* 0x03ac */
+	__u64	gmap;				/* 0x03b0 */
+	__u32	spinlock_lockval;		/* 0x03b8 */
+	__u32	fpu_flags;			/* 0x03bc */
+	__u8	pad_0x03c0[0x0400-0x03c0];	/* 0x03c0 */
 
 	/* Per cpu primary space access list */
 	__u32	paste[16];			/* 0x0400 */
@@ -111,7 +111,10 @@ struct thread_struct {
 	unsigned int  acrs[NUM_ACRS];
 	unsigned long ksp;			/* kernel stack pointer */
 	unsigned long user_timer;		/* task cputime in user space */
+	unsigned long guest_timer;		/* task cputime in kvm guest */
 	unsigned long system_timer;		/* task cputime in kernel space */
+	unsigned long hardirq_timer;		/* task cputime in hardirq context */
+	unsigned long softirq_timer;		/* task cputime in softirq context */
 	unsigned long sys_call_table;		/* system call table address */
 	mm_segment_t mm_segment;
 	unsigned long gmap_addr;		/* address of last gmap fault. */
@@ -12,7 +12,7 @@
 #include <linux/notifier.h>
 #include <linux/init.h>
 #include <linux/cpu.h>
-#include <asm/cputime.h>
+#include <linux/cputime.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include "entry.h"
@@ -43,7 +43,7 @@ void enabled_wait(void)
 	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
 	idle->idle_time += idle_time;
 	idle->idle_count++;
-	account_idle_time(idle_time);
+	account_idle_time(cputime_to_nsecs(idle_time));
 	write_seqcount_end(&idle->seqcount);
 }
 NOKPROBE_SYMBOL(enabled_wait);
@@ -84,7 +84,7 @@ static ssize_t show_idle_time(struct device *dev,
 }
 DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
-cputime64_t arch_cpu_idle_time(int cpu)
+u64 arch_cpu_idle_time(int cpu)
 {
 	struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
 	unsigned long long now, idle_enter, idle_exit;
@@ -96,7 +96,8 @@ cputime64_t arch_cpu_idle_time(int cpu)
 		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
 		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
 	} while (read_seqcount_retry(&idle->seqcount, seq));
-	return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
+
+	return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
 }
 
 void arch_cpu_idle_enter(void)
@@ -6,13 +6,13 @@
  */
 
 #include <linux/kernel_stat.h>
+#include <linux/cputime.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/timex.h>
 #include <linux/types.h>
 #include <linux/time.h>
 
-#include <asm/cputime.h>
 #include <asm/vtimer.h>
 #include <asm/vtime.h>
 #include <asm/cpu_mf.h>
@@ -90,14 +90,41 @@ static void update_mt_scaling(void)
 	__this_cpu_write(mt_scaling_jiffies, jiffies_64);
 }
 
+static inline u64 update_tsk_timer(unsigned long *tsk_vtime, u64 new)
+{
+	u64 delta;
+
+	delta = new - *tsk_vtime;
+	*tsk_vtime = new;
+	return delta;
+}
+
+static inline u64 scale_vtime(u64 vtime)
+{
+	u64 mult = __this_cpu_read(mt_scaling_mult);
+	u64 div = __this_cpu_read(mt_scaling_div);
+
+	if (smp_cpu_mtid)
+		return vtime * mult / div;
+	return vtime;
+}
+
+static void account_system_index_scaled(struct task_struct *p,
+					cputime_t cputime, cputime_t scaled,
+					enum cpu_usage_stat index)
+{
+	p->stimescaled += cputime_to_nsecs(scaled);
+	account_system_index_time(p, cputime_to_nsecs(cputime), index);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-	u64 timer, clock, user, system, steal;
-	u64 user_scaled, system_scaled;
+	u64 timer, clock, user, guest, system, hardirq, softirq, steal;
 
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
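The new scale_vtime() folds SMT utilization into raw thread time with a simple mult/div ratio sampled from the MT counters. A worked example with hypothetical mult/div values (not taken from real hardware):

	#include <stdint.h>
	#include <stdio.h>

	/* Same shape as scale_vtime(): vtime * mult / div. */
	static uint64_t scale_sketch(uint64_t vtime, uint64_t mult, uint64_t div)
	{
		return vtime * mult / div;
	}

	int main(void)
	{
		/* 10ms of raw thread time, hypothetical ratio 652/1000:
		 * only 6.52ms counts once SMT contention is factored in. */
		printf("%llu\n", (unsigned long long)
		       scale_sketch(10000000ULL, 652, 1000)); /* 6520000 */
		return 0;
	}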
@@ -110,53 +137,76 @@ static int do_account_vtime(struct task_struct *tsk)
 #endif
 		: "=m" (S390_lowcore.last_update_timer),
 		  "=m" (S390_lowcore.last_update_clock));
-	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+	clock = S390_lowcore.last_update_clock - clock;
+	timer -= S390_lowcore.last_update_timer;
+
+	if (hardirq_count())
+		S390_lowcore.hardirq_timer += timer;
+	else
+		S390_lowcore.system_timer += timer;
 
 	/* Update MT utilization calculation */
 	if (smp_cpu_mtid &&
 	    time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
 		update_mt_scaling();
 
-	user = S390_lowcore.user_timer - tsk->thread.user_timer;
-	S390_lowcore.steal_timer -= user;
-	tsk->thread.user_timer = S390_lowcore.user_timer;
-
-	system = S390_lowcore.system_timer - tsk->thread.system_timer;
-	S390_lowcore.steal_timer -= system;
-	tsk->thread.system_timer = S390_lowcore.system_timer;
-
-	user_scaled = user;
-	system_scaled = system;
-	/* Do MT utilization scaling */
-	if (smp_cpu_mtid) {
-		u64 mult = __this_cpu_read(mt_scaling_mult);
-		u64 div = __this_cpu_read(mt_scaling_div);
-
-		user_scaled = (user_scaled * mult) / div;
-		system_scaled = (system_scaled * mult) / div;
-	}
-	account_user_time(tsk, user);
-	tsk->utimescaled += user_scaled;
-	account_system_time(tsk, 0, system);
-	tsk->stimescaled += system_scaled;
+	/* Calculate cputime delta */
+	user = update_tsk_timer(&tsk->thread.user_timer,
+				READ_ONCE(S390_lowcore.user_timer));
+	guest = update_tsk_timer(&tsk->thread.guest_timer,
+				 READ_ONCE(S390_lowcore.guest_timer));
+	system = update_tsk_timer(&tsk->thread.system_timer,
+				  READ_ONCE(S390_lowcore.system_timer));
+	hardirq = update_tsk_timer(&tsk->thread.hardirq_timer,
+				   READ_ONCE(S390_lowcore.hardirq_timer));
+	softirq = update_tsk_timer(&tsk->thread.softirq_timer,
+				   READ_ONCE(S390_lowcore.softirq_timer));
+	S390_lowcore.steal_timer +=
+		clock - user - guest - system - hardirq - softirq;
+
+	/* Push account value */
+	if (user) {
+		account_user_time(tsk, cputime_to_nsecs(user));
+		tsk->utimescaled += cputime_to_nsecs(scale_vtime(user));
+	}
+
+	if (guest) {
+		account_guest_time(tsk, cputime_to_nsecs(guest));
+		tsk->utimescaled += cputime_to_nsecs(scale_vtime(guest));
+	}
+
+	if (system)
+		account_system_index_scaled(tsk, system, scale_vtime(system),
+					    CPUTIME_SYSTEM);
+	if (hardirq)
+		account_system_index_scaled(tsk, hardirq, scale_vtime(hardirq),
+					    CPUTIME_IRQ);
+	if (softirq)
+		account_system_index_scaled(tsk, softirq, scale_vtime(softirq),
+					    CPUTIME_SOFTIRQ);
 
 	steal = S390_lowcore.steal_timer;
 	if ((s64) steal > 0) {
 		S390_lowcore.steal_timer = 0;
-		account_steal_time(steal);
+		account_steal_time(cputime_to_nsecs(steal));
 	}
 
-	return virt_timer_forward(user + system);
+	return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
 void vtime_task_switch(struct task_struct *prev)
 {
 	do_account_vtime(prev);
 	prev->thread.user_timer = S390_lowcore.user_timer;
+	prev->thread.guest_timer = S390_lowcore.guest_timer;
 	prev->thread.system_timer = S390_lowcore.system_timer;
+	prev->thread.hardirq_timer = S390_lowcore.hardirq_timer;
+	prev->thread.softirq_timer = S390_lowcore.softirq_timer;
 	S390_lowcore.user_timer = current->thread.user_timer;
+	S390_lowcore.guest_timer = current->thread.guest_timer;
 	S390_lowcore.system_timer = current->thread.system_timer;
+	S390_lowcore.hardirq_timer = current->thread.hardirq_timer;
+	S390_lowcore.softirq_timer = current->thread.softirq_timer;
 }
 
 /*
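The rewritten do_account_vtime() derives steal time by subtraction: whatever share of the wall-clock delta is not covered by any of the per-context CPU deltas must have been stolen by the hypervisor. A worked example with made-up numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* all values in ns, covering one accounting interval */
		uint64_t clock = 10000000;	/* wall clock delta */
		uint64_t user = 4000000, guest = 1000000, system = 2000000;
		uint64_t hardirq = 500000, softirq = 500000;

		/* the same arithmetic S390_lowcore.steal_timer accumulates;
		 * only accounted when positive, as the diff's (s64) test shows */
		int64_t steal = (int64_t)(clock - user - guest - system -
					  hardirq - softirq);

		printf("steal = %lld ns\n", (long long)steal); /* 2000000 */
		return 0;
	}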
@@ -164,7 +214,7 @@ void vtime_task_switch(struct task_struct *prev)
  * accounting system time in order to correctly compute
  * the stolen time accounting.
  */
-void vtime_account_user(struct task_struct *tsk)
+void vtime_flush(struct task_struct *tsk)
 {
 	if (do_account_vtime(tsk))
 		virt_timer_expire();
|
@ -176,32 +226,22 @@ void vtime_account_user(struct task_struct *tsk)
|
||||||
*/
|
*/
|
||||||
void vtime_account_irq_enter(struct task_struct *tsk)
|
void vtime_account_irq_enter(struct task_struct *tsk)
|
||||||
{
|
{
|
||||||
u64 timer, system, system_scaled;
|
u64 timer;
|
||||||
|
|
||||||
timer = S390_lowcore.last_update_timer;
|
timer = S390_lowcore.last_update_timer;
|
||||||
S390_lowcore.last_update_timer = get_vtimer();
|
S390_lowcore.last_update_timer = get_vtimer();
|
||||||
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
|
timer -= S390_lowcore.last_update_timer;
|
||||||
|
|
||||||
/* Update MT utilization calculation */
|
if ((tsk->flags & PF_VCPU) && (irq_count() == 0))
|
||||||
if (smp_cpu_mtid &&
|
S390_lowcore.guest_timer += timer;
|
||||||
time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies)))
|
else if (hardirq_count())
|
||||||
update_mt_scaling();
|
S390_lowcore.hardirq_timer += timer;
|
||||||
|
else if (in_serving_softirq())
|
||||||
|
S390_lowcore.softirq_timer += timer;
|
||||||
|
else
|
||||||
|
S390_lowcore.system_timer += timer;
|
||||||
|
|
||||||
system = S390_lowcore.system_timer - tsk->thread.system_timer;
|
virt_timer_forward(timer);
|
||||||
S390_lowcore.steal_timer -= system;
|
|
||||||
tsk->thread.system_timer = S390_lowcore.system_timer;
|
|
||||||
system_scaled = system;
|
|
||||||
/* Do MT utilization scaling */
|
|
||||||
if (smp_cpu_mtid) {
|
|
||||||
u64 mult = __this_cpu_read(mt_scaling_mult);
|
|
||||||
u64 div = __this_cpu_read(mt_scaling_div);
|
|
||||||
|
|
||||||
system_scaled = (system_scaled * mult) / div;
|
|
||||||
}
|
|
||||||
account_system_time(tsk, 0, system);
|
|
||||||
tsk->stimescaled += system_scaled;
|
|
||||||
|
|
||||||
virt_timer_forward(system);
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
|
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
|
||||||
|
|
||||||
|
|
|
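vtime_account_irq_enter() now classifies each elapsed slice into exactly one bucket by inspecting the preemption context. The ladder below is a standalone model of that decision, with illustrative flag parameters standing in for the kernel's PF_VCPU, irq_count(), hardirq_count() and in_serving_softirq():

	#include <stdio.h>

	enum bucket { GUEST, HARDIRQ, SOFTIRQ, SYSTEM };

	/* Mirrors the new ladder: guest time only outside any interrupt,
	 * then hardirq, then serving-softirq, else plain system time. */
	static enum bucket classify(int pf_vcpu, unsigned int irq_count,
				    unsigned int hardirq_count,
				    int in_serving_softirq)
	{
		if (pf_vcpu && irq_count == 0)
			return GUEST;
		if (hardirq_count)
			return HARDIRQ;
		if (in_serving_softirq)
			return SOFTIRQ;
		return SYSTEM;
	}

	int main(void)
	{
		printf("%d\n", classify(1, 0, 0, 0));	/* GUEST (0) */
		printf("%d\n", classify(1, 1, 1, 0));	/* HARDIRQ (1) */
		printf("%d\n", classify(0, 0, 0, 0));	/* SYSTEM (3) */
		return 0;
	}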
@@ -4,7 +4,6 @@ header-y +=
 
 generic-y += barrier.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
@@ -1,7 +1,6 @@
 
 generic-y += bitsperlong.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += delay.h
 generic-y += div64.h
@@ -2,7 +2,6 @@
 
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
@@ -4,7 +4,6 @@ header-y += ../arch/
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
@@ -1,7 +1,6 @@
 generic-y += barrier.h
 generic-y += bug.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += delay.h
 generic-y += device.h
@@ -4,7 +4,6 @@ generic-y += auxvec.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += current.h
 generic-y += device.h
 generic-y += div64.h
@@ -7,7 +7,6 @@ generated-y += unistd_64_x32.h
 generated-y += xen-hypercalls.h
 
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += dma-contiguous.h
 generic-y += early_ioremap.h
 generic-y += mcs_spinlock.h
@@ -905,8 +905,8 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
 {
 	static int use_apm_idle; /* = 0 */
 	static unsigned int last_jiffies; /* = 0 */
-	static unsigned int last_stime; /* = 0 */
-	cputime_t stime, utime;
+	static u64 last_stime; /* = 0 */
+	u64 stime, utime;
 
 	int apm_idle_done = 0;
 	unsigned int jiffies_since_last_check = jiffies - last_jiffies;
@@ -919,7 +919,7 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
 	} else if (jiffies_since_last_check > idle_period) {
 		unsigned int idle_percentage;
 
-		idle_percentage = cputime_to_jiffies(stime - last_stime);
+		idle_percentage = nsecs_to_jiffies(stime - last_stime);
 		idle_percentage *= 100;
 		idle_percentage /= jiffies_since_last_check;
 		use_apm_idle = (idle_percentage > idle_threshold);
@@ -555,8 +555,10 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (!check_tsc_unstable())
-			set_sched_clock_stable();
+		if (check_tsc_unstable())
+			clear_sched_clock_stable();
+	} else {
+		clear_sched_clock_stable();
 	}
 
 	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
@@ -1,5 +1,5 @@
-#include <linux/bitops.h>
-#include <linux/kernel.h>
+#include <linux/sched.h>
 
 #include <asm/cpufeature.h>
 #include <asm/e820.h>
@@ -104,6 +104,8 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
+
+	clear_sched_clock_stable();
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)
@@ -83,6 +83,7 @@ static void default_init(struct cpuinfo_x86 *c)
 		strcpy(c->x86_model_id, "386");
 	}
 #endif
+	clear_sched_clock_stable();
 }
 
 static const struct cpu_dev default_cpu = {
@@ -1056,6 +1057,8 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (this_cpu->c_init)
 		this_cpu->c_init(c);
+	else
+		clear_sched_clock_stable();
 
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
@@ -9,6 +9,7 @@
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
 #include <asm/cpufeature.h>
+#include <linux/sched.h>
 
 #include "cpu.h"
 
@@ -183,6 +184,7 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		break;
 	}
+	clear_sched_clock_stable();
 }
 
 static void init_cyrix(struct cpuinfo_x86 *c)
@@ -119,8 +119,10 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (!check_tsc_unstable())
-			set_sched_clock_stable();
+		if (check_tsc_unstable())
+			clear_sched_clock_stable();
+	} else {
+		clear_sched_clock_stable();
 	}
 
 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
@@ -1,4 +1,5 @@
 #include <linux/kernel.h>
+#include <linux/sched.h>
 #include <linux/mm.h>
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
@@ -14,6 +15,8 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
 		if (xlvl >= 0x80860001)
 			c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
 	}
+
+	clear_sched_clock_stable();
 }
 
 static void init_transmeta(struct cpuinfo_x86 *c)
@@ -132,10 +132,8 @@ int sched_set_itmt_support(void)
 
 	sysctl_sched_itmt_enabled = 1;
 
-	if (sysctl_sched_itmt_enabled) {
-		x86_topology_update = true;
-		rebuild_sched_domains();
-	}
+	x86_topology_update = true;
+	rebuild_sched_domains();
 
 	mutex_unlock(&itmt_update_mutex);
 
@@ -107,12 +107,12 @@ static inline void kvm_sched_clock_init(bool stable)
 {
 	if (!stable) {
 		pv_time_ops.sched_clock = kvm_clock_read;
+		clear_sched_clock_stable();
 		return;
 	}
 
 	kvm_sched_clock_offset = kvm_clock_read();
 	pv_time_ops.sched_clock = kvm_sched_clock_read;
-	set_sched_clock_stable();
 
 	printk(KERN_INFO "kvm-clock: using sched offset of %llu cycles\n",
 	       kvm_sched_clock_offset);
@@ -1107,6 +1107,16 @@ static u64 read_tsc(struct clocksource *cs)
 	return (u64)rdtsc_ordered();
 }
 
+static void tsc_cs_mark_unstable(struct clocksource *cs)
+{
+	if (tsc_unstable)
+		return;
+	tsc_unstable = 1;
+	clear_sched_clock_stable();
+	disable_sched_clock_irqtime();
+	pr_info("Marking TSC unstable due to clocksource watchdog\n");
+}
+
 /*
  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
  */
@@ -1119,6 +1129,7 @@ static struct clocksource clocksource_tsc = {
 			  CLOCK_SOURCE_MUST_VERIFY,
 	.archdata	= { .vclock_mode = VCLOCK_TSC },
 	.resume		= tsc_resume,
+	.mark_unstable	= tsc_cs_mark_unstable,
 };
 
 void mark_tsc_unstable(char *reason)
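The new .mark_unstable callback lets the clocksource watchdog notify a clocksource that it has been demoted, so the TSC code can flip the sched-clock and irqtime state in one place. A hedged sketch of wiring up such a hook; only the .mark_unstable field is taken from this diff, the rest of the names are illustrative:

	#include <linux/clocksource.h>

	static int example_unstable;

	static u64 example_read(struct clocksource *cs)
	{
		return 0;	/* stub counter read */
	}

	/* Idempotent, like tsc_cs_mark_unstable() above. */
	static void example_mark_unstable(struct clocksource *cs)
	{
		if (example_unstable)
			return;
		example_unstable = 1;
		/* ...propagate, e.g. clear_sched_clock_stable()... */
	}

	static struct clocksource example_cs = {
		.name		= "example",
		.rating		= 300,
		.read		= example_read,
		.mask		= CLOCKSOURCE_MASK(64),
		.mark_unstable	= example_mark_unstable,
	};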
@@ -964,10 +964,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 /* Calculate cpu time spent by current task in 100ns units */
 static u64 current_task_runtime_100ns(void)
 {
-	cputime_t utime, stime;
+	u64 utime, stime;
 
 	task_cputime_adjusted(current, &utime, &stime);
-	return div_u64(cputime_to_nsecs(utime + stime), 100);
+
+	return div_u64(utime + stime, 100);
 }
 
 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
@@ -1,7 +1,6 @@
 generic-y += bitsperlong.h
 generic-y += bug.h
 generic-y += clkdev.h
-generic-y += cputime.h
 generic-y += div64.h
 generic-y += dma-contiguous.h
 generic-y += emergency-restart.h
@@ -132,7 +132,7 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 	u64 cur_wall_time;
 	u64 busy_time;
 
-	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
 
 	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
 	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
@@ -143,9 +143,9 @@ static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 
 	idle_time = cur_wall_time - busy_time;
 	if (wall)
-		*wall = cputime_to_usecs(cur_wall_time);
+		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
 
-	return cputime_to_usecs(idle_time);
+	return div_u64(idle_time, NSEC_PER_USEC);
 }
 
 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
@@ -152,7 +152,7 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
 		if (ignore_nice) {
 			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
-			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
+			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
 			j_cdbs->prev_cpu_nice = cur_nice;
 		}
 
@@ -13,7 +13,6 @@
 #include <linux/cpufreq.h>
 #include <linux/module.h>
 #include <linux/slab.h>
-#include <linux/cputime.h>
 
 static DEFINE_SPINLOCK(cpufreq_stats_lock);
 
@@ -203,7 +203,7 @@ mISDNStackd(void *data)
 {
 	struct mISDNstack *st = data;
 #ifdef MISDN_MSG_STATS
-	cputime_t utime, stime;
+	u64 utime, stime;
 #endif
 	int err = 0;
 
@@ -308,7 +308,7 @@ mISDNStackd(void *data)
 	       st->stopped_cnt);
 	task_cputime(st->thread, &utime, &stime);
 	printk(KERN_DEBUG
-	       "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
+	       "mISDNStackd daemon for %s utime(%llu) stime(%llu)\n",
 	       dev_name(&st->dev->dev), utime, stime);
 	printk(KERN_DEBUG
 	       "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
@@ -52,8 +52,8 @@ struct rackmeter_dma {
 struct rackmeter_cpu {
 	struct delayed_work	sniffer;
 	struct rackmeter	*rm;
-	cputime64_t		prev_wall;
-	cputime64_t		prev_idle;
+	u64			prev_wall;
+	u64			prev_idle;
 	int			zero;
 } ____cacheline_aligned;
 
@@ -81,7 +81,7 @@ static int rackmeter_ignore_nice;
 /* This is copied from cpufreq_ondemand, maybe we should put it in
  * a common header somewhere
  */
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline u64 get_cpu_idle_time(unsigned int cpu)
 {
 	u64 retval;
 
@@ -217,23 +217,23 @@ static void rackmeter_do_timer(struct work_struct *work)
 		container_of(work, struct rackmeter_cpu, sniffer.work);
 	struct rackmeter *rm = rcpu->rm;
 	unsigned int cpu = smp_processor_id();
-	cputime64_t cur_jiffies, total_idle_ticks;
-	unsigned int total_ticks, idle_ticks;
+	u64 cur_nsecs, total_idle_nsecs;
+	u64 total_nsecs, idle_nsecs;
 	int i, offset, load, cumm, pause;
 
-	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-	total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
-	rcpu->prev_wall = cur_jiffies;
+	cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
+	total_nsecs = cur_nsecs - rcpu->prev_wall;
+	rcpu->prev_wall = cur_nsecs;
 
-	total_idle_ticks = get_cpu_idle_time(cpu);
-	idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
-	idle_ticks = min(idle_ticks, total_ticks);
-	rcpu->prev_idle = total_idle_ticks;
+	total_idle_nsecs = get_cpu_idle_time(cpu);
+	idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
+	idle_nsecs = min(idle_nsecs, total_nsecs);
+	rcpu->prev_idle = total_idle_nsecs;
 
 	/* We do a very dumb calculation to update the LEDs for now,
 	 * we'll do better once we have actual PWM implemented
 	 */
-	load = (9 * (total_ticks - idle_ticks)) / total_ticks;
+	load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);
 
 	offset = cpu << 3;
 	cumm = 0;
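With the sample deltas now in nanoseconds rather than ticks they can exceed 32 bits, hence div64_u64. The LED load factor is otherwise the same 0..9 busy fraction; a runnable sketch of the arithmetic:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t total_ns = 250000000;	/* 250ms sampling window */
		uint64_t idle_ns = 100000000;	/* 100ms of it idle */

		/* same formula as the diff: 9 * busy / total, rounded down */
		int load = (int)((9 * (total_ns - idle_ns)) / total_ns);

		printf("load = %d\n", load);	/* 5 */
		return 0;
	}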
@@ -278,7 +278,7 @@ static void rackmeter_init_cpu_sniffer(struct rackmeter *rm)
 			continue;
 		rcpu = &rm->cpu[cpu];
 		rcpu->prev_idle = get_cpu_idle_time(cpu);
-		rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
+		rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
 		schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
 					 msecs_to_jiffies(CPU_SAMPLING_RATE));
 	}
 
@@ -1428,17 +1428,18 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
 		 * group-wide total, not its individual thread total.
 		 */
 		thread_group_cputime(p, &cputime);
-		cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
-		cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
+		prstatus->pr_utime = ns_to_timeval(cputime.utime);
+		prstatus->pr_stime = ns_to_timeval(cputime.stime);
 	} else {
-		cputime_t utime, stime;
+		u64 utime, stime;
 
 		task_cputime(p, &utime, &stime);
-		cputime_to_timeval(utime, &prstatus->pr_utime);
-		cputime_to_timeval(stime, &prstatus->pr_stime);
+		prstatus->pr_utime = ns_to_timeval(utime);
+		prstatus->pr_stime = ns_to_timeval(stime);
 	}
-	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
-	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+
+	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
+	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
 }
 
 static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
@@ -1349,17 +1349,17 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
 		 * group-wide total, not its individual thread total.
 		 */
 		thread_group_cputime(p, &cputime);
-		cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
-		cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
+		prstatus->pr_utime = ns_to_timeval(cputime.utime);
+		prstatus->pr_stime = ns_to_timeval(cputime.stime);
 	} else {
-		cputime_t utime, stime;
+		u64 utime, stime;
 
 		task_cputime(p, &utime, &stime);
-		cputime_to_timeval(utime, &prstatus->pr_utime);
-		cputime_to_timeval(stime, &prstatus->pr_stime);
+		prstatus->pr_utime = ns_to_timeval(utime);
+		prstatus->pr_stime = ns_to_timeval(stime);
 	}
-	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
-	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
+	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
+	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
 
 	prstatus->pr_exec_fdpic_loadmap = p->mm->context.exec_fdpic_loadmap;
 	prstatus->pr_interp_fdpic_loadmap = p->mm->context.interp_fdpic_loadmap;
@@ -51,22 +51,8 @@
 #define elf_prstatus	compat_elf_prstatus
 #define elf_prpsinfo	compat_elf_prpsinfo
 
-/*
- * Compat version of cputime_to_compat_timeval, perhaps this
- * should be an inline in <linux/compat.h>.
- */
-static void cputime_to_compat_timeval(const cputime_t cputime,
-				      struct compat_timeval *value)
-{
-	struct timeval tv;
-	cputime_to_timeval(cputime, &tv);
-	value->tv_sec = tv.tv_sec;
-	value->tv_usec = tv.tv_usec;
-}
-
-#undef cputime_to_timeval
-#define cputime_to_timeval cputime_to_compat_timeval
-
+#undef	ns_to_timeval
+#define ns_to_timeval ns_to_compat_timeval
 
 /*
  * To use this file, asm/elf.h must define compat_elf_check_arch.
@@ -393,7 +393,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
 	if (journal->j_flags & JBD2_FLUSHED) {
 		jbd_debug(3, "super block updated\n");
-		mutex_lock(&journal->j_checkpoint_mutex);
+		mutex_lock_io(&journal->j_checkpoint_mutex);
 		/*
 		 * We hold j_checkpoint_mutex so tail cannot change under us.
 		 * We don't need any special data guarantees for writing sb
@@ -944,7 +944,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 */
 void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
 {
-	mutex_lock(&journal->j_checkpoint_mutex);
+	mutex_lock_io(&journal->j_checkpoint_mutex);
 	if (tid_gt(tid, journal->j_tail_sequence))
 		__jbd2_update_log_tail(journal, tid, block);
 	mutex_unlock(&journal->j_checkpoint_mutex);
@@ -1304,7 +1304,7 @@ static int journal_reset(journal_t *journal)
 		journal->j_flags |= JBD2_FLUSHED;
 	} else {
 		/* Lock here to make assertions happy... */
-		mutex_lock(&journal->j_checkpoint_mutex);
+		mutex_lock_io(&journal->j_checkpoint_mutex);
 		/*
 		 * Update log tail information. We use REQ_FUA since new
 		 * transaction will start reusing journal space and so we
@@ -1691,7 +1691,7 @@ int jbd2_journal_destroy(journal_t *journal)
 	spin_lock(&journal->j_list_lock);
 	while (journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
+		mutex_lock_io(&journal->j_checkpoint_mutex);
 		err = jbd2_log_do_checkpoint(journal);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 		/*
@@ -1713,7 +1713,7 @@ int jbd2_journal_destroy(journal_t *journal)
 
 	if (journal->j_sb_buffer) {
 		if (!is_journal_aborted(journal)) {
-			mutex_lock(&journal->j_checkpoint_mutex);
+			mutex_lock_io(&journal->j_checkpoint_mutex);
 
 			write_lock(&journal->j_state_lock);
 			journal->j_tail_sequence =
@@ -1955,7 +1955,7 @@ int jbd2_journal_flush(journal_t *journal)
 	spin_lock(&journal->j_list_lock);
 	while (!err && journal->j_checkpoint_transactions != NULL) {
 		spin_unlock(&journal->j_list_lock);
-		mutex_lock(&journal->j_checkpoint_mutex);
+		mutex_lock_io(&journal->j_checkpoint_mutex);
 		err = jbd2_log_do_checkpoint(journal);
 		mutex_unlock(&journal->j_checkpoint_mutex);
 		spin_lock(&journal->j_list_lock);
@@ -1965,7 +1965,7 @@ int jbd2_journal_flush(journal_t *journal)
 	if (is_journal_aborted(journal))
 		return -EIO;
 
-	mutex_lock(&journal->j_checkpoint_mutex);
+	mutex_lock_io(&journal->j_checkpoint_mutex);
 	if (!err) {
 		err = jbd2_cleanup_journal_tail(journal);
 		if (err < 0) {
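All of these checkpoint-mutex sites switch to mutex_lock_io(), part of this cycle's IO-accounting rework: a task sleeping on the mutex is charged as iowait, because the holder is almost certainly doing journal IO. Roughly, the primitive behaves like the hedged sketch below, built on the io_schedule_prepare()/io_schedule_finish() pair added in the same series (this is an illustration of the semantics, not the kernel's implementation):

	#include <linux/mutex.h>
	#include <linux/sched.h>

	static void mutex_lock_io_sketch(struct mutex *lock)
	{
		int token;

		token = io_schedule_prepare();	/* mark the task in_iowait */
		mutex_lock(lock);		/* the blocking wait itself */
		io_schedule_finish(token);	/* restore the previous state */
	}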
@@ -401,8 +401,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	unsigned long long start_time;
 	unsigned long cmin_flt = 0, cmaj_flt = 0;
 	unsigned long min_flt = 0, maj_flt = 0;
-	cputime_t cutime, cstime, utime, stime;
-	cputime_t cgtime, gtime;
+	u64 cutime, cstime, utime, stime;
+	u64 cgtime, gtime;
 	unsigned long rsslim = 0;
 	char tcomm[sizeof(task->comm)];
 	unsigned long flags;
@@ -497,10 +497,10 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	seq_put_decimal_ull(m, " ", cmin_flt);
 	seq_put_decimal_ull(m, " ", maj_flt);
 	seq_put_decimal_ull(m, " ", cmaj_flt);
-	seq_put_decimal_ull(m, " ", cputime_to_clock_t(utime));
-	seq_put_decimal_ull(m, " ", cputime_to_clock_t(stime));
-	seq_put_decimal_ll(m, " ", cputime_to_clock_t(cutime));
-	seq_put_decimal_ll(m, " ", cputime_to_clock_t(cstime));
+	seq_put_decimal_ull(m, " ", nsec_to_clock_t(utime));
+	seq_put_decimal_ull(m, " ", nsec_to_clock_t(stime));
+	seq_put_decimal_ll(m, " ", nsec_to_clock_t(cutime));
+	seq_put_decimal_ll(m, " ", nsec_to_clock_t(cstime));
 	seq_put_decimal_ll(m, " ", priority);
 	seq_put_decimal_ll(m, " ", nice);
 	seq_put_decimal_ll(m, " ", num_threads);
@@ -542,8 +542,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	seq_put_decimal_ull(m, " ", task->rt_priority);
 	seq_put_decimal_ull(m, " ", task->policy);
 	seq_put_decimal_ull(m, " ", delayacct_blkio_ticks(task));
-	seq_put_decimal_ull(m, " ", cputime_to_clock_t(gtime));
-	seq_put_decimal_ll(m, " ", cputime_to_clock_t(cgtime));
+	seq_put_decimal_ull(m, " ", nsec_to_clock_t(gtime));
+	seq_put_decimal_ll(m, " ", nsec_to_clock_t(cgtime));
 
 	if (mm && permitted) {
 		seq_put_decimal_ull(m, " ", mm->start_data);
@@ -21,9 +21,9 @@
 
 #ifdef arch_idle_time
 
-static cputime64_t get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
 {
-	cputime64_t idle;
+	u64 idle;
 
 	idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
 	if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
@@ -31,9 +31,9 @@ static cputime64_t get_idle_time(int cpu)
 	return idle;
 }
 
-static cputime64_t get_iowait_time(int cpu)
+static u64 get_iowait_time(int cpu)
 {
-	cputime64_t iowait;
+	u64 iowait;
 
 	iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 	if (cpu_online(cpu) && nr_iowait_cpu(cpu))
@@ -45,32 +45,32 @@ static cputime64_t get_iowait_time(int cpu)
 
 static u64 get_idle_time(int cpu)
 {
-	u64 idle, idle_time = -1ULL;
+	u64 idle, idle_usecs = -1ULL;
 
 	if (cpu_online(cpu))
-		idle_time = get_cpu_idle_time_us(cpu, NULL);
+		idle_usecs = get_cpu_idle_time_us(cpu, NULL);
 
-	if (idle_time == -1ULL)
+	if (idle_usecs == -1ULL)
 		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
 		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
 	else
-		idle = usecs_to_cputime64(idle_time);
+		idle = idle_usecs * NSEC_PER_USEC;
 
 	return idle;
 }
 
 static u64 get_iowait_time(int cpu)
 {
-	u64 iowait, iowait_time = -1ULL;
+	u64 iowait, iowait_usecs = -1ULL;
 
 	if (cpu_online(cpu))
-		iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+		iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
 
-	if (iowait_time == -1ULL)
+	if (iowait_usecs == -1ULL)
 		/* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
 		iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
 	else
-		iowait = usecs_to_cputime64(iowait_time);
+		iowait = iowait_usecs * NSEC_PER_USEC;
 
 	return iowait;
 }
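get_cpu_idle_time_us() reports -1ULL when NO_HZ idle-time bookkeeping is unavailable, so the rename to idle_usecs also documents the unit the sentinel test operates on. The pattern in isolation, with a stub standing in for the kernel call:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_USEC 1000ULL

	/* stub for get_cpu_idle_time_us(); -1ULL means "no NO_HZ data" */
	static uint64_t idle_us_stub(int have_nohz)
	{
		return have_nohz ? 123456ULL : (uint64_t)-1LL;
	}

	int main(void)
	{
		uint64_t idle_usecs = idle_us_stub(1);
		uint64_t idle_ns;

		if (idle_usecs == (uint64_t)-1LL)
			idle_ns = 0;	/* would fall back to cpustat here */
		else
			idle_ns = idle_usecs * NSEC_PER_USEC;

		printf("%llu\n", (unsigned long long)idle_ns); /* 123456000 */
		return 0;
	}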
@@ -115,16 +115,16 @@ static int show_stat(struct seq_file *p, void *v)
 	}
 	sum += arch_irq_stat();
 
-	seq_put_decimal_ull(p, "cpu  ", cputime64_to_clock_t(user));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
-	seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
+	seq_put_decimal_ull(p, "cpu  ", nsec_to_clock_t(user));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
+	seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
 	seq_putc(p, '\n');
 
 	for_each_online_cpu(i) {
@@ -140,16 +140,16 @@ static int show_stat(struct seq_file *p, void *v)
 		guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
 		guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
 		seq_printf(p, "cpu%d", i);
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
-		seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
+		seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
 		seq_putc(p, '\n');
 	}
 	seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
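/proc/stat values stay in USER_HZ clock ticks; only the internal unit changed, so nsec_to_clock_t() must divide by NSEC_PER_SEC / USER_HZ. A sketch of the arithmetic, assuming the common USER_HZ value of 100:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL
	#define USER_HZ      100ULL	/* assumption: the usual Linux value */

	/* the division nsec_to_clock_t() performs */
	static uint64_t nsec_to_clock_t_sketch(uint64_t ns)
	{
		return ns / (NSEC_PER_SEC / USER_HZ);
	}

	int main(void)
	{
		/* 2.5 s of CPU time maps to 250 clock ticks */
		printf("%llu\n", (unsigned long long)
		       nsec_to_clock_t_sketch(2500000000ULL)); /* 250 */
		return 0;
	}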
@@ -5,23 +5,20 @@
 #include <linux/seq_file.h>
 #include <linux/time.h>
 #include <linux/kernel_stat.h>
-#include <linux/cputime.h>
 
 static int uptime_proc_show(struct seq_file *m, void *v)
 {
 	struct timespec uptime;
 	struct timespec idle;
-	u64 idletime;
 	u64 nsec;
 	u32 rem;
 	int i;
 
-	idletime = 0;
+	nsec = 0;
 	for_each_possible_cpu(i)
-		idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
+		nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
 
 	get_monotonic_boottime(&uptime);
-	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
 	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 	idle.tv_nsec = rem;
 	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
@@ -1,15 +0,0 @@
-#ifndef _ASM_GENERIC_CPUTIME_H
-#define _ASM_GENERIC_CPUTIME_H
-
-#include <linux/time.h>
-#include <linux/jiffies.h>
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-# include <asm-generic/cputime_jiffies.h>
-#endif
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-# include <asm-generic/cputime_nsecs.h>
-#endif
-
-#endif
@@ -1,75 +0,0 @@
-#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
-#define _ASM_GENERIC_CPUTIME_JIFFIES_H
-
-typedef unsigned long __nocast cputime_t;
-
-#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
-
-#define cputime_one_jiffy		jiffies_to_cputime(1)
-#define cputime_to_jiffies(__ct)	(__force unsigned long)(__ct)
-#define jiffies_to_cputime(__hz)	(__force cputime_t)(__hz)
-
-typedef u64 __nocast cputime64_t;
-
-#define cputime64_to_jiffies64(__ct)	(__force u64)(__ct)
-#define jiffies64_to_cputime64(__jif)	(__force cputime64_t)(__jif)
-
-
-/*
- * Convert nanoseconds <-> cputime
- */
-#define cputime_to_nsecs(__ct)		\
-	jiffies_to_nsecs(cputime_to_jiffies(__ct))
-#define nsecs_to_cputime64(__nsec)	\
-	jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
-#define nsecs_to_cputime(__nsec)	\
-	jiffies_to_cputime(nsecs_to_jiffies(__nsec))
-
-
-/*
- * Convert cputime to microseconds and back.
- */
-#define cputime_to_usecs(__ct)		\
-	jiffies_to_usecs(cputime_to_jiffies(__ct))
-#define usecs_to_cputime(__usec)	\
-	jiffies_to_cputime(usecs_to_jiffies(__usec))
-#define usecs_to_cputime64(__usec)	\
-	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
-
-/*
- * Convert cputime to seconds and back.
- */
-#define cputime_to_secs(jif)		(cputime_to_jiffies(jif) / HZ)
-#define secs_to_cputime(sec)		jiffies_to_cputime((sec) * HZ)
-
-/*
- * Convert cputime to timespec and back.
- */
-#define timespec_to_cputime(__val)	\
-	jiffies_to_cputime(timespec_to_jiffies(__val))
-#define cputime_to_timespec(__ct,__val)	\
-	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to timeval and back.
- */
-#define timeval_to_cputime(__val)	\
-	jiffies_to_cputime(timeval_to_jiffies(__val))
-#define cputime_to_timeval(__ct,__val)	\
-	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
-
-/*
- * Convert cputime to clock and back.
- */
-#define cputime_to_clock_t(__ct)	\
-	jiffies_to_clock_t(cputime_to_jiffies(__ct))
-#define clock_t_to_cputime(__x)		\
-	jiffies_to_cputime(clock_t_to_jiffies(__x))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)	\
-	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
-
-#endif
--- a/include/asm-generic/cputime_nsecs.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Definitions for measuring cputime in nsecs resolution.
- *
- * Based on <arch/ia64/include/asm/cputime.h>
- *
- * Copyright (C) 2007 FUJITSU LIMITED
- * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- *
- */
-
-#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
-#define _ASM_GENERIC_CPUTIME_NSECS_H
-
-#include <linux/math64.h>
-
-typedef u64 __nocast cputime_t;
-typedef u64 __nocast cputime64_t;
-
-#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
-
-#define cputime_one_jiffy		jiffies_to_cputime(1)
-
-#define cputime_div(__ct, divisor)  div_u64((__force u64)__ct, divisor)
-#define cputime_div_rem(__ct, divisor, remainder) \
-	div_u64_rem((__force u64)__ct, divisor, remainder);
-
-/*
- * Convert cputime <-> jiffies (HZ)
- */
-#define cputime_to_jiffies(__ct)	\
-	cputime_div(__ct, NSEC_PER_SEC / HZ)
-#define jiffies_to_cputime(__jif)	\
-	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)	\
-	cputime_div(__ct, NSEC_PER_SEC / HZ)
-#define jiffies64_to_cputime64(__jif)	\
-	(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
-
-
-/*
- * Convert cputime <-> nanoseconds
- */
-#define cputime_to_nsecs(__ct)		\
-	(__force u64)(__ct)
-#define nsecs_to_cputime(__nsecs)	\
-	(__force cputime_t)(__nsecs)
-#define nsecs_to_cputime64(__nsecs)	\
-	(__force cputime64_t)(__nsecs)
-
-
-/*
- * Convert cputime <-> microseconds
- */
-#define cputime_to_usecs(__ct)		\
-	cputime_div(__ct, NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)	\
-	(__force cputime_t)((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs)	\
-	(__force cputime64_t)((__usecs) * NSEC_PER_USEC)
-
-/*
- * Convert cputime <-> seconds
- */
-#define cputime_to_secs(__ct)		\
-	cputime_div(__ct, NSEC_PER_SEC)
-#define secs_to_cputime(__secs)		\
-	(__force cputime_t)((__secs) * NSEC_PER_SEC)
-
-/*
- * Convert cputime <-> timespec (nsec)
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *val)
-{
-	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
-	return (__force cputime_t) ret;
-}
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-{
-	u32 rem;
-
-	val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
-	val->tv_nsec = rem;
-}
-
-/*
- * Convert cputime <-> timeval (msec)
- */
-static inline cputime_t timeval_to_cputime(const struct timeval *val)
-{
-	u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
-			val->tv_usec * NSEC_PER_USEC;
-	return (__force cputime_t) ret;
-}
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
-{
-	u32 rem;
-
-	val->tv_sec = cputime_div_rem(ct, NSEC_PER_SEC, &rem);
-	val->tv_usec = rem / NSEC_PER_USEC;
-}
-
-/*
- * Convert cputime <-> clock (USER_HZ)
- */
-#define cputime_to_clock_t(__ct)	\
-	cputime_div(__ct, (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)		\
-	(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)	\
-	cputime_to_clock_t((__force cputime_t)__ct)
-
-#endif
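Once cputime is u64 nanoseconds everywhere, as in the header deleted above, most conversions collapse into a single multiply or divide and the nsecs case becomes the identity. A worked standalone illustration (plain C, hypothetical values):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_SEC  1000000000ULL

int main(void)
{
	uint64_t ct = 1500000000ULL;		/* 1.5 s of CPU time, in ns */

	/* cputime_to_nsecs() is now the identity ... */
	uint64_t nsecs = ct;
	/* ... and usecs/secs are one division, with no jiffies round-trip */
	uint64_t usecs = ct / NSEC_PER_USEC;	/* 1500000 */
	uint64_t secs  = ct / NSEC_PER_SEC;	/* 1 */

	printf("%llu ns = %llu us = %llu s\n",
	       (unsigned long long)nsecs, (unsigned long long)usecs,
	       (unsigned long long)secs);
	return 0;
}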
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -62,6 +62,8 @@ struct module;
  * @archdata:		arch-specific data
  * @suspend:		suspend function for the clocksource, if necessary
  * @resume:		resume function for the clocksource, if necessary
+ * @mark_unstable:	Optional function to inform the clocksource driver that
+ *			the watchdog marked the clocksource unstable
  * @owner:		module reference, must be set by clocksource in modules
  *
  * Note: This struct is not used in hotpathes of the timekeeping code
@@ -93,6 +95,7 @@ struct clocksource {
 	unsigned long flags;
 	void (*suspend)(struct clocksource *cs);
 	void (*resume)(struct clocksource *cs);
+	void (*mark_unstable)(struct clocksource *cs);
 
 	/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
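A hedged sketch of how a driver might wire the new callback; the device and function names below are hypothetical, and a real clocksource would also need a .read implementation plus registration:

/* hypothetical driver: react when the watchdog declares this source unstable */
static void my_cs_mark_unstable(struct clocksource *cs)
{
	/* e.g. quiesce hardware or log diagnostics; not called from hot paths */
}

static struct clocksource my_cs = {
	.name		= "my_cs",		/* hypothetical */
	.rating		= 300,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.mark_unstable	= my_cs_mark_unstable,
	/* .read = ..., then clocksource_register_hz(&my_cs, rate) */
};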
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -731,7 +731,25 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
 static inline bool in_compat_syscall(void) { return is_compat_task(); }
 #endif
 
+/**
+ * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * @nsec:	the nanoseconds value to be converted
+ *
+ * Returns the compat_timeval representation of the nsec parameter.
+ */
+static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+{
+	struct timeval tv;
+	struct compat_timeval ctv;
+
+	tv = ns_to_timeval(nsec);
+	ctv.tv_sec = tv.tv_sec;
+	ctv.tv_usec = tv.tv_usec;
+
+	return ctv;
+}
+
-#else
+#else /* !CONFIG_COMPAT */
 
 #define is_compat_task() (0)
 static inline bool in_compat_syscall(void) { return false; }
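Illustrative use in compat code that has to report a nanosecond total through a 32-bit ABI; this is a sketch with a made-up value, not a real call site:

/* sketch: convert an accumulated nsec count for a compat_timeval field */
s64 runtime_ns = 1234567890;	/* hypothetical */
struct compat_timeval ctv = ns_to_compat_timeval(runtime_ns);
/* ctv.tv_sec == 1, ctv.tv_usec == 234567 (truncated from 234567890 ns) */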
--- a/include/linux/cputime.h
+++ b/include/linux/cputime.h
@@ -1,6 +1,7 @@
 #ifndef __LINUX_CPUTIME_H
 #define __LINUX_CPUTIME_H
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #include <asm/cputime.h>
 
 #ifndef cputime_to_nsecs
@@ -8,9 +9,5 @@
 	(cputime_to_usecs(__ct) * NSEC_PER_USEC)
 #endif
 
-#ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs)	\
-	usecs_to_cputime((__nsecs) / NSEC_PER_USEC)
-#endif
-
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 #endif /* __LINUX_CPUTIME_H */
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -17,6 +17,7 @@
 #ifndef _LINUX_DELAYACCT_H
 #define _LINUX_DELAYACCT_H
 
+#include <uapi/linux/taskstats.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
 
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -293,6 +293,8 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
 	return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
+extern u64 jiffies64_to_nsecs(u64 j);
+
 extern unsigned long __msecs_to_jiffies(const unsigned int m);
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 /*
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -9,7 +9,6 @@
 #include <linux/sched.h>
 #include <linux/vtime.h>
 #include <asm/irq.h>
-#include <linux/cputime.h>
 
 /*
  * 'kernel_stat.h' contains the definitions needed for doing
@@ -78,15 +77,18 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
 	return kstat_cpu(cpu).irqs_sum;
 }
 
-extern void account_user_time(struct task_struct *, cputime_t);
-extern void account_system_time(struct task_struct *, int, cputime_t);
-extern void account_steal_time(cputime_t);
-extern void account_idle_time(cputime_t);
+extern void account_user_time(struct task_struct *, u64);
+extern void account_guest_time(struct task_struct *, u64);
+extern void account_system_time(struct task_struct *, int, u64);
+extern void account_system_index_time(struct task_struct *, u64,
+				      enum cpu_usage_stat);
+extern void account_steal_time(u64);
+extern void account_idle_time(u64);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 static inline void account_process_tick(struct task_struct *tsk, int user)
 {
-	vtime_account_user(tsk);
+	vtime_flush(tsk);
 }
 #else
 extern void account_process_tick(struct task_struct *, int user);
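The account_* helpers now take plain u64 nanoseconds. A hedged sketch of how a tick path might charge time under the new signatures (not a verbatim call site; TICK_NSEC is roughly NSEC_PER_SEC/HZ):

/* sketch: charge one timer tick of user time, in nanoseconds */
account_user_time(current, TICK_NSEC);
/* system time additionally names the bucket it ran in */
account_system_index_time(current, TICK_NSEC, CPUTIME_SYSTEM);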
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -156,10 +156,12 @@ extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
 					unsigned int subclass);
 extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
 					unsigned int subclass);
+extern void mutex_lock_io_nested(struct mutex *lock, unsigned int subclass);
 
 #define mutex_lock(lock) mutex_lock_nested(lock, 0)
 #define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
 #define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
+#define mutex_lock_io(lock) mutex_lock_io_nested(lock, 0)
 
 #define mutex_lock_nest_lock(lock, nest_lock)				\
 do {									\
@@ -171,11 +173,13 @@ do {									\
 extern void mutex_lock(struct mutex *lock);
 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
 extern int __must_check mutex_lock_killable(struct mutex *lock);
+extern void mutex_lock_io(struct mutex *lock);
 
 # define mutex_lock_nested(lock, subclass) mutex_lock(lock)
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
 #endif
 
 /*
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -8,19 +8,9 @@
 #include <linux/alarmtimer.h>
 
 
-static inline unsigned long long cputime_to_expires(cputime_t expires)
-{
-	return (__force unsigned long long)expires;
-}
-
-static inline cputime_t expires_to_cputime(unsigned long long expires)
-{
-	return (__force cputime_t)expires;
-}
-
 struct cpu_timer_list {
 	struct list_head entry;
-	unsigned long long expires, incr;
+	u64 expires, incr;
 	struct task_struct *task;
 	int firing;
 };
@@ -129,7 +119,7 @@ void run_posix_cpu_timers(struct task_struct *task);
 void posix_cpu_timers_exit(struct task_struct *task);
 void posix_cpu_timers_exit_group(struct task_struct *task);
 void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
-			   cputime_t *newval, cputime_t *oldval);
+			   u64 *newval, u64 *oldval);
 
 long clock_nanosleep_restart(struct restart_block *restart_block);
 
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -29,7 +29,6 @@ struct sched_param {
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
-#include <linux/cputime.h>
 
 #include <linux/smp.h>
 #include <linux/sem.h>
@@ -461,12 +460,10 @@ extern signed long schedule_timeout_idle(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern int __must_check io_schedule_prepare(void);
+extern void io_schedule_finish(int token);
 extern long io_schedule_timeout(long timeout);
-
-static inline void io_schedule(void)
-{
-	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
-}
+extern void io_schedule(void);
 
 void __noreturn do_task_dead(void);
 
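The new prepare/finish pair lets any sleeping primitive account its wait as iowait, not just io_schedule() itself. A hedged sketch of the pattern (the completion variable is hypothetical):

/* sketch: mark an arbitrary blocking wait as IO wait */
int token;

token = io_schedule_prepare();		/* flags the task as in_iowait, returns old state */
wait_for_completion(&my_io_done);	/* any sleep between prepare/finish counts as iowait */
io_schedule_finish(token);		/* restores the previous in_iowait state */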
@@ -565,15 +562,13 @@ struct pacct_struct {
 	int			ac_flag;
 	long			ac_exitcode;
 	unsigned long		ac_mem;
-	cputime_t		ac_utime, ac_stime;
+	u64			ac_utime, ac_stime;
 	unsigned long		ac_minflt, ac_majflt;
 };
 
 struct cpu_itimer {
-	cputime_t expires;
-	cputime_t incr;
-	u32 error;
-	u32 incr_error;
+	u64 expires;
+	u64 incr;
 };
 
 /**
@@ -587,8 +582,8 @@ struct cpu_itimer {
  */
 struct prev_cputime {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-	cputime_t utime;
-	cputime_t stime;
+	u64 utime;
+	u64 stime;
 	raw_spinlock_t lock;
 #endif
 };
@@ -603,8 +598,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
 
 /**
  * struct task_cputime - collected CPU time counts
- * @utime:		time spent in user mode, in &cputime_t units
- * @stime:		time spent in kernel mode, in &cputime_t units
+ * @utime:		time spent in user mode, in nanoseconds
+ * @stime:		time spent in kernel mode, in nanoseconds
  * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
  *
  * This structure groups together three kinds of CPU time that are tracked for
@@ -612,8 +607,8 @@ static inline void prev_cputime_init(struct prev_cputime *prev)
  * these counts together and treat all three of them in parallel.
  */
 struct task_cputime {
-	cputime_t utime;
-	cputime_t stime;
+	u64 utime;
+	u64 stime;
 	unsigned long long sum_exec_runtime;
 };
 
@@ -622,13 +617,6 @@ struct task_cputime {
 #define prof_exp	stime
 #define sched_exp	sum_exec_runtime
 
-#define INIT_CPUTIME	\
-	(struct task_cputime) {					\
-		.utime = 0,					\
-		.stime = 0,					\
-		.sum_exec_runtime = 0,				\
-	}
-
 /*
  * This is the atomic variant of task_cputime, which can be used for
  * storing and updating task_cputime statistics without locking.
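With utime and stime stored as u64 nanoseconds, totals and unit changes become plain integer arithmetic. A kernel-context sketch with made-up values:

/* sketch: summing a task_cputime needs no cputime_t conversion helpers */
struct task_cputime tc = {
	.utime            = 1500000000ULL,	/* 1.5 s user time, hypothetical */
	.stime            =  250000000ULL,	/* 0.25 s system time, hypothetical */
	.sum_exec_runtime = 1750000000ULL,
};
u64 total_ns = tc.utime + tc.stime;		/* 1750000000 ns */
u64 total_ms = div_u64(total_ns, NSEC_PER_MSEC);	/* 1750 ms */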
@@ -787,9 +775,9 @@ struct signal_struct {
 	 * in __exit_signal, except for the group leader.
 	 */
 	seqlock_t stats_lock;
-	cputime_t utime, stime, cutime, cstime;
-	cputime_t gtime;
-	cputime_t cgtime;
+	u64 utime, stime, cutime, cstime;
+	u64 gtime;
+	u64 cgtime;
 	struct prev_cputime prev_cputime;
 	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
@@ -1668,11 +1656,11 @@ struct task_struct {
 	int __user *set_child_tid;		/* CLONE_CHILD_SETTID */
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
-	cputime_t utime, stime;
+	u64 utime, stime;
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
-	cputime_t utimescaled, stimescaled;
+	u64 utimescaled, stimescaled;
 #endif
-	cputime_t gtime;
+	u64 gtime;
 	struct prev_cputime prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	seqcount_t vtime_seqcount;
@@ -1824,7 +1812,7 @@ struct task_struct {
 #if defined(CONFIG_TASK_XACCT)
 	u64 acct_rss_mem1;	/* accumulated rss usage */
 	u64 acct_vm_mem1;	/* accumulated virtual memory usage */
-	cputime_t acct_timexpd;	/* stime + utime since last update */
+	u64 acct_timexpd;	/* stime + utime since last update */
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
@@ -2269,17 +2257,17 @@ struct task_struct *try_get_task_struct(struct task_struct **ptask);
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
-			 cputime_t *utime, cputime_t *stime);
-extern cputime_t task_gtime(struct task_struct *t);
+			 u64 *utime, u64 *stime);
+extern u64 task_gtime(struct task_struct *t);
 #else
 static inline void task_cputime(struct task_struct *t,
-				cputime_t *utime, cputime_t *stime)
+				u64 *utime, u64 *stime)
 {
 	*utime = t->utime;
 	*stime = t->stime;
 }
 
-static inline cputime_t task_gtime(struct task_struct *t)
+static inline u64 task_gtime(struct task_struct *t)
 {
 	return t->gtime;
 }
@@ -2287,23 +2275,23 @@ static inline u64 task_gtime(struct task_struct *t)
 
 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 static inline void task_cputime_scaled(struct task_struct *t,
-				       cputime_t *utimescaled,
-				       cputime_t *stimescaled)
+				       u64 *utimescaled,
+				       u64 *stimescaled)
 {
 	*utimescaled = t->utimescaled;
 	*stimescaled = t->stimescaled;
 }
 #else
 static inline void task_cputime_scaled(struct task_struct *t,
-				       cputime_t *utimescaled,
-				       cputime_t *stimescaled)
+				       u64 *utimescaled,
+				       u64 *stimescaled)
 {
 	task_cputime(t, utimescaled, stimescaled);
 }
 #endif
 
-extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
-extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
+extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
+extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
 
 /*
  * Per process flags
@@ -2522,10 +2510,18 @@ extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_init(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_init_late(void)
+{
+}
+
 static inline void sched_clock_tick(void)
 {
 }
 
+static inline void clear_sched_clock_stable(void)
+{
+}
+
 static inline void sched_clock_idle_sleep_event(void)
 {
 }
@@ -2544,6 +2540,7 @@ static inline u64 local_clock(void)
 	return sched_clock();
 }
 #else
+extern void sched_clock_init_late(void);
 /*
  * Architectures can set this to 1 if they have specified
  * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
@@ -2551,7 +2548,6 @@ static inline u64 local_clock(void)
  * is reliable after all:
  */
 extern int sched_clock_stable(void);
-extern void set_sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
 extern void sched_clock_tick(void);
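A caller-side sketch after the conversion; consumers of task_cputime() now simply receive nanoseconds (the printing context here is hypothetical):

/* sketch: reading a task's CPU time through the u64-nanosecond API */
u64 utime, stime;

task_cputime(current, &utime, &stime);
pr_info("pid %d: user %llu ns, system %llu ns\n",
	task_pid_nr(current), utime, stime);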
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -59,6 +59,7 @@ extern unsigned int sysctl_sched_cfs_bandwidth_slice;
 extern unsigned int sysctl_sched_autogroup_enabled;
 #endif
 
+extern int sysctl_sched_rr_timeslice;
 extern int sched_rr_timeslice;
 
 extern int sched_rr_handler(struct ctl_table *table, int write,
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -58,27 +58,28 @@ static inline void vtime_task_switch(struct task_struct *prev)
 
 extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
-extern void vtime_account_user(struct task_struct *tsk);
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
-static inline void vtime_account_user(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_account_user(struct task_struct *tsk);
 extern void vtime_user_enter(struct task_struct *tsk);
 
 static inline void vtime_user_exit(struct task_struct *tsk)
 {
 	vtime_account_user(tsk);
 }
 
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
 extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+static inline void vtime_account_user(struct task_struct *tsk) { }
 static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -93,9 +94,11 @@ static inline void vtime_account_irq_exit(struct task_struct *tsk)
 	/* On hard|softirq exit we always account to hard|softirq cputime */
 	vtime_account_system(tsk);
 }
+extern void vtime_flush(struct task_struct *tsk);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
 static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+static inline void vtime_flush(struct task_struct *tsk) { }
 #endif
 
 
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -269,17 +269,17 @@ DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
 TRACE_EVENT(itimer_state,
 
 	TP_PROTO(int which, const struct itimerval *const value,
-		 cputime_t expires),
+		 unsigned long long expires),
 
 	TP_ARGS(which, value, expires),
 
 	TP_STRUCT__entry(
 		__field(	int,		which		)
-		__field(	cputime_t,	expires		)
+		__field(	unsigned long long,	expires	)
 		__field(	long,		value_sec	)
 		__field(	long,		value_usec	)
 		__field(	long,		interval_sec	)
 		__field(	long,		interval_usec	)
 	),
 
 	TP_fast_assign(
@@ -292,7 +292,7 @@ TRACE_EVENT(itimer_state,
 	),
 
 	TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
-		  __entry->which, (unsigned long long)__entry->expires,
+		  __entry->which, __entry->expires,
 		  __entry->value_sec, __entry->value_usec,
 		  __entry->interval_sec, __entry->interval_usec)
 );
@@ -305,14 +305,14 @@ TRACE_EVENT(itimer_state,
 */
 TRACE_EVENT(itimer_expire,
 
-	TP_PROTO(int which, struct pid *pid, cputime_t now),
+	TP_PROTO(int which, struct pid *pid, unsigned long long now),
 
 	TP_ARGS(which, pid, now),
 
 	TP_STRUCT__entry(
 		__field( int ,		which	)
		__field( pid_t,		pid	)
-		__field( cputime_t,	now	)
+		__field( unsigned long long,	now	)
 	),
 
 	TP_fast_assign(
@@ -322,7 +322,7 @@ TRACE_EVENT(itimer_expire,
 	),
 
 	TP_printk("which=%d pid=%d now=%llu", __entry->which,
-		  (int) __entry->pid, (unsigned long long)__entry->now)
+		  (int) __entry->pid, __entry->now)
 );
 
 #ifdef CONFIG_NO_HZ_COMMON
--- a/init/main.c
+++ b/init/main.c
@@ -625,7 +625,6 @@ asmlinkage __visible void __init start_kernel(void)
 	numa_policy_init();
 	if (late_time_init)
 		late_time_init();
-	sched_clock_init();
 	calibrate_delay();
 	pidmap_init();
 	anon_vma_init();
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -453,8 +453,8 @@ static void fill_ac(acct_t *ac)
 	spin_lock_irq(&current->sighand->siglock);
 	tty = current->signal->tty;	/* Safe as we hold the siglock */
 	ac->ac_tty = tty ? old_encode_dev(tty_devnum(tty)) : 0;
-	ac->ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
-	ac->ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
+	ac->ac_utime = encode_comp_t(nsec_to_AHZ(pacct->ac_utime));
+	ac->ac_stime = encode_comp_t(nsec_to_AHZ(pacct->ac_stime));
 	ac->ac_flag = pacct->ac_flag;
 	ac->ac_mem = encode_comp_t(pacct->ac_mem);
 	ac->ac_minflt = encode_comp_t(pacct->ac_minflt);
@@ -530,7 +530,7 @@ static void do_acct_process(struct bsd_acct_struct *acct)
 void acct_collect(long exitcode, int group_dead)
 {
 	struct pacct_struct *pacct = &current->signal->pacct;
-	cputime_t utime, stime;
+	u64 utime, stime;
 	unsigned long vsize = 0;
 
 	if (group_dead && current->mm) {
@@ -559,6 +559,7 @@ void acct_collect(long exitcode, int group_dead)
 		pacct->ac_flag |= ACORE;
 	if (current->flags & PF_SIGNALED)
 		pacct->ac_flag |= AXSIG;
+
 	task_cputime(current, &utime, &stime);
 	pacct->ac_utime += utime;
 	pacct->ac_stime += stime;
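nsec_to_AHZ() scales a nanosecond total straight to BSD-accounting ticks, replacing the nanoseconds -> jiffies -> AHZ double conversion. A worked example, assuming an AHZ of 100 (the values are hypothetical):

/* sketch: 2.5 s of CPU time expressed in AHZ ticks, with AHZ assumed 100 */
u64 ns = 2500000000ULL;
u64 ahz_ticks = div_u64(ns, NSEC_PER_SEC / 100);	/* 250 ticks */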
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -82,19 +82,19 @@ void __delayacct_blkio_end(void)
 
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 {
-	cputime_t utime, stime, stimescaled, utimescaled;
+	u64 utime, stime, stimescaled, utimescaled;
 	unsigned long long t2, t3;
 	unsigned long flags, t1;
 	s64 tmp;
 
 	task_cputime(tsk, &utime, &stime);
 	tmp = (s64)d->cpu_run_real_total;
-	tmp += cputime_to_nsecs(utime + stime);
+	tmp += utime + stime;
 	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
 	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
 	tmp = (s64)d->cpu_scaled_run_real_total;
-	tmp += cputime_to_nsecs(utimescaled + stimescaled);
+	tmp += utimescaled + stimescaled;
 	d->cpu_scaled_run_real_total =
 		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
 
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -86,7 +86,7 @@ static void __exit_signal(struct task_struct *tsk)
 	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
 	struct tty_struct *uninitialized_var(tty);
-	cputime_t utime, stime;
+	u64 utime, stime;
 
 	sighand = rcu_dereference_check(tsk->sighand,
 					lockdep_tasklist_lock_is_held());
@@ -1091,7 +1091,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		struct signal_struct *sig = p->signal;
 		struct signal_struct *psig = current->signal;
 		unsigned long maxrss;
-		cputime_t tgutime, tgstime;
+		u64 tgutime, tgstime;
 
 		/*
 		 * The resource counters for the group leader are in its
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1314,7 +1314,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 
 	cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (cpu_limit != RLIM_INFINITY) {
-		sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
+		sig->cputime_expires.prof_exp = cpu_limit * NSEC_PER_SEC;
 		sig->cputimer.running = true;
 	}
 
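Worked example of the new expiry math: an RLIMIT_CPU soft limit given in seconds now lands directly in the nanosecond-based expiry field with one multiplication.

/* sketch: a 10 s CPU limit becomes a 10,000,000,000 ns profiling expiry */
unsigned long cpu_limit = 10;			/* hypothetical rlimit, seconds */
u64 prof_exp = (u64)cpu_limit * NSEC_PER_SEC;	/* 10000000000 */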
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -783,6 +783,20 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 }
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
 
+void __sched
+mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
+{
+	int token;
+
+	might_sleep();
+
+	token = io_schedule_prepare();
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
+			    subclass, NULL, _RET_IP_, NULL, 0);
+	io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io_nested);
+
 static inline int
 ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
@@ -950,6 +964,16 @@ int __sched mutex_lock_killable(struct mutex *lock)
 }
 EXPORT_SYMBOL(mutex_lock_killable);
 
+void __sched mutex_lock_io(struct mutex *lock)
+{
+	int token;
+
+	token = io_schedule_prepare();
+	mutex_lock(lock);
+	io_schedule_finish(token);
+}
+EXPORT_SYMBOL_GPL(mutex_lock_io);
+
 static noinline void __sched
 __mutex_lock_slowpath(struct mutex *lock)
 {
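A hedged caller-side sketch: code that blocks on a mutex guarding IO-completion state can now have that sleep charged as iowait (the mutex and function names are hypothetical):

/* sketch: waits on this mutex are accounted as IO wait */
static DEFINE_MUTEX(my_io_lock);

static void submit_and_wait(void)
{
	mutex_lock_io(&my_io_lock);	/* any sleep here counts toward iowait */
	/* ... touch IO-completion state ... */
	mutex_unlock(&my_io_lock);
}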
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -18,8 +18,8 @@ endif
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o swait.o completion.o idle.o
-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
-obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
+obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
Some files were not shown because too many files have changed in this diff.