#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/vtime.h>
#include <linux/static_key.h>
#include <asm/ptrace.h>

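/*
 * Per-CPU state used to track whether the CPU is currently running in
 * userspace or in the kernel, so that RCU extended quiescent states and
 * full dynticks cputime accounting can be informed of user <-> kernel
 * transitions.
 */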
struct context_tracking {
	/*
	 * When active is false, probes are unset in order
	 * to minimize overhead: TIF flags are cleared
	 * and calls to user_enter/exit are ignored. This
	 * may be further optimized using static keys.
	 */
	bool active;
	enum ctx_state {
		IN_KERNEL = 0,
		IN_USER,
	} state;
};

#ifdef CONFIG_CONTEXT_TRACKING
extern struct static_key context_tracking_enabled;
DECLARE_PER_CPU(struct context_tracking, context_tracking);

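/*
 * Per-CPU accessors: whether the current CPU is tracked as running in
 * userspace, and whether tracking probes are active on this CPU.
 */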
static inline bool context_tracking_in_user(void)
{
	return __this_cpu_read(context_tracking.state) == IN_USER;
}

static inline bool context_tracking_active(void)
{
	return __this_cpu_read(context_tracking.active);
}

extern void context_tracking_cpu_set(int cpu);

extern void context_tracking_user_enter(void);
extern void context_tracking_user_exit(void);

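/*
 * Wrappers around the low level context tracking calls. They are no-ops
 * unless the context_tracking_enabled static key has been set, so the
 * common case (context tracking disabled) only costs a patched branch.
 * Architectures call user_enter() right before resuming userspace and
 * user_exit() when entering the kernel from userspace.
 */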
static inline void user_enter(void)
{
	if (static_key_false(&context_tracking_enabled))
		context_tracking_user_enter();
}

static inline void user_exit(void)
{
	if (static_key_false(&context_tracking_enabled))
		context_tracking_user_exit();
}

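/*
 * exception_enter()/exception_exit() bracket exception handlers that can
 * trigger while the CPU is tracked as being in userspace: the handler
 * temporarily switches the tracked state back to kernel mode and restores
 * the previous state on exit.
 *
 * Illustrative usage sketch from an arch fault handler (names are
 * examples, not taken from this header):
 *
 *	enum ctx_state prev_state;
 *
 *	prev_state = exception_enter();
 *	... handle the exception ...
 *	exception_exit(prev_state);
 */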
static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (!static_key_false(&context_tracking_enabled))
		return 0;

	prev_ctx = this_cpu_read(context_tracking.state);
	context_tracking_user_exit();

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (static_key_false(&context_tracking_enabled)) {
		if (prev_ctx == IN_USER)
			context_tracking_user_enter();
	}
}

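/*
 * Scheduler hook: called on every context switch so that the per-task
 * context tracking state follows the task that is being scheduled in.
 */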
extern void context_tracking_task_switch(struct task_struct *prev,
					 struct task_struct *next);
#else
static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline enum ctx_state exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline void context_tracking_task_switch(struct task_struct *prev,
						struct task_struct *next) { }
#endif /* !CONFIG_CONTEXT_TRACKING */
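/*
 * CONFIG_CONTEXT_TRACKING_FORCE enables context tracking on all CPUs at
 * boot, mainly to exercise this infrastructure on configurations that
 * would not otherwise use it; context_tracking_init() performs that
 * boot-time activation.
 */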
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_FORCE */

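/*
 * guest_enter()/guest_exit() are called by virtualization code (e.g. KVM)
 * around guest execution. With generic vtime accounting they account the
 * elapsed time as guest time via vtime_guest_enter/exit(); otherwise the
 * pending cputime is flushed and PF_VCPU tells the tick that subsequent
 * time is spent in a guest.
 */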
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static inline void guest_enter(void)
{
	if (static_key_false(&context_tracking_enabled) &&
	    vtime_accounting_enabled())
		vtime_guest_enter(current);
	else
		current->flags |= PF_VCPU;
}

static inline void guest_exit(void)
{
	if (static_key_false(&context_tracking_enabled) &&
	    vtime_accounting_enabled())
		vtime_guest_exit(current);
	else
		current->flags &= ~PF_VCPU;
}

#else
static inline void guest_enter(void)
{
	/*
	 * This is running in ioctl context, so it's safe to assume
	 * that it's the system time (stime) cputime that is pending
	 * to be flushed.
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
}

static inline void guest_exit(void)
{
	/* Flush the cputime we spent on the guest */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
|
2012-11-28 02:33:25 +08:00
|
|
|
|
|
|
|
#endif
|