diff --git a/include/linux/sched.h b/include/linux/sched.h
index edad7a43edea..fa39434e3fdd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1455,14 +1455,15 @@ struct task_struct {
 	/* Used for emulating ABI behavior of previous Linux versions */
 	unsigned int personality;
 
-	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
-				 * execve */
-	unsigned in_iowait:1;
-
-	/* Revert to default priority/policy when forking */
+	/* scheduler bits, serialized by scheduler locks */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 	unsigned sched_migrated:1;
+	unsigned :0; /* force alignment to the next boundary */
+
+	/* unserialized, strictly 'current' */
+	unsigned in_execve:1; /* bit to tell LSMs we're in execve */
+	unsigned in_iowait:1;
 #ifdef CONFIG_MEMCG
 	unsigned memcg_may_oom:1;
 #endif
@@ -2002,7 +2003,8 @@ static inline int pid_alive(const struct task_struct *p)
 }
 
 /**
- * is_global_init - check if a task structure is init
+ * is_global_init - check if a task structure is init. Since init
+ * is free to have sub-threads we need to check tgid.
  * @tsk: Task structure to be checked.
  *
 * Check if a task structure is the first user space task the kernel created.
@@ -2011,7 +2013,7 @@ static inline int pid_alive(const struct task_struct *p)
 */
 static inline int is_global_init(struct task_struct *tsk)
 {
-	return tsk->pid == 1;
+	return task_tgid_nr(tsk) == 1;
 }
 
 extern struct pid *cad_pid;
diff --git a/kernel/fork.c b/kernel/fork.c
index fce002ee3ddf..1155eac61687 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -380,6 +380,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 #endif
 	tsk->splice_pipe = NULL;
 	tsk->task_frag.page = NULL;
+	tsk->wake_q.next = NULL;
 
 	account_kernel_stack(ti, 1);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 90e26b11deaa..cfdc0e61066c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2689,7 +2689,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	int decayed, removed = 0;
 
 	if (atomic_long_read(&cfs_rq->removed_load_avg)) {
-		long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
+		s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
 		sa->load_avg = max_t(long, sa->load_avg - r, 0);
 		sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
 		removed = 1;
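
The sched.h hunk groups the task_struct bit-fields by how they are serialized: compilers update adjacent bit-fields with a read-modify-write of the whole allocation unit, so an unlocked write to in_execve or in_iowait by 'current' could otherwise clobber a concurrent, lock-protected update of the sched_* bits. The zero-width ':0' member closes the current unit so the unserialized bits start in a fresh word. Below is a minimal, hypothetical user-space sketch (not part of the patch; struct names are made up) that only demonstrates this layout effect:

/* layout_demo.c - show how "unsigned :0" splits bit-field allocation units */
#include <stdio.h>

struct packed_flags {			/* all five bits share one unsigned int */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned in_execve:1;
	unsigned in_iowait:1;
};

struct split_flags {			/* layout mirroring the patch */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;
	unsigned sched_migrated:1;
	unsigned :0;			/* force alignment to the next boundary */
	unsigned in_execve:1;
	unsigned in_iowait:1;
};

int main(void)
{
	/* With the :0 separator the second group lives in its own allocation
	 * unit, so read-modify-write updates of the two groups touch
	 * different words. */
	printf("packed: %zu bytes, split: %zu bytes\n",
	       sizeof(struct packed_flags), sizeof(struct split_flags));
	return 0;
}

With GCC or Clang on x86-64 the packed variant typically occupies one 4-byte unit while the split variant needs two, which is the separation the serialized/unserialized comments in the hunk rely on.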