Note that sched_core_fork() is called from under tasklist_lock, and
not from sched_fork() earlier. This avoids a few races later.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.980003687@infradead.org
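For illustration, here is a minimal runnable userspace sketch of the
ordering the note above describes. Everything in it (the mutex standing
in for tasklist_lock, the cookie type, and the helpers clone_cookie(),
put_cookie() and fake_copy_process()) is invented for this sketch rather
than taken from the kernel; it only models the pattern the hunks below
establish: clone the cookie while the lock is held, and free it on the
error path before the lock is dropped.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cookie { int refs; };            /* stand-in for the core cookie */

static pthread_mutex_t tasklist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cookie *parent_cookie;    /* what "current" would carry */

/* Take one reference on the parent's cookie, as sched_core_fork() does. */
static struct cookie *clone_cookie(void)
{
        if (parent_cookie)
                parent_cookie->refs++;
        return parent_cookie;
}

/* Drop one reference, as sched_core_free() does. */
static void put_cookie(struct cookie *c)
{
        if (c && --c->refs == 0)
                free(c);
}

/*
 * Simplified tail of copy_process(): the cookie is cloned only once the
 * lock is held, and the error path frees it before unlocking, mirroring
 * the fork.c hunks below.
 */
static int fake_copy_process(int fail)
{
        struct cookie *child_cookie;

        pthread_mutex_lock(&tasklist_lock);
        child_cookie = clone_cookie();          /* sched_core_fork(p) */

        if (fail) {                             /* bad_fork_cancel_cgroup: */
                put_cookie(child_cookie);       /* sched_core_free(p) */
                pthread_mutex_unlock(&tasklist_lock);
                return -1;
        }

        pthread_mutex_unlock(&tasklist_lock);
        put_cookie(child_cookie);       /* in the kernel, much later, at teardown */
        return 0;
}

int main(void)
{
        parent_cookie = calloc(1, sizeof(*parent_cookie));
        parent_cookie->refs = 1;

        printf("ok path:   %d\n", fake_copy_process(0));
        printf("fail path: %d\n", fake_copy_process(1));

        put_cookie(parent_cookie);
        return 0;
}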
#ifdef CONFIG_SCHED_CORE
extern void sched_core_free(struct task_struct *tsk);
+extern void sched_core_fork(struct task_struct *p);
#else
static inline void sched_core_free(struct task_struct *tsk) { }
+static inline void sched_core_fork(struct task_struct *p) { }
#endif
#endif
klp_copy_process(p);
+ sched_core_fork(p);
+
spin_lock(&current->sighand->siglock);
/*
return p;
bad_fork_cancel_cgroup:
+ sched_core_free(p);
spin_unlock(&current->sighand->siglock);
write_unlock_irq(&tasklist_lock);
cgroup_cancel_fork(p, args);
return cookie;
}
+void sched_core_fork(struct task_struct *p)
+{
+ RB_CLEAR_NODE(&p->core_node);
+ p->core_cookie = sched_core_clone_cookie(current);
+}
+
void sched_core_free(struct task_struct *p)
{
sched_core_put_cookie(p->core_cookie);
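Note the pairing these hunks set up: the reference sched_core_fork()
takes via sched_core_clone_cookie() is dropped exactly once by
sched_core_free(), either on the fork error path above or, for tasks
that launch successfully, later when the task itself is released (that
call site is outside this excerpt). Only the closing "return cookie;"
of sched_core_clone_cookie() is visible above; a sketch of its likely
shape, assuming it is just a pi_lock-protected get on the parent's
cookie (an assumption, since the body lies outside this excerpt):

static unsigned long sched_core_clone_cookie(struct task_struct *p)
{
        unsigned long cookie, flags;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        /* assumed "get" counterpart of sched_core_put_cookie() above */
        cookie = sched_core_get_cookie(p->core_cookie);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);

        return cookie;
}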