sched_ext: Add boilerplate for extensible scheduler class
This adds dummy implementations of sched_ext interfaces which interact
with the scheduler core and hooks them in the correct places. As they're
all dummies, this doesn't cause any behavior changes. This is split out
to help reviewing.

v2: balance_scx_on_up() dropped. This will be handled in sched_ext proper.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: David Vernet <dvernet@meta.com>
Acked-by: Josh Don <joshdon@google.com>
Acked-by: Hao Luo <haoluo@google.com>
Acked-by: Barret Rhoden <brho@google.com>
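For reviewers, the pattern every hunk below follows is the classic compiled-out stub: with the config option unset, each hook is an empty static inline or a constant macro, so call sites need no #ifdefs and the compiler discards the calls. A minimal userspace sketch of the pattern (not kernel code; FEATURE_X and the feature_* names are illustrative):

/* Userspace sketch of the compiled-out-stub pattern; FEATURE_X and the
 * feature_* names are illustrative, not kernel interfaces. */
#include <stdbool.h>
#include <stdio.h>

struct task { int pid; };

#ifdef FEATURE_X
#error "NOT IMPLEMENTED YET"	/* mirrors this patch's placeholder */
#else /* !FEATURE_X */
#define feature_enabled()	false
static inline void feature_pre_fork(struct task *p) { (void)p; }
static inline int feature_fork(struct task *p) { (void)p; return 0; }
#endif /* FEATURE_X */

static int do_fork(struct task *p)
{
	feature_pre_fork(p);		/* compiles to nothing when disabled */
	if (feature_enabled())		/* constant-folds to false */
		printf("feature path\n");
	return feature_fork(p);		/* always 0 when disabled */
}

int main(void)
{
	struct task t = { .pid = 1 };
	return do_fork(&t);
}

Built without -DFEATURE_X this behaves exactly like code with no hooks at all, which is the "no behavior changes" property the commit message claims.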
parent 2c8d046d5d
commit a7a9fc5492
6 changed files with 66 additions and 8 deletions
--- /dev/null
+++ b/include/linux/sched/ext.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_EXT_H
+#define _LINUX_SCHED_EXT_H
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+#error "NOT IMPLEMENTED YET"
+#else	/* !CONFIG_SCHED_CLASS_EXT */
+
+static inline void sched_ext_free(struct task_struct *p) {}
+
+#endif	/* CONFIG_SCHED_CLASS_EXT */
+#endif	/* _LINUX_SCHED_EXT_H */
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -23,6 +23,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/cputime.h>
+#include <linux/sched/ext.h>
 #include <linux/seq_file.h>
 #include <linux/rtmutex.h>
 #include <linux/init.h>
@@ -971,6 +972,7 @@ void __put_task_struct(struct task_struct *tsk)
 	WARN_ON(refcount_read(&tsk->usage));
 	WARN_ON(tsk == current);
 
+	sched_ext_free(tsk);
 	io_uring_free(tsk);
 	cgroup_free(tsk);
 	task_numa_free(tsk, true);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4559,6 +4559,8 @@ late_initcall(sched_core_sysctl_init);
  */
 int sched_fork(unsigned long clone_flags, struct task_struct *p)
 {
+	int ret;
+
 	__sched_fork(clone_flags, p);
 	/*
 	 * We mark the process as NEW here. This guarantees that
@@ -4595,12 +4597,16 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 		p->sched_reset_on_fork = 0;
 	}
 
-	if (dl_prio(p->prio))
-		return -EAGAIN;
-	else if (rt_prio(p->prio))
+	scx_pre_fork(p);
+
+	if (dl_prio(p->prio)) {
+		ret = -EAGAIN;
+		goto out_cancel;
+	} else if (rt_prio(p->prio)) {
 		p->sched_class = &rt_sched_class;
-	else
+	} else {
 		p->sched_class = &fair_sched_class;
+	}
 
 	init_entity_runnable_average(&p->se);
 
@@ -4618,6 +4624,10 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 #endif
 	return 0;
+
+out_cancel:
+	scx_cancel_fork(p);
+	return ret;
 }
 
 int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
@@ -4648,16 +4658,18 @@ int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
 		p->sched_class->task_fork(p);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
-	return 0;
+	return scx_fork(p);
 }
 
 void sched_cancel_fork(struct task_struct *p)
 {
+	scx_cancel_fork(p);
 }
 
 void sched_post_fork(struct task_struct *p)
 {
 	uclamp_post_fork(p);
+	scx_post_fork(p);
 }
 
 unsigned long to_ratio(u64 period, u64 runtime)
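Taken together, the sched_fork()/sched_cgroup_fork() hunks set up a pairing contract: scx_pre_fork() runs before any failure point, scx_fork() is a later veto point, and every pre-fork is balanced by scx_post_fork() on success or scx_cancel_fork() on failure (including fork failures outside the scheduler, via sched_cancel_fork()). A condensed userspace model of that control flow, with dummy hooks standing in for the kernel's and an illustrative error value:

/* Userspace model of the pre_fork/fork/cancel_fork/post_fork pairing
 * introduced above; the hooks are dummies standing in for the kernel's. */
#include <errno.h>

struct task { int prio; };

static inline void scx_pre_fork(struct task *p) { (void)p; }
static inline int scx_fork(struct task *p) { (void)p; return 0; }
static inline void scx_post_fork(struct task *p) { (void)p; }
static inline void scx_cancel_fork(struct task *p) { (void)p; }

static int fork_path(struct task *p, int is_deadline)
{
	int ret;

	scx_pre_fork(p);		/* runs before any failure point */

	if (is_deadline) {		/* deadline tasks refuse to fork */
		ret = -EAGAIN;
		goto out_cancel;
	}

	ret = scx_fork(p);		/* sched_cgroup_fork()'s veto point */
	if (ret)
		goto out_cancel;

	scx_post_fork(p);		/* success: commit */
	return 0;

out_cancel:
	scx_cancel_fork(p);		/* failure: unwind the pre-fork work */
	return ret;
}

int main(void) { struct task t = { 0 }; return fork_path(&t, 0); }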
@@ -5800,7 +5812,7 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
 	 * We can terminate the balance pass as soon as we know there is
 	 * a runnable task of @class priority or higher.
 	 */
-	for_class_range(class, prev->sched_class, &idle_sched_class) {
+	for_balance_class_range(class, prev->sched_class, &idle_sched_class) {
 		if (class->balance(rq, prev, rf))
 			break;
 	}
@@ -5818,6 +5830,9 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	const struct sched_class *class;
 	struct task_struct *p;
 
+	if (scx_enabled())
+		goto restart;
+
 	/*
 	 * Optimization: we know that if all tasks are in the fair class we can
 	 * call that function directly, but only if the @prev task wasn't of a
@@ -5858,7 +5873,7 @@ restart:
 	if (prev->dl_server)
 		prev->dl_server = NULL;
 
-	for_each_class(class) {
+	for_each_active_class(class) {
 		p = class->pick_next_task(rq);
 		if (p)
 			return p;
@@ -5891,7 +5906,7 @@ static inline struct task_struct *pick_task(struct rq *rq)
 	const struct sched_class *class;
 	struct task_struct *p;
 
-	for_each_class(class) {
+	for_each_active_class(class) {
 		p = class->pick_task(rq);
 		if (p)
 			return p;
@@ -8355,6 +8370,7 @@ void __init sched_init(void)
 	balance_push_set(smp_processor_id(), false);
 #endif
 	init_sched_fair_class();
+	init_sched_ext_class();
 
 	psi_init();
 
--- /dev/null
+++ b/kernel/sched/ext.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_SCHED_CLASS_EXT
+#error "NOT IMPLEMENTED YET"
+#else	/* CONFIG_SCHED_CLASS_EXT */
+
+#define scx_enabled()		false
+
+static inline void scx_pre_fork(struct task_struct *p) {}
+static inline int scx_fork(struct task_struct *p) { return 0; }
+static inline void scx_post_fork(struct task_struct *p) {}
+static inline void scx_cancel_fork(struct task_struct *p) {}
+static inline void init_sched_ext_class(void) {}
+
+#define for_each_active_class		for_each_class
+#define for_balance_class_range		for_class_range
+
+#endif	/* CONFIG_SCHED_CLASS_EXT */
+
+#if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP)
+#error "NOT IMPLEMENTED YET"
+#else
+static inline void scx_update_idle(struct rq *rq, bool idle) {}
+#endif
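With CONFIG_SCHED_CLASS_EXT unset, everything above collapses: the fork hooks are empty inlines, scx_enabled() is the constant false, and the two iteration macros alias for_each_class()/for_class_range(), which is why the core.c hunks compile to exactly the code they replace. The separate #if defined(CONFIG_SCHED_CLASS_EXT) && defined(CONFIG_SMP) block presumably reflects that the eventual scx_update_idle() implementation will be SMP-only; on every other configuration it stays an empty stub.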
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -452,11 +452,13 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
+	scx_update_idle(rq, false);
 }
 
 static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
 {
 	update_idle_core(rq);
+	scx_update_idle(rq, true);
 	schedstat_inc(rq->sched_goidle);
 }
 
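The two scx_update_idle() calls bracket the idle class: true when a CPU picks the idle task, false when it puts it back, giving a future BPF scheduler a hook for tracking idle CPUs. For now both resolve to the empty stub in kernel/sched/ext.h.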
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -3658,4 +3658,6 @@ static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
 
 #endif
 
+#include "ext.h"
+
 #endif /* _KERNEL_SCHED_SCHED_H */