sched/fair: Convert arch_scale_cpu_capacity() from weak function to #define
Bring arch_scale_cpu_capacity() in line with the recent change of its
arch_scale_freq_capacity() sibling in commit dfbca41f34 ("sched:
Optimize freq invariant accounting") from weak function to #define to
allow inlining of the function.
While at it, remove the ARCH_CAPACITY sched_feature as well. With the
change to #define there isn't a straightforward way to allow runtime
switch between an arch implementation and the default implementation of
arch_scale_cpu_capacity() using sched_feature. The default was to use
the arch-specific implementation, but only the arm architecture provides
one and that is essentially equivalent to the default implementation.
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Dietmar Eggemann <Dietmar.Eggemann@arm.com>
Cc: Juri Lelli <Juri.Lelli@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: daniel.lezcano@linaro.org
Cc: mturquette@baylibre.com
Cc: pang.xunlei@zte.com.cn
Cc: rjw@rjwysocki.net
Cc: sgurrappadi@nvidia.com
Cc: vincent.guittot@linaro.org
Cc: yuyang.du@intel.com
Link: http://lkml.kernel.org/r/1439569394-11974-3-git-send-email-morten.rasmussen@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
e0f5f3afd2
commit
8cd5601c50
3 changed files with 12 additions and 26 deletions
|
@@ -6054,19 +6054,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
-		return sd->smt_gain / sd->span_weight;
-
-	return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_cpu_capacity(sd, cpu);
-}
-
 static unsigned long scale_rt_capacity(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -6096,16 +6083,9 @@ static unsigned long scale_rt_capacity(int cpu)
 
 static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 {
-	unsigned long capacity = SCHED_CAPACITY_SCALE;
+	unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
 	struct sched_group *sdg = sd->groups;
 
-	if (sched_feat(ARCH_CAPACITY))
-		capacity *= arch_scale_cpu_capacity(sd, cpu);
-	else
-		capacity *= default_scale_cpu_capacity(sd, cpu);
-
-	capacity >>= SCHED_CAPACITY_SHIFT;
-
 	cpu_rq(cpu)->cpu_capacity_orig = capacity;
 
 	capacity *= scale_rt_capacity(cpu);
@@ -36,11 +36,6 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true)
  */
 SCHED_FEAT(WAKEUP_PREEMPTION, true)
 
-/*
- * Use arch dependent cpu capacity functions
- */
-SCHED_FEAT(ARCH_CAPACITY, true)
-
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
 SCHED_FEAT(LB_BIAS, true)
@@ -1394,6 +1394,17 @@ unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 }
 #endif
 
+#ifndef arch_scale_cpu_capacity
+static __always_inline
+unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+	if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
+		return sd->smt_gain / sd->span_weight;
+
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
+
 static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 	rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
Loading…
Add table
Reference in a new issue