sched: old sleeper bonus

Add a SCHED_FEAT_NORMALIZED_SLEEPER feature bit (enabled by default) so the
old, un-normalized sleeper bonus can be selected at runtime. With the bit
cleared, place_entity() credits a waking task the full sysctl_sched_latency;
with it set, the credit stays scaled against the cfs_rq load via
calc_delta_fair(), as before.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
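
For reference, a minimal user-space sketch of the two bonus calculations the
feature bit switches between. It assumes calc_delta_fair() scales its delta by
NICE_0_LOAD relative to the queue's load weight; the constants, helper names
and the 20ms latency value below are illustrative, not taken from this patch:

	#include <stdio.h>

	/* Illustrative constants; the real values live in the scheduler. */
	#define NICE_0_LOAD		1024ULL
	#define SCHED_LATENCY_NS	20000000ULL	/* assume 20ms latency */

	/* Assumed model of calc_delta_fair(): latency * NICE_0_LOAD / weight. */
	static unsigned long long normalized_bonus(unsigned long long queue_weight)
	{
		return SCHED_LATENCY_NS * NICE_0_LOAD / queue_weight;
	}

	/* The old sleeper bonus: the full latency, independent of queue load. */
	static unsigned long long old_bonus(void)
	{
		return SCHED_LATENCY_NS;
	}

	int main(void)
	{
		/* Two runnable nice-0 tasks -> queue weight of 2 * NICE_0_LOAD. */
		unsigned long long w = 2 * NICE_0_LOAD;

		printf("normalized sleeper bonus: %llu ns\n", normalized_bonus(w));
		printf("old sleeper bonus:        %llu ns\n", old_bonus());
		return 0;
	}

With two nice-0 tasks queued, the normalized bonus shrinks to half the latency
while the old bonus stays at the full latency, which is the behavioural
difference the NORMALIZED_SLEEPER knob exposes.
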
diff --git a/kernel/sched.c b/kernel/sched.c
index 521b89b..070eefd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -680,6 +680,7 @@
SCHED_FEAT_SYNC_WAKEUPS = 32,
SCHED_FEAT_HRTICK = 64,
SCHED_FEAT_DOUBLE_TICK = 128,
+ SCHED_FEAT_NORMALIZED_SLEEPER = 256,
};
const_debug unsigned int sysctl_sched_features =
@@ -690,7 +691,8 @@
SCHED_FEAT_CACHE_HOT_BUDDY * 1 |
SCHED_FEAT_SYNC_WAKEUPS * 1 |
SCHED_FEAT_HRTICK * 1 |
- SCHED_FEAT_DOUBLE_TICK * 0;
+ SCHED_FEAT_DOUBLE_TICK * 0 |
+ SCHED_FEAT_NORMALIZED_SLEEPER * 1;
#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 290cf77..022e036 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -501,8 +501,11 @@
if (!initial) {
/* sleeps upto a single latency don't count. */
if (sched_feat(NEW_FAIR_SLEEPERS)) {
- vruntime -= calc_delta_fair(sysctl_sched_latency,
- &cfs_rq->load);
+ if (sched_feat(NORMALIZED_SLEEPER))
+ vruntime -= calc_delta_fair(sysctl_sched_latency,
+ &cfs_rq->load);
+ else
+ vruntime -= sysctl_sched_latency;
}
/* ensure we never gain time by being placed backwards. */