/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
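
/*
 * For context: SCHED_FEAT() entries like those in this file are consumed
 * via the classic X-macro pattern; the file is included several times
 * under different SCHED_FEAT() definitions. A sketch in the spirit of
 * kernel/sched/sched.h and kernel/sched/core.c (details vary by kernel
 * version):
 *
 *	#define SCHED_FEAT(name, enabled)	__SCHED_FEAT_##name,
 *	enum {
 *	#include "features.h"
 *		__SCHED_FEAT_NR,
 *	};
 *	#undef SCHED_FEAT
 *
 *	#define sched_feat(x) \
 *		(sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
 *
 * The defaults given here seed the sysctl_sched_features bitmask, and
 * features can be toggled at runtime via /sys/kernel/debug/sched_features.
 */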

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, true)
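
/*
 * How the two features above are applied when an entity is placed on a
 * runqueue; a simplified sketch loosely following place_entity() in
 * kernel/sched/fair.c (details vary by kernel version):
 *
 *	u64 vruntime = cfs_rq->min_vruntime;
 *
 *	if (initial && sched_feat(START_DEBIT))
 *		vruntime += sched_vslice(cfs_rq, se);	// new task owes one slice
 *
 *	if (!initial) {
 *		unsigned long thresh = sysctl_sched_latency;
 *
 *		if (sched_feat(GENTLE_FAIR_SLEEPERS))
 *			thresh >>= 1;	// only 50% of the service deficit
 *
 *		vruntime -= thresh;	// sleeper credit
 *	}
 *
 *	se->vruntime = max_vruntime(se->vruntime, vruntime);
 */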

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it is likely going to consume data we
 * touched; this increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, false)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; this
 * increases cache locality.
 */
SCHED_FEAT(LAST_BUDDY, true)
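
/*
 * The buddy hints are consumed when picking the next entity to run; a
 * simplified sketch in the spirit of pick_next_entity() in fair.c, where
 * wakeup_preempt_entity() verifies the pick would not be too unfair:
 *
 *	struct sched_entity *left = __pick_first_entity(cfs_rq);
 *	struct sched_entity *se = left;
 *
 *	// Prefer the last buddy: return the CPU to a preempted task.
 *	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
 *		se = cfs_rq->last;
 *
 *	// The next buddy (the task we just woke) takes precedence.
 *	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
 *		se = cfs_rq->next;
 *
 * cfs_rq->next and cfs_rq->last are only set on wakeup when NEXT_BUDDY
 * and LAST_BUDDY, respectively, are enabled.
 */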

/*
 * Consider buddies to be cache hot; this decreases the likelihood of a
 * cache buddy being migrated away, which increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, true)
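
/*
 * On the load-balance side this shows up in task_hot(); roughly
 * (simplified from fair.c, details vary by kernel version):
 *
 *	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 *	    (&p->se == cfs_rq_of(&p->se)->next ||
 *	     &p->se == cfs_rq_of(&p->se)->last))
 *		return 1;	// buddies count as cache hot, resist migration
 */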

/*
 * Allow wakeup-time preemption of the current task:
 */
SCHED_FEAT(WAKEUP_PREEMPTION, true)
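
/*
 * Disabling this makes the wakeup path skip the preemption decision
 * entirely; roughly, in check_preempt_wakeup() (fair.c, simplified):
 *
 *	if (unlikely(p->policy != SCHED_NORMAL) ||
 *	    !sched_feat(WAKEUP_PREEMPTION))
 *		return;		// never preempt curr on wakeup
 */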

/*
 * Use arch-dependent CPU capacity functions.
 */
SCHED_FEAT(ARCH_CAPACITY, true)
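
/*
 * Roughly: when computing a CPU's capacity for load balancing, prefer
 * the architecture's scaling hook over the generic default. A sketch in
 * the spirit of update_cpu_capacity() in fair.c; the fallback helper
 * name here is an assumption, as it varies across kernel versions:
 *
 *	unsigned long capacity = SCHED_CAPACITY_SCALE;
 *
 *	if (sched_feat(ARCH_CAPACITY))
 *		capacity *= arch_scale_cpu_capacity(sd, cpu);
 *	else
 *		capacity *= default_scale_capacity(sd, cpu);	// assumed name
 *	capacity >>= SCHED_CAPACITY_SHIFT;
 */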

SCHED_FEAT(HRTICK, false)
SCHED_FEAT(DOUBLE_TICK, false)
SCHED_FEAT(LB_BIAS, true)

/*
 * Decrement CPU capacity based on time not spent running tasks.
 */
SCHED_FEAT(NONTASK_CAPACITY, true)

/*
 * Queue remote wakeups on the target CPU and process them
 * using the scheduler IPI. Reduces rq->lock contention/bounces.
 */
SCHED_FEAT(TTWU_QUEUE, true)
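
/*
 * A sketch of the queueing decision, after ttwu_queue() in core.c
 * (simplified; details vary by kernel version):
 *
 *	struct rq *rq = cpu_rq(cpu);
 *
 *	if (sched_feat(TTWU_QUEUE) &&
 *	    !cpus_share_cache(smp_processor_id(), cpu)) {
 *		ttwu_queue_remote(p, cpu);	// wake via scheduler IPI
 *		return;				// no remote rq->lock taken
 *	}
 *
 *	raw_spin_lock(&rq->lock);		// local / cache-sharing case
 *	ttwu_do_activate(rq, p, 0);
 *	raw_spin_unlock(&rq->lock);
 */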

SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)

/*
 * Apply the automatic NUMA scheduling policy. Enabled automatically
 * at runtime if running on a NUMA machine. Can be controlled via
 * the numa_balancing= boot parameter.
 */
#ifdef CONFIG_NUMA_BALANCING
SCHED_FEAT(NUMA, false)
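
/*
 * The boot parameter flips this feature bit; a sketch in the spirit of
 * set_numabalancing_state() in core.c (simplified), called from the
 * numa_balancing= __setup handler with true/false for enable/disable:
 *
 *	void set_numabalancing_state(bool enabled)
 *	{
 *		if (enabled)
 *			sched_feat_set("NUMA");
 *		else
 *			sched_feat_set("NO_NUMA");
 *	}
 */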

/*
 * NUMA_FAVOUR_HIGHER will favor moving tasks towards nodes where a
 * higher number of hinting faults are recorded during active load
 * balancing.
 */
SCHED_FEAT(NUMA_FAVOUR_HIGHER, true)

/*
 * NUMA_RESIST_LOWER will resist moving tasks towards nodes where a
 * lower number of hinting faults have been recorded. As this has the
 * potential to prevent a task from ever migrating to a new node due to
 * CPU overload, it is disabled by default.
 */
SCHED_FEAT(NUMA_RESIST_LOWER, false)
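
/*
 * Both NUMA hints are consulted by the load balancer when deciding
 * whether moving task p between NUMA nodes helps or hurts locality; a
 * sketch after migrate_improves_locality() in fair.c (its
 * NUMA_RESIST_LOWER counterpart, migrate_degrades_locality(), mirrors
 * this with the comparison reversed):
 *
 *	if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults)
 *		return false;
 *
 *	src_nid = cpu_to_node(env->src_cpu);
 *	dst_nid = cpu_to_node(env->dst_cpu);
 *	if (src_nid == dst_nid)
 *		return false;
 *
 *	// more recorded hinting faults on the destination => move helps
 *	return task_faults(p, dst_nid) > task_faults(p, src_nid);
 */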
#endif