kernel/x86-Avoid-invoking-RCU-when-CPU-is-idle.patch

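rcu_idle_enter() and rcu_idle_exit() used to be called from the cpu_idle() loops in arch/x86/kernel/process_32.c and process_64.c, so the idle routines they invoke — including the trace_power_end()/trace_cpu_idle() tracepoints inside those routines — ran entirely within the RCU-idle window. This patch pushes the two markers down into the individual idle routines (default_idle(), mwait_idle(), poll_idle(), and the intel_idle cpuidle driver), so that only the actual halt/mwait/poll executes with RCU not watching and the surrounding tracing and notifier code can still legally use RCU.

The fragment below is only a sketch of the resulting shape of an idle routine, not part of the patch: the function name is made up, and the tracing, TS_POLLING and hlt_use_halt() handling of the real default_idle() are left out.

	static void example_idle(void)		/* illustrative name, not in the kernel */
	{
		/* tracepoints/notifiers before this point may still use RCU */
		rcu_idle_enter();		/* RCU stops watching this CPU */

		if (!need_resched())
			safe_halt();		/* enables interrupts racelessly */
		else
			local_irq_enable();

		rcu_idle_exit();		/* RCU is watching again */
		/* tracepoints after this point may use RCU again */
	}

Note that the branch of mwait_idle() that only re-enables interrupts also gains a back-to-back rcu_idle_enter()/rcu_idle_exit() pair, so every pass through the routine performs the same pair of RCU transitions.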

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 15763af..f6978b0 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -386,17 +386,21 @@ void default_idle(void)
 		 */
 		smp_mb();
+		rcu_idle_enter();
 		if (!need_resched())
 			safe_halt();	/* enables interrupts racelessly */
 		else
 			local_irq_enable();
+		rcu_idle_exit();
 		current_thread_info()->status |= TS_POLLING;
 		trace_power_end(smp_processor_id());
 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 	} else {
 		local_irq_enable();
 		/* loop is done by the caller */
+		rcu_idle_enter();
 		cpu_relax();
+		rcu_idle_exit();
 	}
 }
 #ifdef CONFIG_APM_MODULE
@@ -457,14 +461,19 @@ static void mwait_idle(void)
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
 		smp_mb();
+		rcu_idle_enter();
 		if (!need_resched())
 			__sti_mwait(0, 0);
 		else
 			local_irq_enable();
+		rcu_idle_exit();
 		trace_power_end(smp_processor_id());
 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
-	} else
+	} else {
 		local_irq_enable();
+		rcu_idle_enter();
+		rcu_idle_exit();
+	}
 }
 /*
@@ -477,8 +486,10 @@ static void poll_idle(void)
 	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
 	trace_cpu_idle(0, smp_processor_id());
 	local_irq_enable();
+	rcu_idle_enter();
 	while (!need_resched())
 		cpu_relax();
+	rcu_idle_exit();
 	trace_power_end(smp_processor_id());
 	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 485204f..6d9d4d5 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -100,7 +100,6 @@ void cpu_idle(void)
 	/* endless idle loop with no priority at all */
 	while (1) {
 		tick_nohz_idle_enter();
-		rcu_idle_enter();
 		while (!need_resched()) {
 			check_pgt_cache();
@@ -117,7 +116,6 @@ void cpu_idle(void)
 				pm_idle();
 			start_critical_timings();
 		}
-		rcu_idle_exit();
 		tick_nohz_idle_exit();
 		preempt_enable_no_resched();
 		schedule();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9b9fe4a..55a1a35 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -140,13 +140,9 @@ void cpu_idle(void)
 			/* Don't trace irqs off for idle */
 			stop_critical_timings();
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
-
 			if (cpuidle_idle_call())
 				pm_idle();
-			rcu_idle_exit();
 			start_critical_timings();
 			/* In many cases the interrupt that ended idle
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 20bce51..a9ddab8 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -261,6 +261,7 @@ static int intel_idle(struct cpuidle_device *dev,
 	kt_before = ktime_get_real();
 	stop_critical_timings();
+	rcu_idle_enter();
 	if (!need_resched()) {
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -268,6 +269,7 @@ static int intel_idle(struct cpuidle_device *dev,
 		if (!need_resched())
 			__mwait(eax, ecx);
 	}
+	rcu_idle_exit();
 	start_critical_timings();