kernel-ark/kernel/cpu.c
Ashok Raj 90d45d17f3 [PATCH] cpu hotplug: fix locking in cpufreq drivers
When calling target drivers to set the frequency, we take the cpucontrol lock.
When we modified the code to accommodate CPU hotplug, there was an attempt
to take a double lock of cpucontrol, leading to a deadlock.  Since the
current thread context already holds the cpucontrol lock, we don't need
to make another attempt to acquire it.

Now we leave a trace in current->flags indicating that the current thread
already holds the cpucontrol lock, so we don't attempt to acquire it a
second time.

Thanks to Andrew Morton for the beating:-)

From: Brice Goglin <Brice.Goglin@ens-lyon.org>

  Build fix

(akpm: this patch is still unpleasant.  Ashok continues to look for a cleaner
solution, doesn't he?  ;))

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Brice Goglin <Brice.Goglin@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-11-09 07:55:50 -08:00

/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */

#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <asm/semaphore.h>

/* This protects CPUs going up and down... */
DECLARE_MUTEX(cpucontrol);
EXPORT_SYMBOL_GPL(cpucontrol);

static struct notifier_block *cpu_chain;

/*
 * Used by callers to check whether they need to acquire cpucontrol
 * to protect a cpu from being removed. Some functions must be callable
 * both during normal operation and in response to a cpu being
 * added/removed. If the call happens in the same thread context as a
 * CPU hotplug operation, we don't need to take the lock, since we are
 * already protected.
 * See drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
 */
int current_in_cpu_hotplug(void)
{
	return (current->flags & PF_HOTPLUG_CPU);
}
EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);

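/*
 * Illustrative sketch (not part of the original file): the caller-side
 * pattern referenced above, modelled on the check this patch adds to
 * drivers/cpufreq/cpufreq.c.  The two helpers below are hypothetical and
 * only show the locking convention; the actual frequency-setting work that
 * would sit between them is elided.
 */
static inline void example_cpufreq_style_lock(void)
{
	/* Already inside the hotplug path: cpucontrol is held, don't re-take it */
	if (!current_in_cpu_hotplug())
		lock_cpu_hotplug();
}

static inline void example_cpufreq_style_unlock(void)
{
	if (!current_in_cpu_hotplug())
		unlock_cpu_hotplug();
}
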
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	if ((ret = down_interruptible(&cpucontrol)) != 0)
		return ret;
	ret = notifier_chain_register(&cpu_chain, nb);
	up(&cpucontrol);
	return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	down(&cpucontrol);
	notifier_chain_unregister(&cpu_chain, nb);
	up(&cpucontrol);
}
EXPORT_SYMBOL(unregister_cpu_notifier);

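/*
 * Illustrative sketch (not part of the original file): a typical subscriber
 * to the notifier chain above.  The callback and notifier_block names are
 * hypothetical; only register_cpu_notifier()/unregister_cpu_notifier() and
 * the CPU_* events used elsewhere in this file are assumed.
 */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		/* cpu has just come online: set up any per-cpu state here */
		printk(KERN_INFO "example: cpu %u is now online\n", cpu);
		break;
	case CPU_DEAD:
		/* cpu is gone: release whatever was set up for it */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};
/* A subsystem would call register_cpu_notifier(&example_cpu_notifier) at init. */
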
#ifdef CONFIG_HOTPLUG_CPU
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
				(state = %ld, flags = %lx) \n",
				 p->comm, p->pid, cpu, p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

/* Take this CPU down. */
static int take_cpu_down(void *unused)
{
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

int cpu_down(unsigned int cpu)
{
	int err;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;

	if ((err = lock_cpu_hotplug_interruptible()) != 0)
		return err;

	if (num_online_cpus() == 1) {
		err = -EBUSY;
		goto out;
	}

	if (!cpu_online(cpu)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Leave a trace in current->flags indicating that we are already in
	 * the process of performing CPU hotplug. Callers can check whether
	 * cpucontrol is already held by the current thread and, if so, avoid
	 * a deadlock by not trying to acquire it again.
	 */
	current->flags |= PF_HOTPLUG_CPU;

	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
						(void *)(long)cpu);
	if (err == NOTIFY_BAD) {
		printk("%s: attempt to take down CPU %u failed\n",
				__FUNCTION__, cpu);
		err = -EINVAL;
		goto out;
	}

	/* Ensure that we are not runnable on dying cpu */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	p = __stop_machine_run(take_cpu_down, NULL, cpu);
	if (IS_ERR(p)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
				(void *)(long)cpu) == NOTIFY_BAD)
			BUG();

		err = PTR_ERR(p);
		goto out_allowed;
	}

	if (cpu_online(cpu))
		goto out_thread;

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* Move it here so it can run. */
	kthread_bind(p, get_cpu());
	put_cpu();

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
	    == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
out:
	current->flags &= ~PF_HOTPLUG_CPU;
	unlock_cpu_hotplug();
	return err;
}

#endif /*CONFIG_HOTPLUG_CPU*/

int __devinit cpu_up(unsigned int cpu)
{
	int ret;
	void *hcpu = (void *)(long)cpu;

	if ((ret = down_interruptible(&cpucontrol)) != 0)
		return ret;

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Leave a trace in current->flags indicating that we are in the
	 * process of performing CPU hotplug.
	 */
	current->flags |= PF_HOTPLUG_CPU;
	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
	if (ret == NOTIFY_BAD) {
		printk("%s: attempt to bring up CPU %u failed\n",
				__FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	if (!cpu_online(cpu))
		BUG();

	/* Now call notifier in preparation. */
	notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
	if (ret != 0)
		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
	current->flags &= ~PF_HOTPLUG_CPU;
	up(&cpucontrol);
	return ret;
}