/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
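
/*
 * Usage sketch (editorial note, not kernel source): forced threading is
 * selected at boot time via the "threadirqs" kernel command line option,
 * e.g.
 *
 *	linux ... threadirqs
 *
 * There is no runtime switch; early_param() parses the option long
 * before any interrupt is requested.
 */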

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
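
/*
 * Illustrative sketch (not part of this file): how a driver teardown
 * path typically uses synchronize_irq(). The my_card structure, its
 * fields and MY_CARD_IRQ_ENABLE are hypothetical.
 */
#if 0
static void my_card_stop(struct my_card *card)
{
	/* Tell the hardware to stop raising the interrupt ... */
	writel(0, card->regs + MY_CARD_IRQ_ENABLE);
	/*
	 * ... then wait for handlers still running on other CPUs, so
	 * that freeing the rx ring below cannot race with them.
	 */
	synchronize_irq(card->irq);
	kfree(card->rx_ring);
}
#endif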

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We can not call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
/* Stub must take irq_data like the real version; callers pass &desc->irq_data */
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = chip->irq_set_affinity(data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(data->affinity, mask);
			/* fall through */
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irq_compat_set_affinity(desc);
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@mask:		cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
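
/*
 * Illustrative sketch (hypothetical my_nic type and fields): a multiqueue
 * driver publishing one preferred CPU per queue vector via the hint,
 * which userspace (e.g. irqbalance) can read from
 * /proc/irq/<n>/affinity_hint. The hint must be set back to NULL before
 * the irq is freed (see the WARN_ON_ONCE in __free_irq() below).
 */
#if 0
static void my_nic_set_hints(struct my_nic *nic)
{
	int i;

	for (i = 0; i < nic->num_queues; i++)
		irq_set_affinity_hint(nic->queue_irq[i],
				      cpumask_of(i % num_online_cpus()));
}
#endif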

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification. Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context. Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
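
/*
 * Illustrative sketch (hypothetical my_* helpers): enabling affinity
 * change notification for one irq. Only ->notify and ->release are
 * filled in by the caller; irq_set_affinity_notifier() initialises the
 * kref and the work item itself.
 */
#if 0
static void my_notify(struct irq_affinity_notify *notify,
		      const cpumask_t *mask)
{
	pr_info("irq %u affinity changed\n", notify->irq);
}

static void my_release(struct kref *ref)
{
	kfree(container_of(ref, struct irq_affinity_notify, kref));
}

static int my_enable_notify(unsigned int irq)
{
	struct irq_affinity_notify *notify;

	notify = kzalloc(sizeof(*notify), GFP_KERNEL);
	if (!notify)
		return -ENOMEM;
	notify->notify = my_notify;
	notify->release = my_release;
	return irq_set_affinity_notifier(irq, notify);
}
#endif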

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cpumask *set = irq_default_affinity;
	int ret;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else {
			irq_compat_clr_affinity(desc);
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
		}
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
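
/*
 * Illustrative sketch (hypothetical my_dev type and helpers): the usual
 * pairing of disable_irq()/enable_irq() to keep the handler out of a
 * critical reconfiguration. The caller must not hold any resource the
 * handler itself takes, or disable_irq() will deadlock waiting for it.
 */
#if 0
static void my_dev_reconfigure(struct my_dev *dev)
{
	disable_irq(dev->irq);		/* waits for running handlers */
	my_dev_write_config(dev);	/* handler cannot run here */
	enable_irq(dev->irq);		/* must balance the disable */
}
#endif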

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq(). If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 *	are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default. Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
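
/*
 * Illustrative sketch (hypothetical my_dev driver): marking the device
 * interrupt as a system wakeup source on suspend. The enable here must
 * be balanced by irq_set_irq_wake(irq, 0) on the resume path.
 */
#if 0
static int my_dev_suspend(struct device *dev)
{
	struct my_dev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(md->irq, 1);
	return 0;
}
#endif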

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		if (chip != desc->irq_data.chip)
			irq_chip_set_defaults(desc->irq_data.chip);
		ret = 0;
		break;
	default:
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}
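
/*
 * Illustrative sketch (hypothetical sensor driver): requesting a purely
 * threaded handler. Passing handler == NULL installs
 * irq_default_primary_handler() above; IRQF_ONESHOT keeps the level
 * triggered line masked until my_thread_fn() has silenced the device.
 */
#if 0
	ret = request_threaded_irq(client->irq, NULL, my_thread_fn,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   "my-sensor", sensor);
#endif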

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action, bool force)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static void
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	local_bh_disable();
	action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action, false);
	local_bh_enable();
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action, false);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
	int wake;

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					 &action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQS_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);
			handler_fn(desc, action);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action, true);

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	desc = irq_to_desc(tsk->irqaction->irq);

	/*
	 * Prevent a stale desc->threads_oneshot. Must be called
	 * before setting the IRQTF_DIED flag.
	 */
	irq_finalize_oneshot(desc, tsk->irqaction, true);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but that is not really a
		 * problem, since only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
			old_name = old->name;
			goto mismatch;
		}

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction. Unlikely to have
	 * 32 resp 64 irqs sharing one line, but who knows.
	 */
	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
		ret = -EBUSY;
		goto out_mask;
	}
	new->thread_mask = 1 << ffz(thread_mask);

	if (!shared) {
		irq_chip_set_defaults(desc->irq_data.chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
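
/*
 * Illustrative sketch (hypothetical my_dev type and MY_IRQ_MASK
 * register): orderly teardown. The device is silenced first so no new
 * interrupts arrive, then free_irq() removes the handler and waits for
 * in-flight invocations before the state is freed.
 */
#if 0
static void my_dev_remove(struct my_dev *dev)
{
	writel(0, dev->regs + MY_IRQ_MASK);	/* quiet the hardware */
	free_irq(dev->irq, dev);		/* dev_id as passed at request time */
	kfree(dev);
}
#endif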

/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD, which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non-NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc))
		return -EINVAL;

	/* A NULL primary handler is only valid together with a thread_fn. */
	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
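
/*
 * Illustrative sketch (not part of this file): the split primary/threaded
 * handler pattern described above. struct foo_dev, its registers and the
 * foo_* / FOO_* names are hypothetical.
 */
#if 0
static irqreturn_t foo_primary_handler(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* Runs in hard interrupt context: check whether it is our device. */
	if (!(readl(foo->regs + FOO_IRQ_STATUS) & FOO_IRQ_PENDING))
		return IRQ_NONE;	/* not ours - required for sharing */

	/* Silence the device, then defer the real work to the thread. */
	writel(0, foo->regs + FOO_IRQ_ENABLE);
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);	/* may sleep: we run in a kthread */

	/* Re-enable the interrupt on the device once the work is done. */
	writel(FOO_IRQ_ENABLE_BIT, foo->regs + FOO_IRQ_ENABLE);
	return IRQ_HANDLED;
}

static int foo_probe(struct foo_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_primary_handler,
				    foo_thread_fn, IRQF_SHARED,
				    "foo", foo);
}
#endif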

/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ASCII name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		/* Nested irq: @handler runs from the parent's irq thread. */
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
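
/*
 * Illustrative sketch (not part of this file): a driver that does not
 * know whether its interrupt line is nested (e.g. behind an I2C GPIO
 * expander) or a regular hardirq. The foo_* names are hypothetical.
 */
#if 0
static int foo_request(struct foo_dev *foo)
{
	int ret;

	ret = request_any_context_irq(foo->irq, foo_handler,
				      IRQF_TRIGGER_FALLING, "foo", foo);
	if (ret < 0)
		return ret;	/* failure: negative errno */

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED - usually irrelevant. */
	return 0;
}
#endif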