sh: Switch dynamic IRQ creation to generic irq allocator.

Now that the genirq code provides an IRQ bitmap of its own and the
necessary API to manipulate it, there's no need to keep our own version
around anymore.

In the process we kill off some unused IRQ reservation code, with future
users now having to tie in to the genirq API as normal.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 2010-10-26 16:05:08 +09:00
parent 57b813303a
commit 38ab13441c
4 changed files with 9 additions and 84 deletions
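
What "tying in to the genirq API as normal" looks like in practice is roughly the following. This is an illustrative sketch, not part of the commit: the example_* function names are invented, and the headers are assumed. A driver that previously relied on the SH-private bitmap helpers would now ask genirq for descriptors directly:

#include <linux/irq.h>
#include <linux/topology.h>

/*
 * Illustrative only: allocate a dynamic IRQ descriptor straight from
 * genirq, using the same calls the reworked create_irq()/destroy_irq()
 * wrappers below end up making.
 */
static int example_alloc_dynamic_irq(void)
{
	int irq = irq_alloc_desc(numa_node_id());

	if (irq < 0)
		return irq;	/* no free descriptor */

	/* ... bind an irq_chip and handler to 'irq' here ... */

	return irq;
}

static void example_free_dynamic_irq(unsigned int irq)
{
	irq_free_desc(irq);	/* hand the number back to the allocator */
}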


@@ -273,12 +273,6 @@ void __init init_IRQ(void)
 {
 	plat_irq_setup();
 
-	/*
-	 * Pin any of the legacy IRQ vectors that haven't already been
-	 * grabbed by the platform
-	 */
-	reserve_irq_legacy();
-
 	/* Perform the machine specific initialisation */
 	if (sh_mv.mv_init_irq)
 		sh_mv.mv_init_irq();


@@ -78,7 +78,7 @@ static void __init intc_register_irq(struct intc_desc *desc,
 	 * Register the IRQ position with the global IRQ map, then insert
 	 * it in to the radix tree.
 	 */
-	reserve_irq_vector(irq);
+	irq_reserve_irqs(irq, 1);
 
 	raw_spin_lock_irqsave(&intc_big_lock, flags);
 	radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
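
For context, irq_reserve_irqs(irq, 1) asks the genirq core to mark that IRQ number as allocated so the dynamic allocator will not hand it out again; it replaces setting a bit in the private intc_irq_map. A hedged sketch of the same call from a hypothetical board file follows; the BOARD_* constants and the function name are invented:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>

#define BOARD_VEC_BASE	64	/* invented: first hard-wired vector */
#define BOARD_VEC_NR	8	/* invented: number of hard-wired vectors */

static int __init board_reserve_vectors(void)
{
	/*
	 * Mark the fixed vectors as in use so irq_alloc_desc() and
	 * friends skip over them when creating dynamic IRQs.
	 */
	int ret = irq_reserve_irqs(BOARD_VEC_BASE, BOARD_VEC_NR);

	if (ret)
		pr_warn("vectors %d-%d already reserved (%d)\n",
			BOARD_VEC_BASE, BOARD_VEC_BASE + BOARD_VEC_NR - 1, ret);

	return ret;
}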


@@ -17,7 +17,7 @@
 #include "internals.h" /* only for activate_irq() damage.. */
 
 /*
- * The intc_irq_map provides a global map of bound IRQ vectors for a
+ * The IRQ bitmap provides a global map of bound IRQ vectors for a
  * given platform. Allocation of IRQs are either static through the CPU
  * vector map, or dynamic in the case of board mux vectors or MSI.
  *
@@ -27,104 +27,38 @@
  * when dynamically creating IRQs, as well as tying in to otherwise
  * unused irq_desc positions in the sparse array.
  */
 
-static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
-static DEFINE_RAW_SPINLOCK(vector_lock);
-
 /*
  * Dynamic IRQ allocation and deallocation
  */
 unsigned int create_irq_nr(unsigned int irq_want, int node)
 {
-	unsigned int irq = 0, new;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-
-	/*
-	 * First try the wanted IRQ
-	 */
-	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
-		new = irq_want;
-	} else {
-		/* .. then fall back to scanning. */
-		new = find_first_zero_bit(intc_irq_map, nr_irqs);
-		if (unlikely(new == nr_irqs))
-			goto out_unlock;
-
-		__set_bit(new, intc_irq_map);
-	}
-
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	irq = irq_alloc_desc_at(new, node);
-	if (unlikely(irq != new)) {
-		pr_err("can't get irq_desc for %d\n", new);
+	int irq = irq_alloc_desc_at(irq_want, node);
+	if (irq < 0)
 		return 0;
-	}
 
 	activate_irq(irq);
 
-	return 0;
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	return 0;
+	return irq;
 }
 
 int create_irq(void)
 {
-	int nid = cpu_to_node(smp_processor_id());
-	int irq;
-
-	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
-	if (irq == 0)
-		irq = -1;
+	int irq = irq_alloc_desc(numa_node_id());
+	if (irq >= 0)
+		activate_irq(irq);
 
 	return irq;
 }
 
 void destroy_irq(unsigned int irq)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	__clear_bit(irq, intc_irq_map);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
+	irq_free_desc(irq);
 }
 
-int reserve_irq_vector(unsigned int irq)
-{
-	unsigned long flags;
-	int ret = 0;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	if (test_and_set_bit(irq, intc_irq_map))
-		ret = -EBUSY;
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-	return ret;
-}
-
 void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
 {
-	unsigned long flags;
 	int i;
 
-	raw_spin_lock_irqsave(&vector_lock, flags);
 	for (i = 0; i < nr_vecs; i++)
-		__set_bit(evt2irq(vectors[i].vect), intc_irq_map);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
-}
-
-void reserve_irq_legacy(void)
-{
-	unsigned long flags;
-	int i, j;
-
-	raw_spin_lock_irqsave(&vector_lock, flags);
-	j = find_first_bit(intc_irq_map, nr_irqs);
-	for (i = 0; i < j; i++)
-		__set_bit(i, intc_irq_map);
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
+		irq_reserve_irqs(evt2irq(vectors[i].vect), 1);
 }
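
Callers of the SH dynamic IRQ interface are unaffected by the rewrite above: create_irq(), create_irq_nr() and destroy_irq() keep their signatures and now simply sit on top of irq_alloc_desc()/irq_alloc_desc_at() and irq_free_desc(). A usage sketch, not taken from the tree, with an invented handler and minimal error handling:

#include <linux/interrupt.h>
#include <linux/irq.h>

/*
 * Prototypes as defined by the SH intc code above; the header that
 * exports them is arch-specific and omitted here.
 */
int create_irq(void);
void destroy_irq(unsigned int irq);

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* invented: nothing to do in this sketch */
}

static int example_setup_dynamic_vector(void)
{
	int irq, ret;

	irq = create_irq();		/* now backed by irq_alloc_desc() */
	if (irq < 0)
		return irq;

	/* a real consumer would attach its irq_chip to 'irq' here */
	ret = request_irq(irq, example_handler, 0, "example", NULL);
	if (ret) {
		destroy_irq(irq);	/* now backed by irq_free_desc() */
		return ret;
	}

	return irq;
}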


@@ -129,7 +129,4 @@ static inline int register_intc_userimask(unsigned long addr)
 }
 #endif
 
-int reserve_irq_vector(unsigned int irq);
-void reserve_irq_legacy(void);
-
 #endif /* __SH_INTC_H */