Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
On Thu, 04 Sep 2008 12:52:19 +1000 Benjamin Herrenschmidt [EMAIL PROTECTED] wrote: On Wed, 2008-09-03 at 15:41 +0200, Sebastien Dugue wrote: On Wed, 20 Aug 2008 15:23:01 +1000 Benjamin Herrenschmidt [EMAIL PROTECTED] wrote: BTW. It would be good to try to turn the GFP_ATOMIC into GFP_KERNEL, That would be nice indeed maybe using a semaphore instead of a lock to protect insertion vs. initialisation. a semaphore? are you meaning a mutex? If not, I fail to understand what you're implying. Right, a mutex, bad habit calling those semaphores from the old days :-) OK, then we're on the same line ;-) Right, that's the problem with this new scheme and I'm still trying to find a way to handle memory allocation failures be it for GFP_ATOMIC or GFP_KERNEL. I could not think of anything simple so far and I'm open for suggestions. GFP_KERNEL should not fail, it will just block no ? No it won't block and will fail (returns NULL). If it fails, it's probably catastrophic enough not to care. Yep, I'd tend to agree with that. You can always fallback to linear lookup. I will have to add that back as there is no more fallback. I don't know if it's worth trying to fire off a new allocation attempt later, probably not. I've been pondering with this lately, but I think that adding a linear lookup fallback should be OK. Thanks, Sebastien. Ben. ___ Linuxppc-dev mailing list Linuxppc-dev@ozlabs.org https://ozlabs.org/mailman/listinfo/linuxppc-dev ___ Linuxppc-dev mailing list Linuxppc-dev@ozlabs.org https://ozlabs.org/mailman/listinfo/linuxppc-dev
Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
There's nothing to 'de-initialize' here, or am I missing something? radix_tree_insert() will return ENOMEM and won't insert anything. Forget my comment, just fallback. Or you can fallback if you don't find, as easy, probably easier since it shouldn't happen in practice. That's what I had in mind. Thanks for doing that work ! Cheers, Ben. ___ Linuxppc-dev mailing list Linuxppc-dev@ozlabs.org https://ozlabs.org/mailman/listinfo/linuxppc-dev
Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
On Thu, 04 Sep 2008 17:58:56 +1000 Benjamin Herrenschmidt [EMAIL PROTECTED] wrote: There's nothing to 'de-initialize' here, or am I missing something? radix_tree_insert() will return ENOMEM and won't insert anything. Forget my comment, just fallback. Or you can fallback if you don't find, as easy, probably easier since it shouldn't happen in practice. That's what I had in mind. Thanks for doing that work ! Will do that way. Thanks, Sebastien. ___ Linuxppc-dev mailing list Linuxppc-dev@ozlabs.org https://ozlabs.org/mailman/listinfo/linuxppc-dev
[PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
The radix trees used by interrupt controllers for their irq reverse mapping (currently only the XICS found on pSeries) have a complex locking scheme dating back to before the advent of the lockless radix tree. Take advantage of this and of the fact that the items of the tree are pointers to a static array (irq_map) elements which can never go under us to simplify the locking. Concurrency between readers and writers is handled by the intrinsic properties of the lockless radix tree. Concurrency between writers is handled with a global mutex. Signed-off-by: Sebastien Dugue [EMAIL PROTECTED] Cc: Paul Mackerras [EMAIL PROTECTED] Cc: Benjamin Herrenschmidt [EMAIL PROTECTED] Cc: Michael Ellerman [EMAIL PROTECTED] --- arch/powerpc/kernel/irq.c | 76 ++-- 1 files changed, 11 insertions(+), 65 deletions(-) diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 2656924..ac222d0 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -439,9 +439,8 @@ void do_softirq(void) static LIST_HEAD(irq_hosts); static DEFINE_SPINLOCK(irq_big_lock); -static DEFINE_PER_CPU(unsigned int, irq_radix_reader); -static unsigned int irq_radix_writer; static unsigned int revmap_trees_allocated; +static DEFINE_MUTEX(revmap_trees_mutex); struct irq_map_entry irq_map[NR_IRQS]; static unsigned int irq_virq_count = NR_IRQS; static struct irq_host *irq_default_host; @@ -584,57 +583,6 @@ void irq_set_virq_count(unsigned int count) irq_virq_count = count; } -/* radix tree not lockless safe ! 
we use a brlock-type mecanism - * for now, until we can use a lockless radix tree - */ -static void irq_radix_wrlock(unsigned long *flags) -{ - unsigned int cpu, ok; - - spin_lock_irqsave(&irq_big_lock, *flags); - irq_radix_writer = 1; - smp_mb(); - do { - barrier(); - ok = 1; - for_each_possible_cpu(cpu) { - if (per_cpu(irq_radix_reader, cpu)) { - ok = 0; - break; - } - } - if (!ok) - cpu_relax(); - } while(!ok); -} - -static void irq_radix_wrunlock(unsigned long flags) -{ - smp_wmb(); - irq_radix_writer = 0; - spin_unlock_irqrestore(&irq_big_lock, flags); -} - -static void irq_radix_rdlock(unsigned long *flags) -{ - local_irq_save(*flags); - __get_cpu_var(irq_radix_reader) = 1; - smp_mb(); - if (likely(irq_radix_writer == 0)) - return; - __get_cpu_var(irq_radix_reader) = 0; - smp_wmb(); - spin_lock(&irq_big_lock); - __get_cpu_var(irq_radix_reader) = 1; - spin_unlock(&irq_big_lock); -} - -static void irq_radix_rdunlock(unsigned long flags) -{ - __get_cpu_var(irq_radix_reader) = 0; - local_irq_restore(flags); -} - static int irq_setup_virq(struct irq_host *host, unsigned int virq, irq_hw_number_t hwirq) { @@ -789,7 +737,6 @@ void irq_dispose_mapping(unsigned int virq) { struct irq_host *host; irq_hw_number_t hwirq; - unsigned long flags; if (virq == NO_IRQ) return; @@ -829,9 +776,9 @@ void irq_dispose_mapping(unsigned int virq) smp_rmb(); if (revmap_trees_allocated < 1) break; - irq_radix_wrlock(&flags); + mutex_lock(&revmap_trees_mutex); radix_tree_delete(&host->revmap_data.tree, hwirq); - irq_radix_wrunlock(flags); + mutex_unlock(&revmap_trees_mutex); break; } @@ -885,7 +832,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, { struct irq_map_entry *ptr; unsigned int virq; - unsigned long flags; WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); @@ -897,9 +843,11 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, return irq_find_mapping(host, hwirq); /* Now try to resolve */ - irq_radix_rdlock(&flags); + /* + * No rcu_read_lock(ing) needed, the ptr 
returned can't go under us + * as it's referencing an entry in the static irq_map table. + */ ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); - irq_radix_rdunlock(flags); /* * If found in radix tree, then fine. @@ -917,7 +865,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, irq_hw_number_t hwirq) { - unsigned long flags; WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); @@ -931,10 +878,10 @@ void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, return; if (virq != NO_IRQ) { - irq_radix_wrlock(&flags); +
Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
On Wed, 20 Aug 2008 15:22:06 +1000 Benjamin Herrenschmidt [EMAIL PROTECTED] wrote: On Wed, 2008-08-06 at 15:30 +0200, Sebastien Dugue wrote: The radix trees used by interrupt controllers for their irq reverse mapping (currently only the XICS found on pSeries) have a complex locking scheme dating back to before the advent of the lockless radix tree. Take advantage of this and of the fact that the items of the tree are pointers to a static array (irq_map) elements which can never go under us to simplify the locking. Concurrency between readers and writers is handled by the intrinsic properties of the lockless radix tree. Concurrency between writers is handled with a spinlock added to the irq_host structure. No need for a spinlock in the irq_host. Make it one global lock, it's not like scalability of irq_create_mapping() was a big deal and there's usually only one of those type of hosts anyway. Right, done. Thanks, Sebastien. Signed-off-by: Sebastien Dugue [EMAIL PROTECTED] Cc: Paul Mackerras [EMAIL PROTECTED] Cc: Benjamin Herrenschmidt [EMAIL PROTECTED] Cc: Michael Ellerman [EMAIL PROTECTED] --- arch/powerpc/include/asm/irq.h |1 + arch/powerpc/kernel/irq.c | 74 ++-- 2 files changed, 12 insertions(+), 63 deletions(-) diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 0a51376..72fd036 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -119,6 +119,7 @@ struct irq_host { } linear; struct radix_tree_root tree; } revmap_data; + spinlock_t tree_lock; struct irq_host_ops *ops; void*host_data; irq_hw_number_t inval_irq; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index dc8663a..7a19103 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -439,8 +439,6 @@ void do_softirq(void) static LIST_HEAD(irq_hosts); static DEFINE_SPINLOCK(irq_big_lock); -static DEFINE_PER_CPU(unsigned int, irq_radix_reader); -static unsigned int irq_radix_writer; static atomic_t 
revmap_trees_allocated = ATOMIC_INIT(0); struct irq_map_entry irq_map[NR_IRQS]; static unsigned int irq_virq_count = NR_IRQS; @@ -584,57 +582,6 @@ void irq_set_virq_count(unsigned int count) irq_virq_count = count; } -/* radix tree not lockless safe ! we use a brlock-type mecanism - * for now, until we can use a lockless radix tree - */ -static void irq_radix_wrlock(unsigned long *flags) -{ - unsigned int cpu, ok; - - spin_lock_irqsave(&irq_big_lock, *flags); - irq_radix_writer = 1; - smp_mb(); - do { - barrier(); - ok = 1; - for_each_possible_cpu(cpu) { - if (per_cpu(irq_radix_reader, cpu)) { - ok = 0; - break; - } - } - if (!ok) - cpu_relax(); - } while(!ok); -} - -static void irq_radix_wrunlock(unsigned long flags) -{ - smp_wmb(); - irq_radix_writer = 0; - spin_unlock_irqrestore(&irq_big_lock, flags); -} - -static void irq_radix_rdlock(unsigned long *flags) -{ - local_irq_save(*flags); - __get_cpu_var(irq_radix_reader) = 1; - smp_mb(); - if (likely(irq_radix_writer == 0)) - return; - __get_cpu_var(irq_radix_reader) = 0; - smp_wmb(); - spin_lock(&irq_big_lock); - __get_cpu_var(irq_radix_reader) = 1; - spin_unlock(&irq_big_lock); -} - -static void irq_radix_rdunlock(unsigned long flags) -{ - __get_cpu_var(irq_radix_reader) = 0; - local_irq_restore(flags); -} - static int irq_setup_virq(struct irq_host *host, unsigned int virq, irq_hw_number_t hwirq) { @@ -789,7 +736,6 @@ void irq_dispose_mapping(unsigned int virq) { struct irq_host *host; irq_hw_number_t hwirq; - unsigned long flags; if (virq == NO_IRQ) return; @@ -825,9 +771,9 @@ void irq_dispose_mapping(unsigned int virq) /* Check if radix tree allocated yet */ if (atomic_read(&revmap_trees_allocated) == 0) break; - irq_radix_wrlock(&flags); + spin_lock(&host->tree_lock); radix_tree_delete(&host->revmap_data.tree, hwirq); - irq_radix_wrunlock(flags); + spin_unlock(&host->tree_lock); break; } @@ -881,7 +827,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, { struct irq_map_entry *ptr; unsigned int virq = 
NO_IRQ; - unsigned long flags; WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); @@ -893,9 +838,11 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
On Wed, 20 Aug 2008 15:23:01 +1000 Benjamin Herrenschmidt [EMAIL PROTECTED] wrote: BTW. It would be good to try to turn the GFP_ATOMIC into GFP_KERNEL, That would be nice indeed maybe using a semaphore instead of a lock to protect insertion vs. initialisation. a semaphore? are you meaning a mutex? If not, I fail to understand what you're implying. The old scheme was fine because if the atomic allocation failed, it could fallback to the linear search and try again on the next interrupt. Not anymore. Right, that's the problem with this new scheme and I'm still trying to find a way to handle memory allocation failures be it for GFP_ATOMIC or GFP_KERNEL. I could not think of anything simple so far and I'm open for suggestions. Thanks, Sebastien. ___ Linuxppc-dev mailing list Linuxppc-dev@ozlabs.org https://ozlabs.org/mailman/listinfo/linuxppc-dev
Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
On Wed, 2008-09-03 at 15:41 +0200, Sebastien Dugue wrote: On Wed, 20 Aug 2008 15:23:01 +1000 Benjamin Herrenschmidt [EMAIL PROTECTED] wrote: BTW. It would be good to try to turn the GFP_ATOMIC into GFP_KERNEL, That would be nice indeed maybe using a semaphore instead of a lock to protect insertion vs. initialisation. a semaphore? are you meaning a mutex? If not, I fail to understand what you're implying. Right, a mutex, bad habit calling those semaphores from the old days :-) Right, that's the problem with this new scheme and I'm still trying to find a way to handle memory allocation failures be it for GFP_ATOMIC or GFP_KERNEL. I could not think of anything simple so far and I'm open for suggestions. GFP_KERNEL should not fail, it will just block no ? If it fails, it's probably catastrophic enough not to care. You can always fallback to linear lookup. I don't know if it's worth trying to fire off a new allocation attempt later, probably not. Ben. ___ Linuxppc-dev mailing list Linuxppc-dev@ozlabs.org https://ozlabs.org/mailman/listinfo/linuxppc-dev
Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
On Wed, 2008-08-06 at 15:30 +0200, Sebastien Dugue wrote: The radix trees used by interrupt controllers for their irq reverse mapping (currently only the XICS found on pSeries) have a complex locking scheme dating back to before the advent of the lockless radix tree. Take advantage of this and of the fact that the items of the tree are pointers to a static array (irq_map) elements which can never go under us to simplify the locking. Concurrency between readers and writers is handled by the intrinsic properties of the lockless radix tree. Concurrency between writers is handled with a spinlock added to the irq_host structure. No need for a spinlock in the irq_host. Make it one global lock, it's not like scalability of irq_create_mapping() was a big deal and there's usually only one of those type of hosts anyway. Signed-off-by: Sebastien Dugue [EMAIL PROTECTED] Cc: Paul Mackerras [EMAIL PROTECTED] Cc: Benjamin Herrenschmidt [EMAIL PROTECTED] Cc: Michael Ellerman [EMAIL PROTECTED] --- arch/powerpc/include/asm/irq.h |1 + arch/powerpc/kernel/irq.c | 74 ++-- 2 files changed, 12 insertions(+), 63 deletions(-) diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 0a51376..72fd036 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -119,6 +119,7 @@ struct irq_host { } linear; struct radix_tree_root tree; } revmap_data; + spinlock_t tree_lock; struct irq_host_ops *ops; void*host_data; irq_hw_number_t inval_irq; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index dc8663a..7a19103 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -439,8 +439,6 @@ void do_softirq(void) static LIST_HEAD(irq_hosts); static DEFINE_SPINLOCK(irq_big_lock); -static DEFINE_PER_CPU(unsigned int, irq_radix_reader); -static unsigned int irq_radix_writer; static atomic_t revmap_trees_allocated = ATOMIC_INIT(0); struct irq_map_entry irq_map[NR_IRQS]; static unsigned int irq_virq_count = NR_IRQS; @@ 
-584,57 +582,6 @@ void irq_set_virq_count(unsigned int count) irq_virq_count = count; } -/* radix tree not lockless safe ! we use a brlock-type mecanism - * for now, until we can use a lockless radix tree - */ -static void irq_radix_wrlock(unsigned long *flags) -{ - unsigned int cpu, ok; - - spin_lock_irqsave(&irq_big_lock, *flags); - irq_radix_writer = 1; - smp_mb(); - do { - barrier(); - ok = 1; - for_each_possible_cpu(cpu) { - if (per_cpu(irq_radix_reader, cpu)) { - ok = 0; - break; - } - } - if (!ok) - cpu_relax(); - } while(!ok); -} - -static void irq_radix_wrunlock(unsigned long flags) -{ - smp_wmb(); - irq_radix_writer = 0; - spin_unlock_irqrestore(&irq_big_lock, flags); -} - -static void irq_radix_rdlock(unsigned long *flags) -{ - local_irq_save(*flags); - __get_cpu_var(irq_radix_reader) = 1; - smp_mb(); - if (likely(irq_radix_writer == 0)) - return; - __get_cpu_var(irq_radix_reader) = 0; - smp_wmb(); - spin_lock(&irq_big_lock); - __get_cpu_var(irq_radix_reader) = 1; - spin_unlock(&irq_big_lock); -} - -static void irq_radix_rdunlock(unsigned long flags) -{ - __get_cpu_var(irq_radix_reader) = 0; - local_irq_restore(flags); -} - static int irq_setup_virq(struct irq_host *host, unsigned int virq, irq_hw_number_t hwirq) { @@ -789,7 +736,6 @@ void irq_dispose_mapping(unsigned int virq) { struct irq_host *host; irq_hw_number_t hwirq; - unsigned long flags; if (virq == NO_IRQ) return; @@ -825,9 +771,9 @@ void irq_dispose_mapping(unsigned int virq) /* Check if radix tree allocated yet */ if (atomic_read(&revmap_trees_allocated) == 0) break; - irq_radix_wrlock(&flags); + spin_lock(&host->tree_lock); radix_tree_delete(&host->revmap_data.tree, hwirq); - irq_radix_wrunlock(flags); + spin_unlock(&host->tree_lock); break; } @@ -881,7 +827,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, { struct irq_map_entry *ptr; unsigned int virq = NO_IRQ; - unsigned long flags; WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); @@ -893,9 +838,11 @@ unsigned int 
irq_radix_revmap_lookup(struct irq_host *host, return irq_find_mapping(host, hwirq); /* Now try to resolve */ - irq_radix_rdlock(&flags); + /* + * No
Re: [PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
BTW. It would be good to try to turn the GFP_ATOMIC into GFP_KERNEL, maybe using a semaphore instead of a lock to protect insertion vs. initialisation. The old scheme was fine because if the atomic allocation failed, it could fallback to the linear search and try again on the next interrupt. Not anymore. Ben. ___ Linuxppc-dev mailing list Linuxppc-dev@ozlabs.org https://ozlabs.org/mailman/listinfo/linuxppc-dev
[PATCH 2/2] powerpc - Make the irq reverse mapping radix tree lockless
The radix trees used by interrupt controllers for their irq reverse mapping (currently only the XICS found on pSeries) have a complex locking scheme dating back to before the advent of the lockless radix tree. Take advantage of this and of the fact that the items of the tree are pointers to a static array (irq_map) elements which can never go under us to simplify the locking. Concurrency between readers and writers is handled by the intrinsic properties of the lockless radix tree. Concurrency between writers is handled with a spinlock added to the irq_host structure. Signed-off-by: Sebastien Dugue [EMAIL PROTECTED] Cc: Paul Mackerras [EMAIL PROTECTED] Cc: Benjamin Herrenschmidt [EMAIL PROTECTED] Cc: Michael Ellerman [EMAIL PROTECTED] --- arch/powerpc/include/asm/irq.h |1 + arch/powerpc/kernel/irq.c | 74 ++-- 2 files changed, 12 insertions(+), 63 deletions(-) diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 0a51376..72fd036 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -119,6 +119,7 @@ struct irq_host { } linear; struct radix_tree_root tree; } revmap_data; + spinlock_t tree_lock; struct irq_host_ops *ops; void*host_data; irq_hw_number_t inval_irq; diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index dc8663a..7a19103 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -439,8 +439,6 @@ void do_softirq(void) static LIST_HEAD(irq_hosts); static DEFINE_SPINLOCK(irq_big_lock); -static DEFINE_PER_CPU(unsigned int, irq_radix_reader); -static unsigned int irq_radix_writer; static atomic_t revmap_trees_allocated = ATOMIC_INIT(0); struct irq_map_entry irq_map[NR_IRQS]; static unsigned int irq_virq_count = NR_IRQS; @@ -584,57 +582,6 @@ void irq_set_virq_count(unsigned int count) irq_virq_count = count; } -/* radix tree not lockless safe ! 
we use a brlock-type mecanism - * for now, until we can use a lockless radix tree - */ -static void irq_radix_wrlock(unsigned long *flags) -{ - unsigned int cpu, ok; - - spin_lock_irqsave(&irq_big_lock, *flags); - irq_radix_writer = 1; - smp_mb(); - do { - barrier(); - ok = 1; - for_each_possible_cpu(cpu) { - if (per_cpu(irq_radix_reader, cpu)) { - ok = 0; - break; - } - } - if (!ok) - cpu_relax(); - } while(!ok); -} - -static void irq_radix_wrunlock(unsigned long flags) -{ - smp_wmb(); - irq_radix_writer = 0; - spin_unlock_irqrestore(&irq_big_lock, flags); -} - -static void irq_radix_rdlock(unsigned long *flags) -{ - local_irq_save(*flags); - __get_cpu_var(irq_radix_reader) = 1; - smp_mb(); - if (likely(irq_radix_writer == 0)) - return; - __get_cpu_var(irq_radix_reader) = 0; - smp_wmb(); - spin_lock(&irq_big_lock); - __get_cpu_var(irq_radix_reader) = 1; - spin_unlock(&irq_big_lock); -} - -static void irq_radix_rdunlock(unsigned long flags) -{ - __get_cpu_var(irq_radix_reader) = 0; - local_irq_restore(flags); -} - static int irq_setup_virq(struct irq_host *host, unsigned int virq, irq_hw_number_t hwirq) { @@ -789,7 +736,6 @@ void irq_dispose_mapping(unsigned int virq) { struct irq_host *host; irq_hw_number_t hwirq; - unsigned long flags; if (virq == NO_IRQ) return; @@ -825,9 +771,9 @@ void irq_dispose_mapping(unsigned int virq) /* Check if radix tree allocated yet */ if (atomic_read(&revmap_trees_allocated) == 0) break; - irq_radix_wrlock(&flags); + spin_lock(&host->tree_lock); radix_tree_delete(&host->revmap_data.tree, hwirq); - irq_radix_wrunlock(flags); + spin_unlock(&host->tree_lock); break; } @@ -881,7 +827,6 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, { struct irq_map_entry *ptr; unsigned int virq = NO_IRQ; - unsigned long flags; WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); @@ -893,9 +838,11 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, return irq_find_mapping(host, hwirq); /* Now try to resolve */ - irq_radix_rdlock(&flags); + /* + * 
No rcu_read_lock(ing) needed, the ptr returned can't go under us + * as it's referencing an entry in the static irq_map table. + */ ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); - irq_radix_rdunlock(flags); /* Found it, return */