Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2013-01-24 Thread Ingo Molnar

* Joonsoo Kim  wrote:

> The purpose of iterating a vmlist is finding vm area with specific
> virtual address. find_vm_area() is provided for this purpose
> and more efficient, because it uses a rbtree.
> So change it.
> 
> Cc: Chris Metcalf 
> Cc: Guan Xuetao 
> Cc: Thomas Gleixner 
> Cc: Ingo Molnar 
> Cc: "H. Peter Anvin" 
> Signed-off-by: Joonsoo Kim 
> 
> diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
> index de0de0c..862782d 100644
> --- a/arch/tile/mm/pgtable.c
> +++ b/arch/tile/mm/pgtable.c
> @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
>  in parallel. Reuse of the virtual address is prevented by
>  leaving it in the global lists until we're done with it.
>  cpa takes care of the direct mappings. */
> - read_lock(&vmlist_lock);
> - for (p = vmlist; p; p = p->next) {
> - if (p->addr == addr)
> - break;
> - }
> - read_unlock(&vmlist_lock);
> + p = find_vm_area((void *)addr);
>  
>   if (!p) {
>   pr_err("iounmap: bad address %p\n", addr);
> diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
> index b7a6055..13068ee 100644
> --- a/arch/unicore32/mm/ioremap.c
> +++ b/arch/unicore32/mm/ioremap.c
> @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
>  void __uc32_iounmap(volatile void __iomem *io_addr)
>  {
>   void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
> - struct vm_struct **p, *tmp;
> + struct vm_struct *vm;
>  
>   /*
>* If this is a section based mapping we need to handle it
> @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
>* all the mappings before the area can be reclaimed
>* by someone else.
>*/
> - write_lock(&vmlist_lock);
> - for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
> - if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
> - if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
> - unmap_area_sections((unsigned long)tmp->addr,
> - tmp->size);
> - }
> - break;
> - }
> - }
> - write_unlock(&vmlist_lock);
> + vm = find_vm_area(addr);
> + if (vm && (vm->flags & VM_IOREMAP) &&
> + (vm->flags & VM_UNICORE_SECTION_MAPPING))
> + unmap_area_sections((unsigned long)vm->addr, vm->size);
>  
>   vunmap(addr);
>  }
> diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
> index 78fe3f1..9a1e658 100644
> --- a/arch/x86/mm/ioremap.c
> +++ b/arch/x86/mm/ioremap.c
> @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
>  in parallel. Reuse of the virtual address is prevented by
>  leaving it in the global lists until we're done with it.
>  cpa takes care of the direct mappings. */
> - read_lock(&vmlist_lock);
> - for (p = vmlist; p; p = p->next) {
> - if (p->addr == (void __force *)addr)
> - break;
> - }
> - read_unlock(&vmlist_lock);
> + p = find_vm_area((void __force *)addr);
>  
>   if (!p) {
>   printk(KERN_ERR "iounmap: bad address %p\n", addr);

For the x86 bits, provided it gets some good testing:

Acked-by: Ingo Molnar 

Thanks,

Ingo
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2013-01-24 Thread Ingo Molnar

* Joonsoo Kim js1...@gmail.com wrote:

 The purpose of iterating a vmlist is finding vm area with specific
 virtual address. find_vm_area() is provided for this purpose
 and more efficient, because it uses a rbtree.
 So change it.
 
 Cc: Chris Metcalf cmetc...@tilera.com
 Cc: Guan Xuetao g...@mprc.pku.edu.cn
 Cc: Thomas Gleixner t...@linutronix.de
 Cc: Ingo Molnar mi...@redhat.com
 Cc: H. Peter Anvin h...@zytor.com
 Signed-off-by: Joonsoo Kim js1...@gmail.com
 
 diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
 index de0de0c..862782d 100644
 --- a/arch/tile/mm/pgtable.c
 +++ b/arch/tile/mm/pgtable.c
 @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
  in parallel. Reuse of the virtual address is prevented by
  leaving it in the global lists until we're done with it.
  cpa takes care of the direct mappings. */
 - read_lock(vmlist_lock);
 - for (p = vmlist; p; p = p-next) {
 - if (p-addr == addr)
 - break;
 - }
 - read_unlock(vmlist_lock);
 + p = find_vm_area((void *)addr);
  
   if (!p) {
   pr_err(iounmap: bad address %p\n, addr);
 diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
 index b7a6055..13068ee 100644
 --- a/arch/unicore32/mm/ioremap.c
 +++ b/arch/unicore32/mm/ioremap.c
 @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
  void __uc32_iounmap(volatile void __iomem *io_addr)
  {
   void *addr = (void *)(PAGE_MASK  (unsigned long)io_addr);
 - struct vm_struct **p, *tmp;
 + struct vm_struct *vm;
  
   /*
* If this is a section based mapping we need to handle it
 @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
* all the mappings before the area can be reclaimed
* by someone else.
*/
 - write_lock(vmlist_lock);
 - for (p = vmlist ; (tmp = *p) ; p = tmp-next) {
 - if ((tmp-flags  VM_IOREMAP)  (tmp-addr == addr)) {
 - if (tmp-flags  VM_UNICORE_SECTION_MAPPING) {
 - unmap_area_sections((unsigned long)tmp-addr,
 - tmp-size);
 - }
 - break;
 - }
 - }
 - write_unlock(vmlist_lock);
 + vm = find_vm_area(addr);
 + if (vm  (vm-flags  VM_IOREMAP) 
 + (vm-flags  VM_UNICORE_SECTION_MAPPING))
 + unmap_area_sections((unsigned long)vm-addr, vm-size);
  
   vunmap(addr);
  }
 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
 index 78fe3f1..9a1e658 100644
 --- a/arch/x86/mm/ioremap.c
 +++ b/arch/x86/mm/ioremap.c
 @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
  in parallel. Reuse of the virtual address is prevented by
  leaving it in the global lists until we're done with it.
  cpa takes care of the direct mappings. */
 - read_lock(vmlist_lock);
 - for (p = vmlist; p; p = p-next) {
 - if (p-addr == (void __force *)addr)
 - break;
 - }
 - read_unlock(vmlist_lock);
 + p = find_vm_area((void __force *)addr);
  
   if (!p) {
   printk(KERN_ERR iounmap: bad address %p\n, addr);

For the x86 bits, provided it gets some good testing:

Acked-by: Ingo Molnar mi...@kernel.org

Thanks,

Ingo
--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-10 Thread Chris Metcalf
On 12/6/2012 11:09 AM, Joonsoo Kim wrote:
> The purpose of iterating a vmlist is finding vm area with specific
> virtual address. find_vm_area() is provided for this purpose
> and more efficient, because it uses a rbtree.
> So change it.

If you get an Acked-by for the x86 change, feel free to apply it to the tile 
file as well.  You'll note that for tile it's under an #if 0, which in 
retrospect I shouldn't have pushed anyway.  So I don't feel strongly :-)

FWIW, the change certainly seems at least plausible to me.

-- 
Chris Metcalf, Tilera Corp.
http://www.tilera.com

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-10 Thread Chris Metcalf
On 12/6/2012 11:09 AM, Joonsoo Kim wrote:
 The purpose of iterating a vmlist is finding vm area with specific
 virtual address. find_vm_area() is provided for this purpose
 and more efficient, because it uses a rbtree.
 So change it.

If you get an Acked-by for the x86 change, feel free to apply it to the tile 
file as well.  You'll note that for tile it's under an #if 0, which in 
retrospect I shouldn't have pushed anyway.  So I don't feel strongly :-)

FWIW, the change certainly seems at least plausible to me.

-- 
Chris Metcalf, Tilera Corp.
http://www.tilera.com

--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-09 Thread guanxuetao
> The purpose of iterating a vmlist is finding vm area with specific
> virtual address. find_vm_area() is provided for this purpose
> and more efficient, because it uses a rbtree.
> So change it.
>
> Cc: Chris Metcalf 
> Cc: Guan Xuetao 
> Cc: Thomas Gleixner 
> Cc: Ingo Molnar 
> Cc: "H. Peter Anvin" 
> Signed-off-by: Joonsoo Kim 

For UniCore32 bits:
Acked-by: Guan Xuetao 

>
> diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
> index de0de0c..862782d 100644
> --- a/arch/tile/mm/pgtable.c
> +++ b/arch/tile/mm/pgtable.c
> @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
>  in parallel. Reuse of the virtual address is prevented by
>  leaving it in the global lists until we're done with it.
>  cpa takes care of the direct mappings. */
> - read_lock(&vmlist_lock);
> - for (p = vmlist; p; p = p->next) {
> - if (p->addr == addr)
> - break;
> - }
> - read_unlock(&vmlist_lock);
> + p = find_vm_area((void *)addr);
>
>   if (!p) {
>   pr_err("iounmap: bad address %p\n", addr);
> diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
> index b7a6055..13068ee 100644
> --- a/arch/unicore32/mm/ioremap.c
> +++ b/arch/unicore32/mm/ioremap.c
> @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
>  void __uc32_iounmap(volatile void __iomem *io_addr)
>  {
>   void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
> - struct vm_struct **p, *tmp;
> + struct vm_struct *vm;
>
>   /*
>* If this is a section based mapping we need to handle it
> @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
>* all the mappings before the area can be reclaimed
>* by someone else.
>*/
> - write_lock(&vmlist_lock);
> - for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
> - if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
> - if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
> - unmap_area_sections((unsigned long)tmp->addr,
> - tmp->size);
> - }
> - break;
> - }
> - }
> - write_unlock(&vmlist_lock);
> + vm = find_vm_area(addr);
> + if (vm && (vm->flags & VM_IOREMAP) &&
> + (vm->flags & VM_UNICORE_SECTION_MAPPING))
> + unmap_area_sections((unsigned long)vm->addr, vm->size);
>
>   vunmap(addr);
>  }
> diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
> index 78fe3f1..9a1e658 100644
> --- a/arch/x86/mm/ioremap.c
> +++ b/arch/x86/mm/ioremap.c
> @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
>  in parallel. Reuse of the virtual address is prevented by
>  leaving it in the global lists until we're done with it.
>  cpa takes care of the direct mappings. */
> - read_lock(&vmlist_lock);
> - for (p = vmlist; p; p = p->next) {
> - if (p->addr == (void __force *)addr)
> - break;
> - }
> - read_unlock(&vmlist_lock);
> + p = find_vm_area((void __force *)addr);
>
>   if (!p) {
>   printk(KERN_ERR "iounmap: bad address %p\n", addr);
> --
> 1.7.9.5
>

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-09 Thread guanxuetao
 The purpose of iterating a vmlist is finding vm area with specific
 virtual address. find_vm_area() is provided for this purpose
 and more efficient, because it uses a rbtree.
 So change it.

 Cc: Chris Metcalf cmetc...@tilera.com
 Cc: Guan Xuetao g...@mprc.pku.edu.cn
 Cc: Thomas Gleixner t...@linutronix.de
 Cc: Ingo Molnar mi...@redhat.com
 Cc: H. Peter Anvin h...@zytor.com
 Signed-off-by: Joonsoo Kim js1...@gmail.com

For UniCore32 bits:
Acked-by: Guan Xuetao g...@mprc.pku.edu.cn


 diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
 index de0de0c..862782d 100644
 --- a/arch/tile/mm/pgtable.c
 +++ b/arch/tile/mm/pgtable.c
 @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
  in parallel. Reuse of the virtual address is prevented by
  leaving it in the global lists until we're done with it.
  cpa takes care of the direct mappings. */
 - read_lock(vmlist_lock);
 - for (p = vmlist; p; p = p-next) {
 - if (p-addr == addr)
 - break;
 - }
 - read_unlock(vmlist_lock);
 + p = find_vm_area((void *)addr);

   if (!p) {
   pr_err(iounmap: bad address %p\n, addr);
 diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
 index b7a6055..13068ee 100644
 --- a/arch/unicore32/mm/ioremap.c
 +++ b/arch/unicore32/mm/ioremap.c
 @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
  void __uc32_iounmap(volatile void __iomem *io_addr)
  {
   void *addr = (void *)(PAGE_MASK  (unsigned long)io_addr);
 - struct vm_struct **p, *tmp;
 + struct vm_struct *vm;

   /*
* If this is a section based mapping we need to handle it
 @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
* all the mappings before the area can be reclaimed
* by someone else.
*/
 - write_lock(vmlist_lock);
 - for (p = vmlist ; (tmp = *p) ; p = tmp-next) {
 - if ((tmp-flags  VM_IOREMAP)  (tmp-addr == addr)) {
 - if (tmp-flags  VM_UNICORE_SECTION_MAPPING) {
 - unmap_area_sections((unsigned long)tmp-addr,
 - tmp-size);
 - }
 - break;
 - }
 - }
 - write_unlock(vmlist_lock);
 + vm = find_vm_area(addr);
 + if (vm  (vm-flags  VM_IOREMAP) 
 + (vm-flags  VM_UNICORE_SECTION_MAPPING))
 + unmap_area_sections((unsigned long)vm-addr, vm-size);

   vunmap(addr);
  }
 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
 index 78fe3f1..9a1e658 100644
 --- a/arch/x86/mm/ioremap.c
 +++ b/arch/x86/mm/ioremap.c
 @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
  in parallel. Reuse of the virtual address is prevented by
  leaving it in the global lists until we're done with it.
  cpa takes care of the direct mappings. */
 - read_lock(vmlist_lock);
 - for (p = vmlist; p; p = p-next) {
 - if (p-addr == (void __force *)addr)
 - break;
 - }
 - read_unlock(vmlist_lock);
 + p = find_vm_area((void __force *)addr);

   if (!p) {
   printk(KERN_ERR iounmap: bad address %p\n, addr);
 --
 1.7.9.5


--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-07 Thread JoonSoo Kim
Hello, Pekka.

2012/12/7 Pekka Enberg :
> On Thu, Dec 6, 2012 at 6:09 PM, Joonsoo Kim  wrote:
>> The purpose of iterating a vmlist is finding vm area with specific
>> virtual address. find_vm_area() is provided for this purpose
>> and more efficient, because it uses a rbtree.
>> So change it.
>
> You no longer take the 'vmlist_lock'. This is safe, because...?

As Bob mentioned, find_vm_area() hold a 'vmap_area_lock' during
searching a area.
When we hold a 'vmap_area_lock', area can't be removed.
So this change is safe.

Thanks.
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-07 Thread Bob Liu
On Fri, Dec 7, 2012 at 3:44 PM, Pekka Enberg  wrote:
> On Thu, Dec 6, 2012 at 6:09 PM, Joonsoo Kim  wrote:
>> The purpose of iterating a vmlist is finding vm area with specific
>> virtual address. find_vm_area() is provided for this purpose
>> and more efficient, because it uses a rbtree.
>> So change it.
>
> You no longer take the 'vmlist_lock'. This is safe, because...?
>

I think it's because find_vm_area() -> find_vmap_area() will use
vmap_area_lock instead.

>> Cc: Chris Metcalf 
>> Cc: Guan Xuetao 
>> Cc: Thomas Gleixner 
>> Cc: Ingo Molnar 
>> Cc: "H. Peter Anvin" 
>> Signed-off-by: Joonsoo Kim 
>>
>> diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
>> index de0de0c..862782d 100644
>> --- a/arch/tile/mm/pgtable.c
>> +++ b/arch/tile/mm/pgtable.c
>> @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
>>in parallel. Reuse of the virtual address is prevented by
>>leaving it in the global lists until we're done with it.
>>cpa takes care of the direct mappings. */
>> -   read_lock(&vmlist_lock);
>> -   for (p = vmlist; p; p = p->next) {
>> -   if (p->addr == addr)
>> -   break;
>> -   }
>> -   read_unlock(&vmlist_lock);
>> +   p = find_vm_area((void *)addr);
>>
>> if (!p) {
>> pr_err("iounmap: bad address %p\n", addr);
>> diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
>> index b7a6055..13068ee 100644
>> --- a/arch/unicore32/mm/ioremap.c
>> +++ b/arch/unicore32/mm/ioremap.c
>> @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
>>  void __uc32_iounmap(volatile void __iomem *io_addr)
>>  {
>> void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
>> -   struct vm_struct **p, *tmp;
>> +   struct vm_struct *vm;
>>
>> /*
>>  * If this is a section based mapping we need to handle it
>> @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
>>  * all the mappings before the area can be reclaimed
>>  * by someone else.
>>  */
>> -   write_lock(&vmlist_lock);
>> -   for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
>> -   if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
>> -   if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
>> -   unmap_area_sections((unsigned long)tmp->addr,
>> -   tmp->size);
>> -   }
>> -   break;
>> -   }
>> -   }
>> -   write_unlock(&vmlist_lock);
>> +   vm = find_vm_area(addr);
>> +   if (vm && (vm->flags & VM_IOREMAP) &&
>> +   (vm->flags & VM_UNICORE_SECTION_MAPPING))
>> +   unmap_area_sections((unsigned long)vm->addr, vm->size);
>>
>> vunmap(addr);
>>  }
>> diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
>> index 78fe3f1..9a1e658 100644
>> --- a/arch/x86/mm/ioremap.c
>> +++ b/arch/x86/mm/ioremap.c
>> @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
>>in parallel. Reuse of the virtual address is prevented by
>>leaving it in the global lists until we're done with it.
>>cpa takes care of the direct mappings. */
>> -   read_lock(&vmlist_lock);
>> -   for (p = vmlist; p; p = p->next) {
>> -   if (p->addr == (void __force *)addr)
>> -   break;
>> -   }
>> -   read_unlock(&vmlist_lock);
>> +   p = find_vm_area((void __force *)addr);
>>
>> if (!p) {
>> printk(KERN_ERR "iounmap: bad address %p\n", addr);
>> --
>> 1.7.9.5

-- 
Thanks,
--Bob
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-07 Thread Bob Liu
On Fri, Dec 7, 2012 at 3:44 PM, Pekka Enberg penb...@kernel.org wrote:
 On Thu, Dec 6, 2012 at 6:09 PM, Joonsoo Kim js1...@gmail.com wrote:
 The purpose of iterating a vmlist is finding vm area with specific
 virtual address. find_vm_area() is provided for this purpose
 and more efficient, because it uses a rbtree.
 So change it.

 You no longer take the 'vmlist_lock'. This is safe, because...?


I think it's because find_vm_area() - find_vmap_area() will use
vmap_area_lock instead.

 Cc: Chris Metcalf cmetc...@tilera.com
 Cc: Guan Xuetao g...@mprc.pku.edu.cn
 Cc: Thomas Gleixner t...@linutronix.de
 Cc: Ingo Molnar mi...@redhat.com
 Cc: H. Peter Anvin h...@zytor.com
 Signed-off-by: Joonsoo Kim js1...@gmail.com

 diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
 index de0de0c..862782d 100644
 --- a/arch/tile/mm/pgtable.c
 +++ b/arch/tile/mm/pgtable.c
 @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
 -   read_lock(vmlist_lock);
 -   for (p = vmlist; p; p = p-next) {
 -   if (p-addr == addr)
 -   break;
 -   }
 -   read_unlock(vmlist_lock);
 +   p = find_vm_area((void *)addr);

 if (!p) {
 pr_err(iounmap: bad address %p\n, addr);
 diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
 index b7a6055..13068ee 100644
 --- a/arch/unicore32/mm/ioremap.c
 +++ b/arch/unicore32/mm/ioremap.c
 @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
  void __uc32_iounmap(volatile void __iomem *io_addr)
  {
 void *addr = (void *)(PAGE_MASK  (unsigned long)io_addr);
 -   struct vm_struct **p, *tmp;
 +   struct vm_struct *vm;

 /*
  * If this is a section based mapping we need to handle it
 @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
  * all the mappings before the area can be reclaimed
  * by someone else.
  */
 -   write_lock(vmlist_lock);
 -   for (p = vmlist ; (tmp = *p) ; p = tmp-next) {
 -   if ((tmp-flags  VM_IOREMAP)  (tmp-addr == addr)) {
 -   if (tmp-flags  VM_UNICORE_SECTION_MAPPING) {
 -   unmap_area_sections((unsigned long)tmp-addr,
 -   tmp-size);
 -   }
 -   break;
 -   }
 -   }
 -   write_unlock(vmlist_lock);
 +   vm = find_vm_area(addr);
 +   if (vm  (vm-flags  VM_IOREMAP) 
 +   (vm-flags  VM_UNICORE_SECTION_MAPPING))
 +   unmap_area_sections((unsigned long)vm-addr, vm-size);

 vunmap(addr);
  }
 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
 index 78fe3f1..9a1e658 100644
 --- a/arch/x86/mm/ioremap.c
 +++ b/arch/x86/mm/ioremap.c
 @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
 -   read_lock(vmlist_lock);
 -   for (p = vmlist; p; p = p-next) {
 -   if (p-addr == (void __force *)addr)
 -   break;
 -   }
 -   read_unlock(vmlist_lock);
 +   p = find_vm_area((void __force *)addr);

 if (!p) {
 printk(KERN_ERR iounmap: bad address %p\n, addr);
 --
 1.7.9.5

-- 
Thanks,
--Bob
--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-07 Thread JoonSoo Kim
Hello, Pekka.

2012/12/7 Pekka Enberg penb...@kernel.org:
 On Thu, Dec 6, 2012 at 6:09 PM, Joonsoo Kim js1...@gmail.com wrote:
 The purpose of iterating a vmlist is finding vm area with specific
 virtual address. find_vm_area() is provided for this purpose
 and more efficient, because it uses a rbtree.
 So change it.

 You no longer take the 'vmlist_lock'. This is safe, because...?

As Bob mentioned, find_vm_area() hold a 'vmap_area_lock' during
searching a area.
When we hold a 'vmap_area_lock', area can't be removed.
So this change is safe.

Thanks.
--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-06 Thread Pekka Enberg
On Thu, Dec 6, 2012 at 6:09 PM, Joonsoo Kim  wrote:
> The purpose of iterating a vmlist is finding vm area with specific
> virtual address. find_vm_area() is provided for this purpose
> and more efficient, because it uses a rbtree.
> So change it.

You no longer take the 'vmlist_lock'. This is safe, because...?

> Cc: Chris Metcalf 
> Cc: Guan Xuetao 
> Cc: Thomas Gleixner 
> Cc: Ingo Molnar 
> Cc: "H. Peter Anvin" 
> Signed-off-by: Joonsoo Kim 
>
> diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
> index de0de0c..862782d 100644
> --- a/arch/tile/mm/pgtable.c
> +++ b/arch/tile/mm/pgtable.c
> @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
>in parallel. Reuse of the virtual address is prevented by
>leaving it in the global lists until we're done with it.
>cpa takes care of the direct mappings. */
> -   read_lock(&vmlist_lock);
> -   for (p = vmlist; p; p = p->next) {
> -   if (p->addr == addr)
> -   break;
> -   }
> -   read_unlock(&vmlist_lock);
> +   p = find_vm_area((void *)addr);
>
> if (!p) {
> pr_err("iounmap: bad address %p\n", addr);
> diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
> index b7a6055..13068ee 100644
> --- a/arch/unicore32/mm/ioremap.c
> +++ b/arch/unicore32/mm/ioremap.c
> @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
>  void __uc32_iounmap(volatile void __iomem *io_addr)
>  {
> void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
> -   struct vm_struct **p, *tmp;
> +   struct vm_struct *vm;
>
> /*
>  * If this is a section based mapping we need to handle it
> @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
>  * all the mappings before the area can be reclaimed
>  * by someone else.
>  */
> -   write_lock(&vmlist_lock);
> -   for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
> -   if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
> -   if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
> -   unmap_area_sections((unsigned long)tmp->addr,
> -   tmp->size);
> -   }
> -   break;
> -   }
> -   }
> -   write_unlock(&vmlist_lock);
> +   vm = find_vm_area(addr);
> +   if (vm && (vm->flags & VM_IOREMAP) &&
> +   (vm->flags & VM_UNICORE_SECTION_MAPPING))
> +   unmap_area_sections((unsigned long)vm->addr, vm->size);
>
> vunmap(addr);
>  }
> diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
> index 78fe3f1..9a1e658 100644
> --- a/arch/x86/mm/ioremap.c
> +++ b/arch/x86/mm/ioremap.c
> @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
>in parallel. Reuse of the virtual address is prevented by
>leaving it in the global lists until we're done with it.
>cpa takes care of the direct mappings. */
> -   read_lock(&vmlist_lock);
> -   for (p = vmlist; p; p = p->next) {
> -   if (p->addr == (void __force *)addr)
> -   break;
> -   }
> -   read_unlock(&vmlist_lock);
> +   p = find_vm_area((void __force *)addr);
>
> if (!p) {
> printk(KERN_ERR "iounmap: bad address %p\n", addr);
> --
> 1.7.9.5
>
> --
> To unsubscribe, send a message with 'unsubscribe linux-mm' in
> the body to majord...@kvack.org.  For more info on Linux MM,
> see: http://www.linux-mm.org/ .
> Don't email: em...@kvack.org
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-06 Thread Joonsoo Kim
The purpose of iterating a vmlist is finding vm area with specific
virtual address. find_vm_area() is provided for this purpose
and more efficient, because it uses a rbtree.
So change it.

Cc: Chris Metcalf 
Cc: Guan Xuetao 
Cc: Thomas Gleixner 
Cc: Ingo Molnar 
Cc: "H. Peter Anvin" 
Signed-off-by: Joonsoo Kim 

diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index de0de0c..862782d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
   in parallel. Reuse of the virtual address is prevented by
   leaving it in the global lists until we're done with it.
   cpa takes care of the direct mappings. */
-   read_lock(&vmlist_lock);
-   for (p = vmlist; p; p = p->next) {
-   if (p->addr == addr)
-   break;
-   }
-   read_unlock(&vmlist_lock);
+   p = find_vm_area((void *)addr);
 
if (!p) {
pr_err("iounmap: bad address %p\n", addr);
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a6055..13068ee 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
 void __uc32_iounmap(volatile void __iomem *io_addr)
 {
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-   struct vm_struct **p, *tmp;
+   struct vm_struct *vm;
 
/*
 * If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
 * all the mappings before the area can be reclaimed
 * by someone else.
 */
-   write_lock(&vmlist_lock);
-   for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-   if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-   if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
-   unmap_area_sections((unsigned long)tmp->addr,
-   tmp->size);
-   }
-   break;
-   }
-   }
-   write_unlock(&vmlist_lock);
+   vm = find_vm_area(addr);
+   if (vm && (vm->flags & VM_IOREMAP) &&
+   (vm->flags & VM_UNICORE_SECTION_MAPPING))
+   unmap_area_sections((unsigned long)vm->addr, vm->size);
 
vunmap(addr);
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1..9a1e658 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
   in parallel. Reuse of the virtual address is prevented by
   leaving it in the global lists until we're done with it.
   cpa takes care of the direct mappings. */
-   read_lock(&vmlist_lock);
-   for (p = vmlist; p; p = p->next) {
-   if (p->addr == (void __force *)addr)
-   break;
-   }
-   read_unlock(&vmlist_lock);
+   p = find_vm_area((void __force *)addr);
 
if (!p) {
printk(KERN_ERR "iounmap: bad address %p\n", addr);
-- 
1.7.9.5

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


[RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-06 Thread Joonsoo Kim
The purpose of iterating a vmlist is finding vm area with specific
virtual address. find_vm_area() is provided for this purpose
and more efficient, because it uses a rbtree.
So change it.

Cc: Chris Metcalf cmetc...@tilera.com
Cc: Guan Xuetao g...@mprc.pku.edu.cn
Cc: Thomas Gleixner t...@linutronix.de
Cc: Ingo Molnar mi...@redhat.com
Cc: H. Peter Anvin h...@zytor.com
Signed-off-by: Joonsoo Kim js1...@gmail.com

diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index de0de0c..862782d 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
   in parallel. Reuse of the virtual address is prevented by
   leaving it in the global lists until we're done with it.
   cpa takes care of the direct mappings. */
-   read_lock(vmlist_lock);
-   for (p = vmlist; p; p = p-next) {
-   if (p-addr == addr)
-   break;
-   }
-   read_unlock(vmlist_lock);
+   p = find_vm_area((void *)addr);
 
if (!p) {
pr_err(iounmap: bad address %p\n, addr);
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a6055..13068ee 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
 void __uc32_iounmap(volatile void __iomem *io_addr)
 {
void *addr = (void *)(PAGE_MASK  (unsigned long)io_addr);
-   struct vm_struct **p, *tmp;
+   struct vm_struct *vm;
 
/*
 * If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
 * all the mappings before the area can be reclaimed
 * by someone else.
 */
-   write_lock(&vmlist_lock);
-   for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-   if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-   if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
-   unmap_area_sections((unsigned long)tmp->addr,
-   tmp->size);
-   }
-   break;
-   }
-   }
-   write_unlock(&vmlist_lock);
+   vm = find_vm_area(addr);
+   if (vm && (vm->flags & VM_IOREMAP) &&
+   (vm->flags & VM_UNICORE_SECTION_MAPPING))
+   unmap_area_sections((unsigned long)vm->addr, vm->size);
 
vunmap(addr);
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1..9a1e658 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
   in parallel. Reuse of the virtual address is prevented by
   leaving it in the global lists until we're done with it.
   cpa takes care of the direct mappings. */
-   read_lock(&vmlist_lock);
-   for (p = vmlist; p; p = p->next) {
-   if (p->addr == (void __force *)addr)
-   break;
-   }
-   read_unlock(&vmlist_lock);
+   p = find_vm_area((void __force *)addr);
 
if (!p) {
printk(KERN_ERR "iounmap: bad address %p\n", addr);
-- 
1.7.9.5

--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [RFC PATCH 1/8] mm, vmalloc: change iterating a vmlist to find_vm_area()

2012-12-06 Thread Pekka Enberg
On Thu, Dec 6, 2012 at 6:09 PM, Joonsoo Kim js1...@gmail.com wrote:
 The purpose of iterating a vmlist is finding vm area with specific
 virtual address. find_vm_area() is provided for this purpose
 and more efficient, because it uses a rbtree.
 So change it.

You no longer take the 'vmlist_lock'. This is safe, because...?

 Cc: Chris Metcalf cmetc...@tilera.com
 Cc: Guan Xuetao g...@mprc.pku.edu.cn
 Cc: Thomas Gleixner t...@linutronix.de
 Cc: Ingo Molnar mi...@redhat.com
 Cc: H. Peter Anvin h...@zytor.com
 Signed-off-by: Joonsoo Kim js1...@gmail.com

 diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
 index de0de0c..862782d 100644
 --- a/arch/tile/mm/pgtable.c
 +++ b/arch/tile/mm/pgtable.c
 @@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
 -   read_lock(&vmlist_lock);
 -   for (p = vmlist; p; p = p->next) {
 -   if (p->addr == addr)
 -   break;
 -   }
 -   read_unlock(&vmlist_lock);
 +   p = find_vm_area((void *)addr);

 if (!p) {
 pr_err("iounmap: bad address %p\n", addr);
 diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
 index b7a6055..13068ee 100644
 --- a/arch/unicore32/mm/ioremap.c
 +++ b/arch/unicore32/mm/ioremap.c
 @@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
  void __uc32_iounmap(volatile void __iomem *io_addr)
  {
 void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 -   struct vm_struct **p, *tmp;
 +   struct vm_struct *vm;

 /*
  * If this is a section based mapping we need to handle it
 @@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
  * all the mappings before the area can be reclaimed
  * by someone else.
  */
 -   write_lock(&vmlist_lock);
 -   for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 -   if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 -   if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
 -   unmap_area_sections((unsigned long)tmp->addr,
 -   tmp->size);
 -   }
 -   break;
 -   }
 -   }
 -   write_unlock(&vmlist_lock);
 +   vm = find_vm_area(addr);
 +   if (vm && (vm->flags & VM_IOREMAP) &&
 +   (vm->flags & VM_UNICORE_SECTION_MAPPING))
 +   unmap_area_sections((unsigned long)vm->addr, vm->size);

 vunmap(addr);
  }
 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
 index 78fe3f1..9a1e658 100644
 --- a/arch/x86/mm/ioremap.c
 +++ b/arch/x86/mm/ioremap.c
 @@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
in parallel. Reuse of the virtual address is prevented by
leaving it in the global lists until we're done with it.
cpa takes care of the direct mappings. */
 -   read_lock(&vmlist_lock);
 -   for (p = vmlist; p; p = p->next) {
 -   if (p->addr == (void __force *)addr)
 -   break;
 -   }
 -   read_unlock(&vmlist_lock);
 +   p = find_vm_area((void __force *)addr);

 if (!p) {
 printk(KERN_ERR "iounmap: bad address %p\n", addr);
 --
 1.7.9.5

 --
 To unsubscribe, send a message with 'unsubscribe linux-mm' in
 the body to majord...@kvack.org.  For more info on Linux MM,
 see: http://www.linux-mm.org/ .
 Don't email: <a href="mailto:d...@kvack.org">em...@kvack.org</a>
--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/