Re: [PATCH 9/9] mm, hugetlb: decrement reserve count if VM_NORESERVE alloc page cache

2013-07-15 Thread Aneesh Kumar K.V
Joonsoo Kim  writes:

> If a vma with VM_NORESERVE allocates a new page for the page cache, we
> should check whether this range is reserved or not. If this address is
> already reserved by another process (the chg == 0 case), we should
> decrement the reserve count, because this allocated page will go into
> the page cache and, currently, there is no way to know whether this page
> came from the reserved pool when the inode is released. This can
> over-count the reserve count. The following example code easily
> reproduces this situation.
>
> size = 20 * MB;
> flag = MAP_SHARED;
> p = mmap(NULL, size, PROT_READ|PROT_WRITE, flag, fd, 0);
> if (p == MAP_FAILED) {
> fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
> return -1;
> }
>
> flag = MAP_SHARED | MAP_NORESERVE;
> q = mmap(NULL, size, PROT_READ|PROT_WRITE, flag, fd, 0);
> if (q == MAP_FAILED) {
> fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
> }
> q[0] = 'c';
>
> This patch solves this problem.
>
> Signed-off-by: Joonsoo Kim 

Reviewed-by: Aneesh Kumar K.V 

>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index ed2d0af..defb180 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -443,10 +443,23 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
>  }
>
>  /* Returns true if the VMA has associated reserve pages */
> -static int vma_has_reserves(struct vm_area_struct *vma)
> +static int vma_has_reserves(struct vm_area_struct *vma, long chg)
>  {
> - if (vma->vm_flags & VM_NORESERVE)
> - return 0;
> + if (vma->vm_flags & VM_NORESERVE) {
> + /*
> +  * This address is already reserved by another process (chg == 0),
> +  * so we should decrement the reserve count. Without
> +  * decrementing it, the reserve count remains after releasing the
> +  * inode, because this allocated page will go into the page cache
> +  * and is regarded as coming from the reserved pool when the
> +  * inode is released. Currently, we don't have any other way to
> +  * deal with this situation properly, so add a work-around here.
> +  */
> + if (vma->vm_flags & VM_MAYSHARE && chg == 0)
> + return 1;
> + else
> + return 0;
> + }
>
>   /* Shared mappings always use reserves */
>   if (vma->vm_flags & VM_MAYSHARE)
> @@ -520,7 +533,8 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
>
>  static struct page *dequeue_huge_page_vma(struct hstate *h,
>   struct vm_area_struct *vma,
> - unsigned long address, int avoid_reserve)
> + unsigned long address, int avoid_reserve,
> + long chg)
>  {
>   struct page *page = NULL;
>   struct mempolicy *mpol;
> @@ -535,7 +549,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
>* have no page reserves. This check ensures that reservations are
>* not "stolen". The child may still get SIGKILLed
>*/
> - if (!vma_has_reserves(vma) &&
> + if (!vma_has_reserves(vma, chg) &&
>   h->free_huge_pages - h->resv_huge_pages == 0)
>   return NULL;
>
> @@ -553,8 +567,12 @@ retry_cpuset:
>   if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
>   page = dequeue_huge_page_node(h, zone_to_nid(zone));
>   if (page) {
> - if (!avoid_reserve && vma_has_reserves(vma))
> - h->resv_huge_pages--;
> + if (avoid_reserve)
> + break;
> + if (!vma_has_reserves(vma, chg))
> + break;
> +
> + h->resv_huge_pages--;
>   break;
>   }
>   }
> @@ -1139,7 +1157,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
>   }
>
>   spin_lock(&hugetlb_lock);
> - page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
> + page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
>   if (!page) {
>   spin_unlock(&hugetlb_lock);
>   page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
> -- 
> 1.7.9.5
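
For anyone who wants to try the reproducer from the commit message, here
is a self-contained sketch fleshed out so it compiles and runs. The
hugetlbfs mount point (/mnt/huge), the file name, and the 20 MB size are
assumptions for illustration; the file must live on a hugetlbfs mount for
huge page reservations to apply at all.

/*
 * Hypothetical standalone reproducer, expanded from the snippet in the
 * commit message above. Mount point, file name, and size are assumptions.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define MB (1024UL * 1024UL)

int main(void)
{
	size_t size = 20 * MB;
	int flag;
	char *p, *q;
	/* Assumed hugetlbfs mount; adjust the path for your system. */
	int fd = open("/mnt/huge/repro", O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		fprintf(stderr, "open() failed: %s\n", strerror(errno));
		return -1;
	}

	/* First mapping: reserves huge pages for the whole file range. */
	flag = MAP_SHARED;
	p = mmap(NULL, size, PROT_READ|PROT_WRITE, flag, fd, 0);
	if (p == MAP_FAILED) {
		fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
		return -1;
	}

	/*
	 * Second mapping: VM_NORESERVE, but the range is already
	 * reserved through the first mapping, so chg == 0 here.
	 */
	flag = MAP_SHARED | MAP_NORESERVE;
	q = mmap(NULL, size, PROT_READ|PROT_WRITE, flag, fd, 0);
	if (q == MAP_FAILED) {
		fprintf(stderr, "mmap() failed: %s\n", strerror(errno));
		return -1;
	}

	/*
	 * Fault one page through the VM_NORESERVE mapping. The page
	 * enters the page cache; without the patch the reserve count
	 * is not decremented and leaks once the inode is released.
	 */
	q[0] = 'c';

	munmap(q, size);
	munmap(p, size);
	close(fd);
	unlink("/mnt/huge/repro");
	return 0;
}

Compile it normally and compare HugePages_Rsvd in /proc/meminfo before
the run and after the file is removed; on a patched kernel the count
should return to its original value.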
