[PATCH 5/7] mm rmap: remove vma_address check for address inside vma

2012-09-04 Thread Michel Lespinasse
In file and anon rmap, we use interval trees to find potentially relevant
vmas and then call vma_address() to find the virtual address the given
page might be found at in these vmas. vma_address() used to include a
check that the returned address falls within the limits of the vma, but
this check isn't necessary now that we always use interval trees in rmap:
the interval tree just doesn't return any vmas which this check would find
to be irrelevant. As a result, we can replace the use of -EFAULT error code
(which then needed to be checked in every call site) with a VM_BUG_ON().

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 mm/huge_memory.c |4 
 mm/rmap.c|   48 +---
 2 files changed, 21 insertions(+), 31 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fe119cb71b41..91b65f962320 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1432,8 +1432,6 @@ static void __split_huge_page(struct page *page,
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
-   if (addr == -EFAULT)
-   continue;
mapcount += __split_huge_page_splitting(page, vma, addr);
}
/*
@@ -1458,8 +1456,6 @@ static void __split_huge_page(struct page *page,
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
-   if (addr == -EFAULT)
-   continue;
mapcount2 += __split_huge_page_map(page, vma, addr);
}
if (mapcount != mapcount2)
diff --git a/mm/rmap.c b/mm/rmap.c
index 9c61bf387fd1..28777412de62 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-   unsigned long address;
 
if (unlikely(is_vm_hugetlb_page(vma)))
pgoff = page->index << huge_page_order(page_hstate(page));
-   address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-   if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-   /* page should be within @vma mapping range */
-   return -EFAULT;
-   }
+
+   return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+   unsigned long address = __vma_address(page, vma);
+
+   /* page should be within @vma mapping range */
+   VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
return address;
 }
 
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct 
*vma)
 {
+   unsigned long address;
if (PageAnon(page)) {
struct anon_vma *page__anon_vma = page_anon_vma(page);
/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, 
struct vm_area_struct *vma)
return -EFAULT;
} else
return -EFAULT;
-   return vma_address(page, vma);
+   address = __vma_address(page, vma);
+   if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+   return -EFAULT;
+   return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct 
vm_area_struct *vma)
pte_t *pte;
spinlock_t *ptl;
 
-   address = vma_address(page, vma);
-   if (address == -EFAULT) /* out of vma range */
+   address = __vma_address(page, vma);
+   if (unlikely(address < vma->vm_start || address >= vma->vm_end))
return 0;
pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
if (!pte)   /* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
-   if (address == -EFAULT)
-   continue;
/*
 * If we are reclaiming on behalf of a cgroup, skip
 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 

[PATCH 5/7] mm rmap: remove vma_address check for address inside vma

2012-09-04 Thread Michel Lespinasse
In file and anon rmap, we use interval trees to find potentially relevant
vmas and then call vma_address() to find the virtual address the given
page might be found at in these vmas. vma_address() used to include a
check that the returned address falls within the limits of the vma, but
this check isn't necessary now that we always use interval trees in rmap:
the interval tree just doesn't return any vmas which this check would find
to be irrelevant. As a result, we can replace the use of -EFAULT error code
(which then needed to be checked in every call site) with a VM_BUG_ON().

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 mm/huge_memory.c |4 
 mm/rmap.c|   48 +---
 2 files changed, 21 insertions(+), 31 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index fe119cb71b41..91b65f962320 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1432,8 +1432,6 @@ static void __split_huge_page(struct page *page,
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
-   if (addr == -EFAULT)
-   continue;
mapcount += __split_huge_page_splitting(page, vma, addr);
}
/*
@@ -1458,8 +1456,6 @@ static void __split_huge_page(struct page *page,
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
-   if (addr == -EFAULT)
-   continue;
mapcount2 += __split_huge_page_map(page, vma, addr);
}
if (mapcount != mapcount2)
diff --git a/mm/rmap.c b/mm/rmap.c
index 9c61bf387fd1..28777412de62 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -510,22 +510,26 @@ void page_unlock_anon_vma(struct anon_vma *anon_vma)
 
 /*
  * At what user virtual address is page expected in @vma?
- * Returns virtual address or -EFAULT if page's index/offset is not
- * within the range mapped the @vma.
  */
-inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+static inline unsigned long
+__vma_address(struct page *page, struct vm_area_struct *vma)
 {
pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-   unsigned long address;
 
if (unlikely(is_vm_hugetlb_page(vma)))
pgoff = page->index << huge_page_order(page_hstate(page));
-   address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
-   if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
-   /* page should be within @vma mapping range */
-   return -EFAULT;
-   }
+
+   return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+}
+
+inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+   unsigned long address = __vma_address(page, vma);
+
+   /* page should be within @vma mapping range */
+   VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
return address;
 }
 
@@ -535,6 +539,7 @@ vma_address(struct page *page, struct vm_area_struct *vma)
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct 
*vma)
 {
+   unsigned long address;
if (PageAnon(page)) {
struct anon_vma *page__anon_vma = page_anon_vma(page);
/*
@@ -550,7 +555,10 @@ unsigned long page_address_in_vma(struct page *page, 
struct vm_area_struct *vma)
return -EFAULT;
} else
return -EFAULT;
-   return vma_address(page, vma);
+   address = __vma_address(page, vma);
+   if (unlikely(address < vma->vm_start || address >= vma->vm_end))
+   return -EFAULT;
+   return address;
 }
 
 /*
@@ -624,8 +632,8 @@ int page_mapped_in_vma(struct page *page, struct 
vm_area_struct *vma)
pte_t *pte;
spinlock_t *ptl;
 
-   address = vma_address(page, vma);
-   if (address == -EFAULT) /* out of vma range */
+   address = __vma_address(page, vma);
+   if (unlikely(address < vma->vm_start || address >= vma->vm_end))
return 0;
pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
if (!pte)   /* the page is not in this mm */
@@ -732,8 +740,6 @@ static int page_referenced_anon(struct page *page,
anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
struct vm_area_struct *vma = avc->vma;
unsigned long address = vma_address(page, vma);
-   if (address == -EFAULT)
-   continue;
/*
 * If we are reclaiming on behalf of a cgroup, skip
 * counting on behalf of references from different
@@ -799,8 +805,6 @@ static int page_referenced_file(struct page *page,
 
vma_interval_tree_foreach(vma,