Re: [PATCH 9/9] mm/rmap: use rmap_walk() in page_mkclean()

2013-12-02 Thread Naoya Horiguchi
On Thu, Nov 28, 2013 at 04:48:46PM +0900, Joonsoo Kim wrote:
> Now we have infrastructure in rmap_walk() to handle the differences
> among the variants of the rmap traversing functions.
> 
> So, just use it in page_mkclean().
> 
> In this patch, I change the following things:
> 
> 1. Remove some variants of the rmap traversing functions,
> cf. page_mkclean_file().
> 2. Mechanically change page_mkclean() to use rmap_walk().
> 
> Signed-off-by: Joonsoo Kim 

Reviewed-by: Naoya Horiguchi 


[PATCH 9/9] mm/rmap: use rmap_walk() in page_mkclean()

2013-11-27 Thread Joonsoo Kim
Now we have infrastructure in rmap_walk() to handle the differences
among the variants of the rmap traversing functions.

So, just use it in page_mkclean().

In this patch, I change the following things:

1. Remove some variants of the rmap traversing functions,
cf. page_mkclean_file().
2. Mechanically change page_mkclean() to use rmap_walk().

Signed-off-by: Joonsoo Kim 
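
For reference, here is a minimal sketch of the rmap_walk_control
structure as this patch assumes it, reconstructed only from the
rwc.main / rwc.arg / rwc.vma_skip usage in the hunks below; the exact
field types and ordering are assumptions, not the definition added
earlier in the series:

/*
 * Sketch only: inferred from the usage in page_mkclean() below,
 * not copied from the series' header.
 */
struct rmap_walk_control {
	/*
	 * Per-vma callback; presumably returns SWAP_AGAIN to keep
	 * walking, as page_mkclean_one() does below.
	 */
	int (*main)(struct page *page, struct vm_area_struct *vma,
		    unsigned long address, void *arg);
	/* opaque pointer handed through to the callbacks */
	void *arg;
	/* presumably returns non-zero to make rmap_walk() skip the vma */
	int (*vma_skip)(struct vm_area_struct *vma, void *arg);
};

With that, page_mkclean() only has to fill in main/arg/vma_skip and let
rmap_walk() do the file-backed traversal that page_mkclean_file() used
to open-code.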

diff --git a/mm/rmap.c b/mm/rmap.c
index 5e78d5c..bbbc705 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -809,12 +809,13 @@ int page_referenced(struct page *page,
 }
 
 static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
-   unsigned long address)
+   unsigned long address, void *arg)
 {
struct mm_struct *mm = vma->vm_mm;
pte_t *pte;
spinlock_t *ptl;
int ret = 0;
+   int *cleaned = arg;
 
 pte = page_check_address(page, mm, address, &ptl, 1);
if (!pte)
@@ -833,44 +834,46 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 
pte_unmap_unlock(pte, ptl);
 
-   if (ret)
+   if (ret) {
mmu_notifier_invalidate_page(mm, address);
+   (*cleaned)++;
+   }
 out:
-   return ret;
+   return SWAP_AGAIN;
 }
 
-static int page_mkclean_file(struct address_space *mapping, struct page *page)
+static int skip_vma_non_shared(struct vm_area_struct *vma, void *arg)
 {
-   pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-   struct vm_area_struct *vma;
-   int ret = 0;
-
-   BUG_ON(PageAnon(page));
+   if (vma->vm_flags & VM_SHARED)
+   return 0;
 
-   mutex_lock(&mapping->i_mmap_mutex);
-   vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-   if (vma->vm_flags & VM_SHARED) {
-   unsigned long address = vma_address(page, vma);
-   ret += page_mkclean_one(page, vma, address);
-   }
-   }
-   mutex_unlock(&mapping->i_mmap_mutex);
-   return ret;
+   return 1;
 }
 
 int page_mkclean(struct page *page)
 {
-   int ret = 0;
+   struct address_space *mapping;
+   struct rmap_walk_control rwc;
+   int cleaned;
 
BUG_ON(!PageLocked(page));
 
-   if (page_mapped(page)) {
-   struct address_space *mapping = page_mapping(page);
-   if (mapping)
-   ret = page_mkclean_file(mapping, page);
-   }
+   if (!page_mapped(page))
+   return 0;
 
-   return ret;
+   mapping = page_mapping(page);
+   if (!mapping)
+   return 0;
+
+   memset(&rwc, 0, sizeof(rwc));
+   cleaned = 0;
+   rwc.main = page_mkclean_one;
+   rwc.arg = (void *)&cleaned;
+   rwc.vma_skip = skip_vma_non_shared;
+
+   rmap_walk(page, &rwc);
+
+   return cleaned;
 }
 EXPORT_SYMBOL_GPL(page_mkclean);
 
-- 
1.7.9.5


