Re: [PATCH 8/9] mm/rmap: use rmap_walk() in page_referenced()

2013-12-02 Thread Naoya Horiguchi
On Thu, Nov 28, 2013 at 04:48:45PM +0900, Joonsoo Kim wrote:
> Now we have an infrastructure in rmap_walk() to handle the differences
> among the variants of the rmap traversing functions.
> 
> So, just use it in page_referenced().
> 
> In this patch, I change the following things:
> 
> 1. Remove some variants of the rmap traversing functions,
>    i.e. page_referenced_ksm, page_referenced_anon and
>    page_referenced_file.
> 2. Introduce a new struct page_referenced_arg and pass it to
>    page_referenced_one(), the per-vma worker of rmap_walk(), in order to
>    count references, store vm_flags and check the finish condition
>    (see the sketch below).
> 3. Mechanically change page_referenced() to use rmap_walk().
> 
> Signed-off-by: Joonsoo Kim 

Reviewed-by: Naoya Horiguchi 


[PATCH 8/9] mm/rmap: use rmap_walk() in page_referenced()

2013-11-27 Thread Joonsoo Kim
Now we have an infrastructure in rmap_walk() to handle the differences
among the variants of the rmap traversing functions.

So, just use it in page_referenced().

In this patch, I change the following things:

1. Remove some variants of the rmap traversing functions,
   i.e. page_referenced_ksm, page_referenced_anon and
   page_referenced_file.
2. Introduce a new struct page_referenced_arg and pass it to
   page_referenced_one(), the per-vma worker of rmap_walk(), in order to
   count references, store vm_flags and check the finish condition
   (see the sketch below).
3. Mechanically change page_referenced() to use rmap_walk().

Signed-off-by: Joonsoo Kim 
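
As a rough illustration of item 2 above (this is not part of the patch; the
full new body of page_referenced_one() is not preserved in this archive):
the callback receives the per-walk state through rmap_walk()'s opaque
argument and folds its result into it. The SWAP_* values used as the
stop/continue convention below are an assumption based on how the existing
rmap walkers report status, and the struct fields come from the mm/rmap.c
hunk further down.

/*
 * Sketch only: inferred from the patch description and from
 * struct page_referenced_arg in the mm/rmap.c hunk below.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
		unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;	/* state passed via rmap_walk() */
	int referenced = 0;

	/* The real patch tests (and clears) the young bit on the pte/pmd
	 * that maps 'page' at 'address' here, as the old
	 * page_referenced_one() did; omitted from this sketch. */

	if (referenced) {
		pra->referenced++;		/* count the reference */
		pra->vm_flags |= vma->vm_flags;	/* store vm_flags */
	}

	/* finish condition: every mapping of the page has been visited */
	pra->mapcount--;
	if (!pra->mapcount)
		return SWAP_SUCCESS;	/* assumed "stop the walk" value */

	return SWAP_AGAIN;		/* assumed "keep walking" value */
}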

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 91b9719..3be6bb1 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -73,8 +73,6 @@ static inline void set_page_stable_node(struct page *page,
 struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
 
-int page_referenced_ksm(struct page *page,
-   struct mem_cgroup *memcg, unsigned long *vm_flags);
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
 
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d641f6d..e529ba3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -184,7 +184,7 @@ static inline void page_dup_rmap(struct page *page)
 int page_referenced(struct page *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
 int page_referenced_one(struct page *, struct vm_area_struct *,
-   unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
+   unsigned long address, void *arg);
 
 #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
 
diff --git a/mm/ksm.c b/mm/ksm.c
index 4f25cf7..4c4541b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1891,61 +1891,6 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page;
 }
 
-int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
-   unsigned long *vm_flags)
-{
-   struct stable_node *stable_node;
-   struct rmap_item *rmap_item;
-   unsigned int mapcount = page_mapcount(page);
-   int referenced = 0;
-   int search_new_forks = 0;
-
-   VM_BUG_ON(!PageKsm(page));
-   VM_BUG_ON(!PageLocked(page));
-
-   stable_node = page_stable_node(page);
-   if (!stable_node)
-   return 0;
-again:
-   hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
-   struct anon_vma *anon_vma = rmap_item->anon_vma;
-   struct anon_vma_chain *vmac;
-   struct vm_area_struct *vma;
-
-   anon_vma_lock_read(anon_vma);
-   anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
-  0, ULONG_MAX) {
-   vma = vmac->vma;
-   if (rmap_item->address < vma->vm_start ||
-   rmap_item->address >= vma->vm_end)
-   continue;
-   /*
-* Initially we examine only the vma which covers this
-* rmap_item; but later, if there is still work to do,
-* we examine covering vmas in other mms: in case they
-* were forked from the original since ksmd passed.
-*/
-   if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
-   continue;
-
-   if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
-   continue;
-
-   referenced += page_referenced_one(page, vma,
-   rmap_item->address, &mapcount, vm_flags);
-   if (!search_new_forks || !mapcount)
-   break;
-   }
-   anon_vma_unlock_read(anon_vma);
-   if (!mapcount)
-   goto out;
-   }
-   if (!search_new_forks++)
-   goto again;
-out:
-   return referenced;
-}
-
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
struct stable_node *stable_node;
diff --git a/mm/rmap.c b/mm/rmap.c
index 860a393..5e78d5c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -656,17 +656,23 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
return 1;
 }
 
+struct page_referenced_arg {
+   int mapcount;
+   int referenced;
+   unsigned long vm_flags;
+};
+
 /*
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
 int page_referenced_one(struct page *page, struct vm_area_struct *vma,
-   unsigned long address, unsigned int *mapcount,
-   unsigned long *vm_flags)
+   unsigned long address, void *arg)
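
The remainder of the mm/rmap.c hunk is not preserved above. For orientation
only, here is a sketch of how the rewritten page_referenced() would drive
rmap_walk() with the new argument structure. The rmap_walk_control fields
used below (.rmap_one, .arg) are assumed from the walker infrastructure this
series builds on, and details the hunks above do not show (page locking for
the unlocked case, memcg filtering) are deliberately left out.

/*
 * Sketch only: the expected shape of page_referenced() after this patch,
 * not the actual (truncated) hunk.
 */
int page_referenced(struct page *page, int is_locked,
		struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	struct page_referenced_arg pra = {
		.mapcount = page_mapcount(page),
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,	/* callback sketched earlier */
		.arg = (void *)&pra,			/* per-walk state on our stack */
	};

	*vm_flags = 0;
	if (!page_mapped(page) || !page_rmapping(page))
		return 0;

	/* ... take the page lock if needed and honour 'memcg'; omitted ... */

	rmap_walk(page, &rwc);

	*vm_flags = pra.vm_flags;
	return pra.referenced;
}

Keeping the per-walk state on the caller's stack is what lets a single
walker serve the anon, file and KSM cases that the earlier patches in this
series unified, in place of the three page_referenced_* variants removed
here.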
