On 15/08/2020 07:20, Chinwen Chang wrote:
Extend smap_gather_stats() to support an indicated beginning address at
which it should start gathering. To achieve this, add a new parameter
@start assigned by the caller, and refactor the function for simplicity.

If @start is 0, the whole range of @vma is used for gathering.
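
For illustration only (resume_addr is a hypothetical name, not part of
the patch), a caller would use the new parameter like this:

	/* gather stats over the whole vma, as before */
	smap_gather_stats(vma, &mss, 0);

	/* gather stats only from resume_addr up to vma->vm_end */
	smap_gather_stats(vma, &mss, resume_addr);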

Change since v2:
- This is a new change to make the retry behavior of smaps_rollup
  more complete, as suggested by Michel [1]

[1] 
https://lore.kernel.org/lkml/cann689ftcsc71cjajs0gpspohgo_hrj+diwsou1wr98ypkt...@mail.gmail.com/

Signed-off-by: Chinwen Chang <chinwen.ch...@mediatek.com>
CC: Michel Lespinasse <wal...@google.com>
CC: Steven Price <steven.pr...@arm.com>

LGTM

Reviewed-by: Steven Price <steven.pr...@arm.com>

Steve

---
  fs/proc/task_mmu.c | 30 ++++++++++++++++++++++--------
  1 file changed, 22 insertions(+), 8 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbda449..76e623a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -723,9 +723,21 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
        .pte_hole               = smaps_pte_hole,
  };

+/*
+ * Gather mem stats from @vma with the indicated beginning
+ * address @start, and keep them in @mss.
+ *
+ * Use vm_start of @vma as the beginning address if @start is 0.
+ */
  static void smap_gather_stats(struct vm_area_struct *vma,
-                            struct mem_size_stats *mss)
+               struct mem_size_stats *mss, unsigned long start)
  {
+       const struct mm_walk_ops *ops = &smaps_walk_ops;
+
+       /* Invalid start */
+       if (start >= vma->vm_end)
+               return;
+
  #ifdef CONFIG_SHMEM
        /* In case of smaps_rollup, reset the value from previous vma */
        mss->check_shmem_swap = false;
@@ -742,18 +754,20 @@ static void smap_gather_stats(struct vm_area_struct *vma,
                 */
                unsigned long shmem_swapped = shmem_swap_usage(vma);

-               if (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
-                                       !(vma->vm_flags & VM_WRITE)) {
+               if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
+                                       !(vma->vm_flags & VM_WRITE))) {
                        mss->swap += shmem_swapped;
                } else {
                        mss->check_shmem_swap = true;
-                       walk_page_vma(vma, &smaps_shmem_walk_ops, mss);
-                       return;
+                       ops = &smaps_shmem_walk_ops;
                }
        }
  #endif
        /* mmap_lock is held in m_start */
-       walk_page_vma(vma, &smaps_walk_ops, mss);
+       if (!start)
+               walk_page_vma(vma, ops, mss);
+       else
+               walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
  }

 #define SEQ_PUT_DEC(str, val) \
@@ -805,7 +819,7 @@ static int show_smap(struct seq_file *m, void *v)
        memset(&mss, 0, sizeof(mss));

-       smap_gather_stats(vma, &mss);
+       smap_gather_stats(vma, &mss, 0);

        show_map_vma(m, vma);

@@ -854,7 +868,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
        hold_task_mempolicy(priv);

        for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
-               smap_gather_stats(vma, &mss);
+               smap_gather_stats(vma, &mss, 0);
                last_vma_end = vma->vm_end;
        }
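
As an aside, here is a rough sketch (not part of this patch) of how the
smaps_rollup retry mentioned in the changelog might consume @start after
dropping and re-acquiring mmap_lock; the surrounding loop and locking
are assumed, while find_vma(), priv->mm, mss and last_vma_end match the
diff above:

	vma = find_vma(priv->mm, last_vma_end - 1);
	if (vma) {
		if (vma->vm_start >= last_vma_end)
			/* entirely new vma: gather it from the beginning */
			smap_gather_stats(vma, &mss, 0);
		else
			/*
			 * same or extended vma: gather only what lies beyond
			 * last_vma_end; the "start >= vma->vm_end" check in
			 * smap_gather_stats() makes this a no-op if nothing
			 * is left to process
			 */
			smap_gather_stats(vma, &mss, last_vma_end);
	}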
