Re: [PATCH v4 3/4] mm, shmem: Add shmem resident memory accounting

2015-10-04 Thread Hugh Dickins
On Fri, 2 Oct 2015, Vlastimil Babka wrote:

> From: Jerome Marchand 
> 
> Currently looking at /proc/<pid>/status or statm, there is no way to
> distinguish shmem pages from pages mapped to a regular file (shmem
> pages are mapped to /dev/zero), even though their implication in
> actual memory use is quite different.
> This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
> shmem pages instead of MM_FILEPAGES.
> 
> Signed-off-by: Jerome Marchand 
> Signed-off-by: Vlastimil Babka 
> Acked-by: Konstantin Khlebnikov 
> Acked-by: Michal Hocko 

Acked-by: Hugh Dickins 

Good, this one is long overdue; I've grown tired of writing those if/elses.
I'd probably have done without mm_counter_file(), but it's okay.
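
For thread readers, a quick user-space model of that selection logic (not
kernel code: struct page, PageAnon() and PageSwapBacked() are stubbed here
purely for illustration). It shows why the extra helper exists: mm_counter()
does the full PageAnon -> PageSwapBacked walk, while mm_counter_file() is the
cheaper variant for call sites that have already ruled out PageAnon, as in
the uprobes hunk quoted below.

/* User-space model of the new counter selection; struct page and the flag
 * tests are stand-ins, the enum order matches the mm_types.h hunk below. */
#include <stdbool.h>
#include <stdio.h>

enum { MM_FILEPAGES, MM_ANONPAGES, MM_SWAPENTS, MM_SHMEMPAGES, NR_MM_COUNTERS };

struct page { bool anon; bool swap_backed; };	/* stub */

static bool PageAnon(const struct page *p)       { return p->anon; }
static bool PageSwapBacked(const struct page *p) { return p->swap_backed; }

/* Cheaper variant for callers that already know !PageAnon(page) */
static int mm_counter_file(const struct page *page)
{
	return PageSwapBacked(page) ? MM_SHMEMPAGES : MM_FILEPAGES;
}

static int mm_counter(const struct page *page)
{
	return PageAnon(page) ? MM_ANONPAGES : mm_counter_file(page);
}

int main(void)
{
	struct page file_page  = { false, false };	/* regular file page */
	struct page shmem_page = { false, true  };	/* shmem/tmpfs page  */
	struct page anon_page  = { true,  true  };	/* anonymous page    */

	printf("file  -> counter %d\n", mm_counter(&file_page));	/* MM_FILEPAGES  */
	printf("shmem -> counter %d\n", mm_counter(&shmem_page));	/* MM_SHMEMPAGES */
	printf("anon  -> counter %d\n", mm_counter(&anon_page));	/* MM_ANONPAGES  */
	return 0;
}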

> ---
>  arch/s390/mm/pgtable.c   |  5 +
>  fs/proc/task_mmu.c   |  3 ++-
>  include/linux/mm.h   | 18 +-
>  include/linux/mm_types.h |  7 ---
>  kernel/events/uprobes.c  |  2 +-
>  mm/memory.c  | 30 ++
>  mm/oom_kill.c|  5 +++--
>  mm/rmap.c| 12 +++-
>  8 files changed, 41 insertions(+), 41 deletions(-)
> 
> diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
> index 54ef3bc..9816f25 100644
> --- a/arch/s390/mm/pgtable.c
> +++ b/arch/s390/mm/pgtable.c
> @@ -603,10 +603,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
>   else if (is_migration_entry(entry)) {
>   struct page *page = migration_entry_to_page(entry);
>  
> - if (PageAnon(page))
> - dec_mm_counter(mm, MM_ANONPAGES);
> - else
> - dec_mm_counter(mm, MM_FILEPAGES);
> + dec_mm_counter(mm, mm_counter(page));
>   }
>   free_swap_and_cache(entry);
>  }
> diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
> index 103457c..9b9708e 100644
> --- a/fs/proc/task_mmu.c
> +++ b/fs/proc/task_mmu.c
> @@ -83,7 +83,8 @@ unsigned long task_statm(struct mm_struct *mm,
>unsigned long *shared, unsigned long *text,
>unsigned long *data, unsigned long *resident)
>  {
> - *shared = get_mm_counter(mm, MM_FILEPAGES);
> + *shared = get_mm_counter(mm, MM_FILEPAGES) +
> + get_mm_counter(mm, MM_SHMEMPAGES);
>   *text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>   >> PAGE_SHIFT;
>   *data = mm->total_vm - mm->shared_vm;
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index d30eea3..8be4efc 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1361,10 +1361,26 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member)
>   atomic_long_dec(&mm->rss_stat.count[member]);
>  }
>  
> +/* Optimized variant when page is already known not to be PageAnon */
> +static inline int mm_counter_file(struct page *page)
> +{
> + if (PageSwapBacked(page))
> + return MM_SHMEMPAGES;
> + return MM_FILEPAGES;
> +}
> +
> +static inline int mm_counter(struct page *page)
> +{
> + if (PageAnon(page))
> + return MM_ANONPAGES;
> + return mm_counter_file(page);
> +}
> +
>  static inline unsigned long get_mm_rss(struct mm_struct *mm)
>  {
>   return get_mm_counter(mm, MM_FILEPAGES) +
> - get_mm_counter(mm, MM_ANONPAGES);
> + get_mm_counter(mm, MM_ANONPAGES) +
> + get_mm_counter(mm, MM_SHMEMPAGES);
>  }
>  
>  static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index f8d1492..207890b 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -369,9 +369,10 @@ struct core_state {
>  };
>  
>  enum {
> - MM_FILEPAGES,
> - MM_ANONPAGES,
> - MM_SWAPENTS,
> + MM_FILEPAGES,   /* Resident file mapping pages */
> + MM_ANONPAGES,   /* Resident anonymous pages */
> + MM_SWAPENTS,/* Anonymous swap entries */
> + MM_SHMEMPAGES,  /* Resident shared memory pages */
>   NR_MM_COUNTERS
>  };
>  
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index 4e5e979..6288606 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -180,7 +180,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
>   lru_cache_add_active_or_unevictable(kpage, vma);
>  
>   if (!PageAnon(page)) {
> - dec_mm_counter(mm, MM_FILEPAGES);
> + dec_mm_counter(mm, mm_counter_file(page));
>   inc_mm_counter(mm, MM_ANONPAGES);
>   }
>  
> diff --git a/mm/memory.c b/mm/memory.c
> index 3bd465a..f10d458 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -832,10 +832,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
>   } else if (is_migration_entry(entry)) {
>   page = migration_entry_to_page(entry);
>  
> - 

Re: [PATCH v4 3/4] mm, shmem: Add shmem resident memory accounting

2015-10-02 Thread Andrew Morton
On Fri,  2 Oct 2015 15:35:50 +0200 Vlastimil Babka  wrote:

> From: Jerome Marchand 

Changelog is a bit weird.

> Currently looking at /proc/<pid>/status or statm, there is no way to
> distinguish shmem pages from pages mapped to a regular file (shmem
> pages are mapped to /dev/zero), even though their implication in
> actual memory use is quite different.

OK, that's a bunch of stuff about the user interface.

> This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
> shmem pages instead of MM_FILEPAGES.

And that has nothing to do with the user interface.

So now this little reader is all confused.  The patch doesn't actually
address the described problem at all, does it?  It's preparatory stuff
only?  No changes to the kernel's user interface?


[PATCH v4 3/4] mm, shmem: Add shmem resident memory accounting

2015-10-02 Thread Vlastimil Babka
From: Jerome Marchand 

Currently looking at /proc/<pid>/status or statm, there is no way to
distinguish shmem pages from pages mapped to a regular file (shmem
pages are mapped to /dev/zero), even though their implication in
actual memory use is quite different.
This patch adds MM_SHMEMPAGES counter to mm_rss_stat to account for
shmem pages instead of MM_FILEPAGES.

Signed-off-by: Jerome Marchand 
Signed-off-by: Vlastimil Babka 
Acked-by: Konstantin Khlebnikov 
Acked-by: Michal Hocko 
---
 arch/s390/mm/pgtable.c   |  5 +
 fs/proc/task_mmu.c   |  3 ++-
 include/linux/mm.h   | 18 +-
 include/linux/mm_types.h |  7 ---
 kernel/events/uprobes.c  |  2 +-
 mm/memory.c  | 30 ++
 mm/oom_kill.c|  5 +++--
 mm/rmap.c| 12 +++-
 8 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 54ef3bc..9816f25 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -603,10 +603,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
else if (is_migration_entry(entry)) {
struct page *page = migration_entry_to_page(entry);
 
-   if (PageAnon(page))
-   dec_mm_counter(mm, MM_ANONPAGES);
-   else
-   dec_mm_counter(mm, MM_FILEPAGES);
+   dec_mm_counter(mm, mm_counter(page));
}
free_swap_and_cache(entry);
 }
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 103457c..9b9708e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -83,7 +83,8 @@ unsigned long task_statm(struct mm_struct *mm,
 unsigned long *shared, unsigned long *text,
 unsigned long *data, unsigned long *resident)
 {
-   *shared = get_mm_counter(mm, MM_FILEPAGES);
+   *shared = get_mm_counter(mm, MM_FILEPAGES) +
+   get_mm_counter(mm, MM_SHMEMPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d30eea3..8be4efc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1361,10 +1361,26 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member)
    atomic_long_dec(&mm->rss_stat.count[member]);
 }
 
+/* Optimized variant when page is already known not to be PageAnon */
+static inline int mm_counter_file(struct page *page)
+{
+   if (PageSwapBacked(page))
+   return MM_SHMEMPAGES;
+   return MM_FILEPAGES;
+}
+
+static inline int mm_counter(struct page *page)
+{
+   if (PageAnon(page))
+   return MM_ANONPAGES;
+   return mm_counter_file(page);
+}
+
 static inline unsigned long get_mm_rss(struct mm_struct *mm)
 {
return get_mm_counter(mm, MM_FILEPAGES) +
-   get_mm_counter(mm, MM_ANONPAGES);
+   get_mm_counter(mm, MM_ANONPAGES) +
+   get_mm_counter(mm, MM_SHMEMPAGES);
 }
 
 static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f8d1492..207890b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -369,9 +369,10 @@ struct core_state {
 };
 
 enum {
-   MM_FILEPAGES,
-   MM_ANONPAGES,
-   MM_SWAPENTS,
+   MM_FILEPAGES,   /* Resident file mapping pages */
+   MM_ANONPAGES,   /* Resident anonymous pages */
+   MM_SWAPENTS,/* Anonymous swap entries */
+   MM_SHMEMPAGES,  /* Resident shared memory pages */
NR_MM_COUNTERS
 };
 
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 4e5e979..6288606 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -180,7 +180,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
lru_cache_add_active_or_unevictable(kpage, vma);
 
if (!PageAnon(page)) {
-   dec_mm_counter(mm, MM_FILEPAGES);
+   dec_mm_counter(mm, mm_counter_file(page));
inc_mm_counter(mm, MM_ANONPAGES);
}
 
diff --git a/mm/memory.c b/mm/memory.c
index 3bd465a..f10d458 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -832,10 +832,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
} else if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
 
-   if (PageAnon(page))
-   rss[MM_ANONPAGES]++;
-   else
-   rss[MM_FILEPAGES]++;
+   rss[mm_counter(page)]++;
 
if (is_write_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
@@ -874,10 +871,7 @@ copy_one_pte(struct 
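
Not part of the patch, but a minimal user-space sketch (assuming Linux with
glibc) of the changelog's point that shmem pages look like file-backed pages:
a MAP_SHARED|MAP_ANONYMOUS mapping is backed by shmem and is listed in the
process's /proc maps as "/dev/zero (deleted)", which is why its resident pages
used to be charged to MM_FILEPAGES like any other file mapping.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 64 << 20;		/* 64 MB of shared anonymous memory */
	char cmd[64];

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 1, len);		/* fault the pages in: now resident shmem */

	/* The mapping is listed as "/dev/zero (deleted)", i.e. a file mapping */
	snprintf(cmd, sizeof(cmd), "grep zero /proc/%d/maps", (int)getpid());
	if (system(cmd) == -1)
		perror("system");

	/* statm's third field ("shared") is what task_statm() reports; after
	 * this patch it is MM_FILEPAGES + MM_SHMEMPAGES, so these pages stay
	 * visible there while being tracked separately inside the kernel. */
	snprintf(cmd, sizeof(cmd), "cat /proc/%d/statm", (int)getpid());
	if (system(cmd) == -1)
		perror("system");

	munmap(p, len);
	return 0;
}

Since task_statm() sums both counters, the value printed for statm's "shared"
field is unchanged by this patch; the split is internal, which is consistent
with the reading in Andrew's reply above that this patch is preparatory.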
