Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-16 Thread David Rientjes
On Sun, 15 Jul 2018, 禹舟键 wrote:

> Hi David
> Could I use plain old %d? Just like this:
> pr_cont(",task=%s,pid=%d,uid=%d\n", p->comm, p->pid,
> from_kuid(&init_user_ns, task_uid(p)));
> 

Yes please!
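
(With plain %d the tail of the line carries no padding; with hypothetical
values, ",task=bash,pid=1234,uid=1000" rather than the padded
",task=bash,pid= 1234,uid= 1000".)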

Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-14 Thread 禹舟键
Hi David
Could I use plain old %d? Just like this:
pr_cont(",task=%s,pid=%d,uid=%d\n", p->comm, p->pid,
from_kuid(&init_user_ns, task_uid(p)));

Thanks


Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-13 Thread David Rientjes
On Fri, 13 Jul 2018, ufo19890...@gmail.com wrote:

> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 531b2c86d4db..7fbd389ea779 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -434,10 +434,11 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
>   oom_constraint_text[oc->constraint],
>   nodemask_pr_args(oc->nodemask));
>   cpuset_print_current_mems_allowed();
> + mem_cgroup_print_oom_context(oc->memcg, p);
>   pr_cont(",task=%s,pid=%5d,uid=%5d\n", p->comm, p->pid,
>   from_kuid(&init_user_ns, task_uid(p)));
>   if (is_memcg_oom(oc))
> - mem_cgroup_print_oom_info(oc->memcg, p);
> + mem_cgroup_print_oom_meminfo(oc->memcg);
>   else {
>   show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
>   if (is_dump_unreclaim_slabs())

Ugh, could we please not pad the pid and uid with spaces? I don't think 
it achieves anything, and it just makes regex matching less robust.

Otherwise, looks good!
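
A standalone C sketch (illustration only, not kernel code; the task name
and ids are made up) of what the padding does to the emitted line:

#include <stdio.h>

int main(void)
{
	int pid = 42, uid = 1000;

	/* padded variant, as in the patch: short values gain leading spaces */
	printf(",task=%s,pid=%5d,uid=%5d\n", "bash", pid, uid);

	/* plain variant: no padding, so a pattern such as "pid=([0-9]+)"
	 * matches right after the '=' with no allowance for spaces */
	printf(",task=%s,pid=%d,uid=%d\n", "bash", pid, uid);

	return 0;
}

The first printf emits ",task=bash,pid=   42,uid= 1000"; the second emits
",task=bash,pid=42,uid=1000".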


Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-13 Thread Michal Hocko
On Fri 13-07-18 19:55:52, ufo19890...@gmail.com wrote:
> From: yuzhoujian 
> 
> The current oom report doesn't display the victim's memcg context during a
> global OOM situation. While this information is not strictly needed, it
> can be really helpful in containerized environments for locating which
> container has lost a process. Now that we have a single line for the oom
> context, we can trivially add both the oom memcg (this can be either
> global_oom or a specific memcg which has hit its hard limit) and
> task_memcg, which is the victim's memcg.
> 
> Below is the single-line output in the oom report after this patch.
> - global oom context information:
> oom-kill:constraint=,nodemask=,cpuset=,mems_allowed=,global_oom,task_memcg=,task=,pid=,uid=
> - memcg oom context information:
> oom-kill:constraint=,nodemask=,cpuset=,mems_allowed=,oom_memcg=,task_memcg=,task=,pid=,uid=
> 
> Signed-off-by: yuzhoujian 

Acked-by: Michal Hocko 

Thanks!

> ---
> Changes since v12
> - print cpuset information before memcg info.
> 
>  include/linux/memcontrol.h | 14 +++---
>  mm/memcontrol.c            | 36 ++--
>  mm/oom_kill.c              |  3 ++-
>  3 files changed, 35 insertions(+), 18 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 6c6fb116e925..96a73f989101 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -491,8 +491,10 @@ void mem_cgroup_handle_over_high(void);
>  
>  unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
>  
> -void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
> - struct task_struct *p);
> +void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
> + struct task_struct *p);
> +
> +void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
>  
>  static inline void mem_cgroup_oom_enable(void)
>  {
> @@ -903,7 +905,13 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
>  }
>  
>  static inline void
> -mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
> +mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
> + struct task_struct *p)
> +{
> +}
> +
> +static inline void
> +mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
>  {
>  }
>  
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index e6f0d5ef320a..18deea974cfd 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1119,32 +1119,40 @@ static const char *const memcg1_stat_names[] = {
>  
>  #define K(x) ((x) << (PAGE_SHIFT-10))
>  /**
> - * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
> - * @memcg: The memory cgroup that went over limit
> + * mem_cgroup_print_oom_context: Print OOM context information relevant to
> + * memory controller.
> + * @memcg: The origin memory cgroup that went over limit
>   * @p: Task that is going to be killed
>   *
>   * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
>   * enabled
>   */
> -void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
> +void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
>  {
> - struct mem_cgroup *iter;
> - unsigned int i;
> + struct cgroup *origin_cgrp, *kill_cgrp;
>  
>   rcu_read_lock();
> -
> + if (memcg) {
> + pr_cont(",oom_memcg=");
> + pr_cont_cgroup_path(memcg->css.cgroup);
> + } else
> + pr_cont(",global_oom");
>   if (p) {
> - pr_info("Task in ");
> + pr_cont(",task_memcg=");
>   pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
> - pr_cont(" killed as a result of limit of ");
> - } else {
> - pr_info("Memory limit reached of cgroup ");
>   }
> -
> - pr_cont_cgroup_path(memcg->css.cgroup);
> - pr_cont("\n");
> -
>   rcu_read_unlock();
> +}
> +
> +/**
> + * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
> + * memory controller.
> + * @memcg: The memory cgroup that went over limit
> + */
> +void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
> +{
> + struct mem_cgroup *iter;
> + unsigned int i;
>  
>   pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
>   K((u64)page_counter_read(&memcg->memory)),
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 531b2c86d4db..7fbd389ea779 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -434,10 +434,11 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
>   oom_constraint_text[oc->constraint],
>   nodemask_pr_args(oc->nodemask));
>   cpuset_print_current_mems_allowed();
> + mem_cgroup_print_oom_context(oc->memcg, p);
>   pr_cont(",task=%s,pid=%5d,uid=%5d\n", p->comm, p->pid,
>   from_kuid(&init_user_ns, task_uid(p)));
>   if (is_memcg_oom(oc))
> - mem_cgroup_print_oom_info(oc->memcg, p);
> + mem_cgroup_print_oom_meminfo(oc->memcg);

[PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-13 Thread ufo19890607
From: yuzhoujian 

The current oom report doesn't display the victim's memcg context during a
global OOM situation. While this information is not strictly needed, it
can be really helpful in containerized environments for locating which
container has lost a process. Now that we have a single line for the oom
context, we can trivially add both the oom memcg (this can be either
global_oom or a specific memcg which has hit its hard limit) and
task_memcg, which is the victim's memcg.

Below is the single-line output in the oom report after this patch.
- global oom context information:
oom-kill:constraint=,nodemask=,cpuset=,mems_allowed=,global_oom,task_memcg=,task=,pid=,uid=
- memcg oom context information:
oom-kill:constraint=,nodemask=,cpuset=,mems_allowed=,oom_memcg=,task_memcg=,task=,pid=,uid=
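
Filled in with hypothetical values, those two variants might read:

oom-kill:constraint=CONSTRAINT_NONE,nodemask=(null),cpuset=/,mems_allowed=0,global_oom,task_memcg=/foo/bar,task=dd,pid=1234,uid=0
oom-kill:constraint=CONSTRAINT_MEMCG,nodemask=(null),cpuset=/,mems_allowed=0,oom_memcg=/foo,task_memcg=/foo/bar,task=dd,pid=1234,uid=0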

Signed-off-by: yuzhoujian 
---
Changes since v12
- print cpuset information before memcg info.

 include/linux/memcontrol.h | 14 +++---
 mm/memcontrol.c            | 36 ++--
 mm/oom_kill.c              |  3 ++-
 3 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c6fb116e925..96a73f989101 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -491,8 +491,10 @@ void mem_cgroup_handle_over_high(void);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
-   struct task_struct *p);
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
+   struct task_struct *p);
+
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
 
 static inline void mem_cgroup_oom_enable(void)
 {
@@ -903,7 +905,13 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 }
 
 static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
+   struct task_struct *p)
+{
+}
+
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e6f0d5ef320a..18deea974cfd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1119,32 +1119,40 @@ static const char *const memcg1_stat_names[] = {
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
- * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
- * @memcg: The memory cgroup that went over limit
+ * mem_cgroup_print_oom_context: Print OOM context information relevant to
+ * memory controller.
+ * @memcg: The origin memory cgroup that went over limit
  * @p: Task that is going to be killed
  *
  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
  * enabled
  */
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
-   struct mem_cgroup *iter;
-   unsigned int i;
+   struct cgroup *origin_cgrp, *kill_cgrp;
 
rcu_read_lock();
-
+   if (memcg) {
+   pr_cont(",oom_memcg=");
+   pr_cont_cgroup_path(memcg->css.cgroup);
+   } else
+   pr_cont(",global_oom");
if (p) {
-   pr_info("Task in ");
+   pr_cont(",task_memcg=");
pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-   pr_cont(" killed as a result of limit of ");
-   } else {
-   pr_info("Memory limit reached of cgroup ");
}
-
-   pr_cont_cgroup_path(memcg->css.cgroup);
-   pr_cont("\n");
-
rcu_read_unlock();
+}
+
+/**
+ * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
+ * memory controller.
+ * @memcg: The memory cgroup that went over limit
+ */
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
+{
+   struct mem_cgroup *iter;
+   unsigned int i;
 
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 531b2c86d4db..7fbd389ea779 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -434,10 +434,11 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
oom_constraint_text[oc->constraint],
nodemask_pr_args(oc->nodemask));
cpuset_print_current_mems_allowed();
+   mem_cgroup_print_oom_context(oc->memcg, p);
pr_cont(",task=%s,pid=%5d,uid=%5d\n", p->comm, p->pid,
from_kuid(&init_user_ns, task_uid(p)));
if (is_memcg_oom(oc))
-   mem_cgroup_print_oom_info(oc->memcg, p);
+   mem_cgroup_print_oom_meminfo(oc->memcg);
else {
show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
if (is_dump_unreclaim_slabs())
-- 
2.14.1
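
Since the stated motivation is locating the container that lost a process,
here is a hedged userspace sketch (plain C; the line contents and the helper
are illustrative, only the field names come from the patch) of extracting
fields from such a single-line report:

#include <stdio.h>
#include <string.h>

/* Copy the value of a "key=" field out of one oom-kill line. Fields are
 * comma separated, so a value runs to the next ',' or end of line.
 * Returns 1 on success, 0 if the key is absent. */
static int oom_field(const char *line, const char *key, char *out, size_t len)
{
	const char *p = strstr(line, key);
	size_t n;

	if (!p)
		return 0;
	p += strlen(key);
	n = strcspn(p, ",\n");
	if (n >= len)
		n = len - 1;
	memcpy(out, p, n);
	out[n] = '\0';
	return 1;
}

int main(void)
{
	const char *line =
		"oom-kill:constraint=CONSTRAINT_NONE,nodemask=(null),"
		"cpuset=/,mems_allowed=0,global_oom,task_memcg=/foo/bar,"
		"task=dd,pid=1234,uid=0\n";
	char memcg[256], pid[16];

	if (oom_field(line, "task_memcg=", memcg, sizeof(memcg)) &&
	    oom_field(line, ",pid=", pid, sizeof(pid)))
		printf("container %s lost pid %s\n", memcg, pid);
	return 0;
}

This prints "container /foo/bar lost pid 1234". Note that the unpadded %d
agreed on elsewhere in the thread keeps the value extraction this simple.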



Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-11 Thread Michal Hocko
On Wed 11-07-18 18:31:18, 禹舟键 wrote:
> Hi Michal
> 
> I think the single-line output you want is like this:
> 
> oom-kill:constraint=,nodemask=,cpuset=,mems_allowed=,oom_memcg=,task_memcg=,task=,pid=,uid=
> 
> Am I right?

exactly.

-- 
Michal Hocko
SUSE Labs


Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-11 Thread 禹舟键
Hi Michal

I think the single-line output you want is like this:

oom-kill:constraint=,nodemask=,cpuset=,mems_allowed=,oom_memcg=,task_memcg=,task=,pid=,uid=

Am I right?


Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-11 Thread Michal Hocko
On Wed 11-07-18 11:39:29, 禹舟键 wrote:
> Hi Michal
> Sorry, I forgot to update the changelog for the second patch, but
> the cpuset information is not missing.

The cpuset information is missing in the changelog.

> Do I still need to send a
> v14 or just update the changelog for v13?

Wait for more feedback for a few days. If there are no other concerns, just
repost this patch 2. Btw, I still think that it would be more logical
to print cpuset before memcg info. But I will not insist.
-- 
Michal Hocko
SUSE Labs


Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-10 Thread 禹舟键
Hi Michal
Sorry, I forgot to update the changelog for the second patch, but
the cpuset information is not missing. Do I still need to send a
v14 or just update the changelog for v13?

Thanks


Re: [PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-10 Thread Michal Hocko
On Tue 10-07-18 18:19:48, ufo19890...@gmail.com wrote:
> From: yuzhoujian 
> 
> The current oom report doesn't display the victim's memcg context during a
> global OOM situation. While this information is not strictly needed, it
> can be really helpful in containerized environments for locating which
> container has lost a process. Now that we have a single line for the oom
> context, we can trivially add both the oom memcg (this can be either
> global_oom or a specific memcg which has hit its hard limit) and
> task_memcg, which is the victim's memcg.
> 
> Below is the single-line output in the oom report after this patch.
> - global oom context information:
> oom-kill:constraint=,nodemask=,global_oom,task_memcg=,task=,pid=,uid=
> - memcg oom context information:
> oom-kill:constraint=,nodemask=,oom_memcg=,task_memcg=,task=,pid=,uid=

The cpuset information is missing, which is a bit confusing because your
previous patch mentions it. I thought we had agreed that cpuset
goes first and memcg after that. I do not insist, of course, but that
ordering would make much more sense to me because it fits better with the
constraint and nodemask parameters.
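
For illustration (field values elided), the posted v13 prints the memcg
fields before the cpuset ones:

oom-kill:constraint=,nodemask=,oom_memcg=,task_memcg=,cpuset=,mems_allowed=,task=,pid=,uid=

whereas with cpuset first, as suggested, the line becomes:

oom-kill:constraint=,nodemask=,cpuset=,mems_allowed=,oom_memcg=,task_memcg=,task=,pid=,uid=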

> Signed-off-by: yuzhoujian 

Once that is fixed, feel free to add
Acked-by: Michal Hocko 

> ---
>  include/linux/memcontrol.h | 14 +++---
>  mm/memcontrol.c            | 36 ++--
>  mm/oom_kill.c              |  3 ++-
>  3 files changed, 35 insertions(+), 18 deletions(-)
> 
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 6c6fb116e925..96a73f989101 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -491,8 +491,10 @@ void mem_cgroup_handle_over_high(void);
>  
>  unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
>  
> -void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
> - struct task_struct *p);
> +void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
> + struct task_struct *p);
> +
> +void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
>  
>  static inline void mem_cgroup_oom_enable(void)
>  {
> @@ -903,7 +905,13 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
>  }
>  
>  static inline void
> -mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
> +mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
> + struct task_struct *p)
> +{
> +}
> +
> +static inline void
> +mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
>  {
>  }
>  
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index e6f0d5ef320a..18deea974cfd 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1119,32 +1119,40 @@ static const char *const memcg1_stat_names[] = {
>  
>  #define K(x) ((x) << (PAGE_SHIFT-10))
>  /**
> - * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
> - * @memcg: The memory cgroup that went over limit
> + * mem_cgroup_print_oom_context: Print OOM context information relevant to
> + * memory controller.
> + * @memcg: The origin memory cgroup that went over limit
>   * @p: Task that is going to be killed
>   *
>   * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
>   * enabled
>   */
> -void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
> +void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
>  {
> - struct mem_cgroup *iter;
> - unsigned int i;
> + struct cgroup *origin_cgrp, *kill_cgrp;
>  
>   rcu_read_lock();
> -
> + if (memcg) {
> + pr_cont(",oom_memcg=");
> + pr_cont_cgroup_path(memcg->css.cgroup);
> + } else
> + pr_cont(",global_oom");
>   if (p) {
> - pr_info("Task in ");
> + pr_cont(",task_memcg=");
>   pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
> - pr_cont(" killed as a result of limit of ");
> - } else {
> - pr_info("Memory limit reached of cgroup ");
>   }
> -
> - pr_cont_cgroup_path(memcg->css.cgroup);
> - pr_cont("\n");
> -
>   rcu_read_unlock();
> +}
> +
> +/**
> + * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
> + * memory controller.
> + * @memcg: The memory cgroup that went over limit
> + */
> +void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
> +{
> + struct mem_cgroup *iter;
> + unsigned int i;
>  
>   pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
>   K((u64)page_counter_read(&memcg->memory)),
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 531b2c86d4db..9e80f6c2eb2e 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -433,11 +433,12 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
>   pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
>   oom_constraint_text[oc->constraint],
> nodemask_pr_args(oc->nodemask));

[PATCH v13 2/2] Add oom victim's memcg to the oom context information

2018-07-10 Thread ufo19890607
From: yuzhoujian 

The current oom report doesn't display the victim's memcg context during a
global OOM situation. While this information is not strictly needed, it
can be really helpful in containerized environments for locating which
container has lost a process. Now that we have a single line for the oom
context, we can trivially add both the oom memcg (this can be either
global_oom or a specific memcg which has hit its hard limit) and
task_memcg, which is the victim's memcg.

Below is the single-line output in the oom report after this patch.
- global oom context information:
oom-kill:constraint=,nodemask=,global_oom,task_memcg=,task=,pid=,uid=
- memcg oom context information:
oom-kill:constraint=,nodemask=,oom_memcg=,task_memcg=,task=,pid=,uid=

Signed-off-by: yuzhoujian 
---
 include/linux/memcontrol.h | 14 +++---
 mm/memcontrol.c            | 36 ++--
 mm/oom_kill.c              |  3 ++-
 3 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c6fb116e925..96a73f989101 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -491,8 +491,10 @@ void mem_cgroup_handle_over_high(void);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
-   struct task_struct *p);
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
+   struct task_struct *p);
+
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
 
 static inline void mem_cgroup_oom_enable(void)
 {
@@ -903,7 +905,13 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
 }
 
 static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
+   struct task_struct *p)
+{
+}
+
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
 }
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e6f0d5ef320a..18deea974cfd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1119,32 +1119,40 @@ static const char *const memcg1_stat_names[] = {
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
 /**
- * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
- * @memcg: The memory cgroup that went over limit
+ * mem_cgroup_print_oom_context: Print OOM context information relevant to
+ * memory controller.
+ * @memcg: The origin memory cgroup that went over limit
  * @p: Task that is going to be killed
  *
  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
  * enabled
  */
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
-   struct mem_cgroup *iter;
-   unsigned int i;
+   struct cgroup *origin_cgrp, *kill_cgrp;
 
rcu_read_lock();
-
+   if (memcg) {
+   pr_cont(",oom_memcg=");
+   pr_cont_cgroup_path(memcg->css.cgroup);
+   } else
+   pr_cont(",global_oom");
if (p) {
-   pr_info("Task in ");
+   pr_cont(",task_memcg=");
pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
-   pr_cont(" killed as a result of limit of ");
-   } else {
-   pr_info("Memory limit reached of cgroup ");
}
-
-   pr_cont_cgroup_path(memcg->css.cgroup);
-   pr_cont("\n");
-
rcu_read_unlock();
+}
+
+/**
+ * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
+ * memory controller.
+ * @memcg: The memory cgroup that went over limit
+ */
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
+{
+   struct mem_cgroup *iter;
+   unsigned int i;
 
pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
K((u64)page_counter_read(&memcg->memory)),
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 531b2c86d4db..9e80f6c2eb2e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -433,11 +433,12 @@ static void dump_header(struct oom_control *oc, struct task_struct *p)
pr_info("oom-kill:constraint=%s,nodemask=%*pbl",
oom_constraint_text[oc->constraint],
nodemask_pr_args(oc->nodemask));
+   mem_cgroup_print_oom_context(oc->memcg, p);
cpuset_print_current_mems_allowed();
pr_cont(",task=%s,pid=%5d,uid=%5d\n", p->comm, p->pid,
from_kuid(&init_user_ns, task_uid(p)));
if (is_memcg_oom(oc))
-   mem_cgroup_print_oom_info(oc->memcg, p);
+   mem_cgroup_print_oom_meminfo(oc->memcg);
else {
show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask);
if (is_dump_unreclaim_slabs())
-- 
2.14.1