Andrea Righi wrote:

> Since we hold task_lock(), we know that p->mm cannot change and we don't have
> to worry about incrementing mm_users. So, just use p->mm directly and
> check that we've not picked a kernel thread.
> 
> Signed-off-by: Andrea Righi <[EMAIL PROTECTED]>
> ---
>  kernel/cgroup.c      |    3 ++-
>  mm/memrlimitcgroup.c |   10 ++++------
>  2 files changed, 6 insertions(+), 7 deletions(-)
> 
> diff --git a/kernel/cgroup.c b/kernel/cgroup.c
> index 678a680..03cc925 100644
> --- a/kernel/cgroup.c
> +++ b/kernel/cgroup.c
> @@ -2757,7 +2757,8 @@ void cgroup_fork_callbacks(struct task_struct *child)
>   * invoke this routine, since it assigns the mm->owner the first time
>   * and does not change it.
>   *
> - * The callbacks are invoked with mmap_sem held in read mode.
> + * The callbacks are invoked with task_lock held and mmap_sem held in read
> + * mode.
>   */
>  void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
>  {
> diff --git a/mm/memrlimitcgroup.c b/mm/memrlimitcgroup.c
> index 8ee74f6..b3d20f5 100644
> --- a/mm/memrlimitcgroup.c
> +++ b/mm/memrlimitcgroup.c
> @@ -238,7 +238,7 @@ out:
>  }
> 
>  /*
> - * This callback is called with mmap_sem held
> + * This callback is called with mmap_sem and task_lock held
>   */
>  static void memrlimit_cgroup_mm_owner_changed(struct cgroup_subsys *ss,
>                                               struct cgroup *old_cgrp,
> @@ -246,9 +246,9 @@ static void memrlimit_cgroup_mm_owner_changed(struct cgroup_subsys *ss,
>                                               struct task_struct *p)
>  {
>       struct memrlimit_cgroup *memrcg, *old_memrcg;
> -     struct mm_struct *mm = get_task_mm(p);
> +     struct mm_struct *mm = p->mm;
> 
> -     BUG_ON(!mm);
> +     BUG_ON(!mm || (p->flags & PF_KTHREAD));
> 
>       /*
>        * If we don't have a new cgroup, we just uncharge from the old one.
> @@ -258,7 +258,7 @@ static void memrlimit_cgroup_mm_owner_changed(struct cgroup_subsys *ss,
>               memrcg = memrlimit_cgroup_from_cgrp(cgrp);
>               if (res_counter_charge(&memrcg->as_res,
>                               mm->total_vm << PAGE_SHIFT))
> -                     goto out;
> +                     return;
>       }
> 
>       if (old_cgrp) {
> @@ -266,8 +266,6 @@ static void memrlimit_cgroup_mm_owner_changed(struct cgroup_subsys *ss,
>               res_counter_uncharge(&old_memrcg->as_res,
>                               mm->total_vm << PAGE_SHIFT);
>       }
> -out:
> -     mmput(mm);
>  }

Seems reasonable.
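
For anyone reading along, the reasoning is easiest to see next to
get_task_mm() itself. This is a paraphrased sketch of the helper from
kernel/fork.c around this kernel version (details may differ slightly
in other trees), not a verbatim copy:

struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	/* task_lock() serializes against exec()/exit() changing task->mm */
	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_KTHREAD)
			/* kernel threads only borrow an mm (use_mm()) */
			mm = NULL;
		else
			/* pin the mm so it stays valid after task_unlock() */
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}

Since the mm->owner change callback now runs with task_lock(p) already
held, the extra lock/unlock and the mm_users reference taken above buy
us nothing there; reading p->mm directly and folding the PF_KTHREAD
check into the BUG_ON() keeps the same kernel-thread filtering without
the get_task_mm()/mmput() pair.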

Acked-by: Balbir Singh <[EMAIL PROTECTED]>

-- 
        Balbir
_______________________________________________
Containers mailing list
[EMAIL PROTECTED]
https://lists.linux-foundation.org/mailman/listinfo/containers
