On Mon 16-06-14 15:54:26, Johannes Weiner wrote:
> The move precharge function does some baroque things: it tries raw
> res_counter charging of the entire amount first, and then falls back
> to a loop of one-by-one charges, with checks for pending signals and
> cond_resched() batching.
> 
> Just use mem_cgroup_try_charge() without __GFP_WAIT for the first bulk
> charge attempt.  In the one-by-one loop, remove the signal check (this
> is already checked in try_charge), and simply call cond_resched()
> after every charge - it's not that expensive.

Agreed. There shouldn't be any res_counter {un}charge calls outside of
mem_cgroup_try_charge and its kmem variant.
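
For reference, a sketch of how the whole function reads with the patch
applied, pieced together from the hunk below (the only difference is that
the root shortcut returns 0 explicitly here, since ret is no longer
initialized at declaration):

static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret;

	if (mem_cgroup_is_root(mc.to)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return 0;
	}

	/* Try a single bulk charge without reclaim first */
	ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT,
				    count, false);
	if (!ret) {
		mc.precharge += count;
		return ret;
	}

	/* Try charges one by one with reclaim */
	while (count--) {
		ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL, 1, false);
		/*
		 * In case of failure, any residual charges against
		 * mc.to will be dropped by mem_cgroup_clear_mc()
		 * later on.
		 */
		if (ret)
			return ret;
		mc.precharge++;
		cond_resched();
	}
	return 0;
}

Clearing __GFP_WAIT on the first attempt makes the bulk charge bail out
rather than enter reclaim, so the one-by-one fallback loop is the only
path that can block.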

> Signed-off-by: Johannes Weiner <han...@cmpxchg.org>

Acked-by: Michal Hocko <mho...@suse.cz>

> ---
>  mm/memcontrol.c | 51 +++++++++++++++++----------------------------------
>  1 file changed, 17 insertions(+), 34 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 9c646b9b56f4..3d9df94896a7 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -6372,55 +6372,38 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
>  
>  #ifdef CONFIG_MMU
>  /* Handlers for move charge at task migration. */
> -#define PRECHARGE_COUNT_AT_ONCE      256
>  static int mem_cgroup_do_precharge(unsigned long count)
>  {
> -     int ret = 0;
> -     int batch_count = PRECHARGE_COUNT_AT_ONCE;
> -     struct mem_cgroup *memcg = mc.to;
> +     int ret;
>  
> -     if (mem_cgroup_is_root(memcg)) {
> +     if (mem_cgroup_is_root(mc.to)) {
>               mc.precharge += count;
>               /* we don't need css_get for root */
>               return ret;
>       }
> -     /* try to charge at once */
> -     if (count > 1) {
> -             struct res_counter *dummy;
> -             /*
> -              * "memcg" cannot be under rmdir() because we've already checked
> -              * by cgroup_lock_live_cgroup() that it is not removed and we
> -              * are still under the same cgroup_mutex. So we can postpone
> -              * css_get().
> -              */
> -             if (res_counter_charge(&memcg->res, PAGE_SIZE * count, &dummy))
> -                     goto one_by_one;
> -             if (do_swap_account && res_counter_charge(&memcg->memsw,
> -                                             PAGE_SIZE * count, &dummy)) {
> -                     res_counter_uncharge(&memcg->res, PAGE_SIZE * count);
> -                     goto one_by_one;
> -             }
> +
> +     /* Try a single bulk charge without reclaim first */
> +     ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT,
> +                                 count, false);
> +     if (!ret) {
>               mc.precharge += count;
>               return ret;
>       }
> -one_by_one:
> -     /* fall back to one by one charge */
> +
> +     /* Try charges one by one with reclaim */
>       while (count--) {
> -             if (signal_pending(current)) {
> -                     ret = -EINTR;
> -                     break;
> -             }
> -             if (!batch_count--) {
> -                     batch_count = PRECHARGE_COUNT_AT_ONCE;
> -                     cond_resched();
> -             }
> -             ret = mem_cgroup_try_charge(memcg, GFP_KERNEL, 1, false);
> +             ret = mem_cgroup_try_charge(mc.to, GFP_KERNEL, 1, false);
> +             /*
> +              * In case of failure, any residual charges against
> +              * mc.to will be dropped by mem_cgroup_clear_mc()
> +              * later on.
> +              */
>               if (ret)
> -                     /* mem_cgroup_clear_mc() will do uncharge later */
>                       return ret;
>               mc.precharge++;
> +             cond_resched();
>       }
> -     return ret;
> +     return 0;
>  }
>  
>  /**
> -- 
> 2.0.0
> 

-- 
Michal Hocko
SUSE Labs