From: Andrey Ryabinin <aryabi...@virtuozzo.com> Currently we use per-cpu stocks to do precharges of the ->memory and ->memsw counters. Do this for the ->kmem and ->cache counters as well, to decrease contention on them.
https://jira.sw.ru/browse/PSBM-101300 Signed-off-by: Andrey Ryabinin <aryabi...@virtuozzo.com> (cherry picked from commit e1ae7b88d380d24a6df7c9b34635346726de39e3) Original title: mm/memcg: Use per-cpu stock charges for ->kmem and ->cache counters #PSBM-101300 Reworked: kmem part was dropped because it looks like this percpu charging functionality was covered by ms commit (see below). see ms: bf4f0599 ("mm: memcg/slab: obj_cgroup API") e1a366be ("mm: memcontrol: switch to rcu protection in drain_all_stock()") 1a3e1f40 ("mm: memcontrol: decouple reference counting from page accounting") https://jira.sw.ru/browse/PSBM-131957 Signed-off-by: Alexander Mikhalitsyn <alexander.mikhalit...@virtuozzo.com> --- mm/memcontrol.c | 61 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 41 insertions(+), 20 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2af9c460a06d..ccfc04e21c2d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2318,6 +2318,7 @@ struct memcg_stock_pcp { unsigned int nr_bytes; #endif + unsigned int cache_nr_pages; struct work_struct work; unsigned long flags; #define FLUSHING_CACHED_CHARGE 0 @@ -2352,7 +2353,8 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, * * returns true if successful, false otherwise. 
*/ -static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages, + bool cache) { struct memcg_stock_pcp *stock; unsigned long flags; @@ -2364,9 +2366,16 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) local_irq_save(flags); stock = this_cpu_ptr(&memcg_stock); - if (memcg == stock->cached && stock->nr_pages >= nr_pages) { - stock->nr_pages -= nr_pages; - ret = true; + if (memcg == stock->cached) { + if (cache && stock->cache_nr_pages >= nr_pages) { + stock->cache_nr_pages -= nr_pages; + ret = true; + } + + if (!cache && stock->nr_pages >= nr_pages) { + stock->nr_pages -= nr_pages; + ret = true; + } } local_irq_restore(flags); @@ -2380,15 +2389,20 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) static void drain_stock(struct memcg_stock_pcp *stock) { struct mem_cgroup *old = stock->cached; + unsigned long nr_pages = stock->nr_pages + stock->cache_nr_pages; if (!old) return; - if (stock->nr_pages) { - page_counter_uncharge(&old->memory, stock->nr_pages); + if (stock->cache_nr_pages) + page_counter_uncharge(&old->cache, stock->cache_nr_pages); + + if (nr_pages) { + page_counter_uncharge(&old->memory, nr_pages); if (do_memsw_account()) - page_counter_uncharge(&old->memsw, stock->nr_pages); + page_counter_uncharge(&old->memsw, nr_pages); stock->nr_pages = 0; + stock->cache_nr_pages = 0; } css_put(&old->css); @@ -2418,10 +2432,12 @@ static void drain_local_stock(struct work_struct *dummy) * Cache charges(val) to local per_cpu area. * This will be consumed by consume_stock() function, later. 
*/ -static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) +static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages, + bool cache) { struct memcg_stock_pcp *stock; unsigned long flags; + unsigned long stock_nr_pages; local_irq_save(flags); @@ -2431,9 +2447,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) css_get(&memcg->css); stock->cached = memcg; } - stock->nr_pages += nr_pages; - if (stock->nr_pages > MEMCG_CHARGE_BATCH) + if (cache) + stock->cache_nr_pages += nr_pages; + else + stock->nr_pages += nr_pages; + + stock_nr_pages = stock->nr_pages + stock->cache_nr_pages; + /* checkme: looks like a bug in original patch */ + if (stock_nr_pages > MEMCG_CHARGE_BATCH) drain_stock(stock); local_irq_restore(flags); @@ -2461,10 +2483,12 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; bool flush = false; + unsigned long nr_pages = stock->nr_pages + + stock->cache_nr_pages; rcu_read_lock(); memcg = stock->cached; - if (memcg && stock->nr_pages && + if (memcg && nr_pages && mem_cgroup_is_descendant(memcg, root_memcg)) flush = true; if (obj_stock_flush_required(stock, root_memcg)) @@ -2827,10 +2851,10 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge retry: may_swap = true; kmem_limit = false; - if (consume_stock(memcg, nr_pages)) { + if (consume_stock(memcg, nr_pages, cache_charge)) { if (kmem_charge && !page_counter_try_charge( &memcg->kmem, nr_pages, &counter)) { - refill_stock(memcg, nr_pages); + refill_stock(memcg, nr_pages, false); goto charge; } @@ -2850,7 +2874,7 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge mem_over_limit = mem_cgroup_from_counter(counter, memory); if (!mem_over_limit && kmem_charge) { - if (!page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) { + if (!page_counter_try_charge(&memcg->kmem, batch, &counter)) { 
kmem_limit = true; mem_over_limit = mem_cgroup_from_counter(counter, kmem); page_counter_uncharge(&memcg->memory, batch); @@ -2996,11 +3020,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, bool kmem_charge page_counter_charge(&memcg->cache, batch); if (batch > nr_pages) - refill_stock(memcg, batch - nr_pages); + refill_stock(memcg, batch - nr_pages, cache_charge); done: - if (cache_charge) - page_counter_charge(&memcg->cache, nr_pages); - /* * If the hierarchy is above the normal consumption range, schedule * reclaim on returning to userland. We can perform reclaim here @@ -3245,7 +3266,7 @@ void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages) if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) page_counter_uncharge(&memcg->kmem, nr_pages); - refill_stock(memcg, nr_pages); + refill_stock(memcg, nr_pages, false); } /** @@ -7910,7 +7931,7 @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages) mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages); - refill_stock(memcg, nr_pages); + refill_stock(memcg, nr_pages, false); } static int __init cgroup_memory(char *s) -- 2.28.0 _______________________________________________ Devel mailing list Devel@openvz.org https://lists.openvz.org/mailman/listinfo/devel