The commit is pushed to "branch-rh7-3.10.0-1160.105.1.vz7.220.x-ovz" and will 
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-1160.105.1.vz7.220.7
------>
commit f56d551bddbc90ca3b0dfb2cafd778bec0e5fd15
Author: Konstantin Khorenko <khore...@virtuozzo.com>
Date:   Tue Jul 2 21:01:51 2024 +0300

    slub: ignore memcg limits during slub allocations
    
    Pretend slubs are always allocated by a PF_MEMALLOC process to bypass
    memory cgroup limits.
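    
    (For reference, not part of this patch: the memcg charge path lets
    PF_MEMALLOC tasks overrun the limit instead of failing the charge.
    Upstream mm/memcontrol.c try_charge() contains a check of roughly
    this shape, though the exact code in this 3.10-based kernel may
    differ:
    
        /* let reclaim-context tasks charge past the memcg limit */
        if (unlikely(current->flags & PF_MEMALLOC))
                goto force;
    
    hence marking the allocating task PF_MEMALLOC below.)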
    
    Add a per-cache "pf_memalloc" sysfs attribute to enable/disable the
    logic on the fly.
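    
    For example, "echo 1 > /sys/kernel/slab/<cache>/pf_memalloc" turns the
    bypass on for a single cache at runtime and "echo 0" turns it off
    (the cache name is whatever directory the target cache has under
    /sys/kernel/slab/), while booting with the new 'm' flag, e.g.
    slub_debug=m, enables it for all caches.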
    
    This is a debug patch, for sure.
    
    https://virtuozzo.atlassian.net/browse/PSBM-155867
    
    Signed-off-by: Konstantin Khorenko <khore...@virtuozzo.com>
---
 include/linux/slab.h |  1 +
 mm/slub.c            | 39 +++++++++++++++++++++++++++++++++++++--
 2 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8722dc8864be..263f10a436f6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -24,6 +24,7 @@
 #define SLAB_CONSISTENCY_CHECKS        0x00000100UL    /* DEBUG: Perform (expensive) checks on alloc/free */
 #define SLAB_RED_ZONE          0x00000400UL    /* DEBUG: Red zone objs in a cache */
 #define SLAB_POISON            0x00000800UL    /* DEBUG: Poison objects */
+#define SLAB_PF_MEMALLOC       0x00001000UL    /* DEBUG: Ignore memcg limits */
 #define SLAB_HWCACHE_ALIGN     0x00002000UL    /* Align objs on cache lines */
 #define SLAB_CACHE_DMA         0x00004000UL    /* Use GFP_DMA memory */
 #define SLAB_STORE_USER                0x00010000UL    /* DEBUG: Store the last owner for bug hunting */
diff --git a/mm/slub.c b/mm/slub.c
index d4392708a014..7ec3f8b37701 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1273,6 +1273,9 @@ static int __init setup_slub_debug(char *str)
                case 'a':
                        slub_debug |= SLAB_FAILSLAB;
                        break;
+               case 'm':
+                       slub_debug |= SLAB_PF_MEMALLOC;
+                       break;
                default:
                        printk(KERN_ERR "slub_debug option '%c' "
                                "unknown. skipped\n", *str);
@@ -2460,6 +2463,16 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 {
        void *freelist;
        struct page *page;
+       unsigned long pflags = current->flags;
+
+       if (s->flags & SLAB_PF_MEMALLOC) {
+               /*
+                * A big crutch for debug purposes: we suspect some slub allocation
+                * failure leads to a memory corruption, so for now we want to make
+                * them all always succeed to check the theory.
+                */
+               current->flags |= PF_MEMALLOC;
+       }
 
        page = c->page;
        if (!page)
@@ -2519,7 +2532,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        VM_BUG_ON(!c->page->frozen);
        c->freelist = get_freepointer(s, freelist);
        c->tid = next_tid(c->tid);
-       return freelist;
+       goto restore_pfmemalloc;
 
 new_slab:
 
@@ -2536,7 +2549,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        if (unlikely(!freelist)) {
                if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
                        slab_out_of_memory(s, gfpflags, node);
-               return NULL;
+               goto restore_pfmemalloc;
        }
 
        page = c->page;
@@ -2550,6 +2563,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        deactivate_slab(s, page, get_freepointer(s, freelist));
        c->page = NULL;
        c->freelist = NULL;
+
+restore_pfmemalloc:
+       if (s->flags & SLAB_PF_MEMALLOC)
+               tsk_restore_flags(current, pflags, PF_MEMALLOC);
+
        return freelist;
 }
 
@@ -5217,6 +5235,22 @@ static ssize_t poison_store(struct kmem_cache *s,
 }
 SLAB_ATTR(poison);
 
+static ssize_t pf_memalloc_show(struct kmem_cache *s, char *buf)
+{
+       return sprintf(buf, "%d\n", !!(s->flags & SLAB_PF_MEMALLOC));
+}
+
+static ssize_t pf_memalloc_store(struct kmem_cache *s,
+                                const char *buf, size_t length)
+{
+       s->flags &= ~SLAB_PF_MEMALLOC;
+       if (buf[0] == '1') {
+               s->flags |= SLAB_PF_MEMALLOC;
+       }
+       return length;
+}
+SLAB_ATTR(pf_memalloc);
+
 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
 {
        return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
@@ -5439,6 +5473,7 @@ static struct attribute *slab_attrs[] = {
        &trace_attr.attr,
        &red_zone_attr.attr,
        &poison_attr.attr,
+       &pf_memalloc_attr.attr,
        &store_user_attr.attr,
        &validate_attr.attr,
        &alloc_calls_attr.attr,