Draft patch sketching how the data structures could change. kmem_cache_attr
is read only, so the per-cache copies of fields like name, object_size and
the ctor pointer can go away.

Index: linux/include/linux/slab.h
===================================================================
--- linux.orig/include/linux/slab.h
+++ linux/include/linux/slab.h
@@ -135,9 +135,17 @@ struct mem_cgroup;
 void __init kmem_cache_init(void);
 bool slab_is_available(void);

-struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
-                       slab_flags_t,
-                       void (*)(void *));
+typedef void (*kmem_cache_ctor)(void *);
+
+struct kmem_cache_attr {
+       char name[16];
+       unsigned int size;
+       unsigned int align;
+       slab_flags_t flags;
+       kmem_cache_ctor ctor;
+};
+
+struct kmem_cache *kmem_cache_create(const struct kmem_cache_attr *);
 void kmem_cache_destroy(struct kmem_cache *);
 int kmem_cache_shrink(struct kmem_cache *);
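
For illustration, a minimal sketch of how a caller could use the proposed
interface, assuming the draft above; the object type, ctor and cache name
below are made up:

#include <linux/slab.h>

struct my_obj {
	int state;
};

/* Matches the kmem_cache_ctor typedef: void (*)(void *) */
static void my_obj_ctor(void *p)
{
	struct my_obj *obj = p;

	obj->state = 0;
}

/*
 * The attr is read only, so a single static const instance can be shared
 * for the whole lifetime of the cache.
 */
static const struct kmem_cache_attr my_obj_attr = {
	.name	= "my_obj",
	.size	= sizeof(struct my_obj),
	.align	= 0,
	.flags	= SLAB_HWCACHE_ALIGN,
	.ctor	= my_obj_ctor,
};

static struct kmem_cache *my_obj_cache;

static int __init my_obj_init(void)
{
	my_obj_cache = kmem_cache_create(&my_obj_attr);
	return my_obj_cache ? 0 : -ENOMEM;
}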

Index: linux/include/linux/slab_def.h
===================================================================
--- linux.orig/include/linux/slab_def.h
+++ linux/include/linux/slab_def.h
@@ -10,6 +10,7 @@

 struct kmem_cache {
        struct array_cache __percpu *cpu_cache;
+       struct kmem_cache_attr *attr;

 /* 1) Cache tunables. Protected by slab_mutex */
        unsigned int batchcount;
@@ -35,14 +36,9 @@ struct kmem_cache {
        struct kmem_cache *freelist_cache;
        unsigned int freelist_size;

-       /* constructor func */
-       void (*ctor)(void *obj);
-
 /* 4) cache creation/removal */
-       const char *name;
        struct list_head list;
        int refcount;
-       int object_size;
        int align;

 /* 5) statistics */
Index: linux/include/linux/slub_def.h
===================================================================
--- linux.orig/include/linux/slub_def.h
+++ linux/include/linux/slub_def.h
@@ -83,9 +83,9 @@ struct kmem_cache {
        struct kmem_cache_cpu __percpu *cpu_slab;
        /* Used for retriving partial slabs etc */
        slab_flags_t flags;
+       struct kmem_cache_attr *attr;
        unsigned long min_partial;
        int size;               /* The size of an object including meta data */
-       int object_size;        /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
 #ifdef CONFIG_SLUB_CPU_PARTIAL
        int cpu_partial;        /* Number of per cpu partial objects to keep around */
@@ -97,12 +97,10 @@ struct kmem_cache {
        struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
-       void (*ctor)(void *);
        int inuse;              /* Offset to metadata */
        int align;              /* Alignment */
        int reserved;           /* Reserved bytes at the end of slabs */
        int red_left_pad;       /* Left redzone padding size */
-       const char *name;       /* Name (only for display!) */
        struct list_head list;  /* List of slab caches */
 #ifdef CONFIG_SYSFS
        struct kobject kobj;    /* For sysfs */
Index: linux/mm/slab.h
===================================================================
--- linux.orig/mm/slab.h
+++ linux/mm/slab.h
@@ -18,13 +18,11 @@
  * SLUB is no longer needed.
  */
 struct kmem_cache {
-       unsigned int object_size;/* The original size of the object */
+       struct kmem_cache_attr *attr;
        unsigned int size;      /* The aligned/padded/added on size  */
        unsigned int align;     /* Alignment as calculated */
        slab_flags_t flags;     /* Active flags on the slab */
-       const char *name;       /* Slab name for sysfs */
        int refcount;           /* Use counter */
-       void (*ctor)(void *);   /* Called on object slot creation */
        struct list_head list;  /* List of all slab caches on the system */
 };
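
Call sites inside the allocators that used the removed fields would then go
through the attr pointer instead. A rough sketch of the kind of conversion
this implies (the helper below is hypothetical; the real sites are spread
across mm/slab.c, mm/slub.c and mm/slob.c):

#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical helper, only to show the field accesses side by side */
static void cache_init_object(struct kmem_cache *s, void *object)
{
	/* before: pr_debug("%s: size %d\n", s->name, s->object_size); */
	pr_debug("%s: object size %u\n", s->attr->name, s->attr->size);

	/* before: if (s->ctor) s->ctor(object); */
	if (s->attr->ctor)
		s->attr->ctor(object);
}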
