Re: [PATCH v2 05/10] mm, kfence: insert KFENCE hooks for SLUB

2020-09-17 Thread Alexander Potapenko
On Thu, Sep 17, 2020 at 11:40 AM Christopher Lameter  wrote:
>
> On Tue, 15 Sep 2020, Marco Elver wrote:
>
> >  void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
> >  {
> > - void *ret = slab_alloc(s, gfpflags, _RET_IP_);
> > + void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
>
> The additional size parameter is a part of a struct kmem_cache that is
> already passed to the function. Why does the parameter list need to be
> expanded?

See my response to the similar question about the SLAB allocator:
https://lore.kernel.org/linux-arm-kernel/CAG_fn=XMc8NPZPFtUE=rdoR=xjh4f+txzs-w5n4vuawktjc...@mail.gmail.com/
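
In short: on the kmalloc()/kmem_cache_alloc_trace() paths the size the
caller asked for cannot be derived from the kmem_cache alone (the cache
only knows its size class), so it has to be handed down explicitly;
kmem_cache_alloc() simply passes s->object_size, where the two coincide
(compare the kmem_cache_alloc() and kmem_cache_alloc_trace() hunks in
the patch). A stand-alone toy model of that distinction (the names
below are simplified stand-ins, not kernel code):

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for struct kmem_cache: only the field that matters here. */
struct toy_cache {
	size_t object_size;	/* size class this cache serves */
};

/* Toy stand-in for slab_alloc(): orig_size is what a KFENCE-like sampler
 * wants to see; object_size is all the cache itself can tell us. */
static void toy_slab_alloc(const struct toy_cache *s, size_t orig_size)
{
	printf("cache serves %zu bytes, caller requested %zu\n",
	       s->object_size, orig_size);
}

int main(void)
{
	struct toy_cache kmalloc_16 = { .object_size = 16 };

	/* kmem_cache_alloc() path: requested size == s->object_size. */
	toy_slab_alloc(&kmalloc_16, kmalloc_16.object_size);

	/* kmalloc(13) path: served from the 16-byte size class, but the
	 * originally requested 13 bytes is only known at the call site,
	 * hence the extra parameter. */
	toy_slab_alloc(&kmalloc_16, 13);

	return 0;
}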



-- 
Alexander Potapenko
Software Engineer

Google Germany GmbH
Erika-Mann-Straße, 33
80636 München

Geschäftsführer: Paul Manicle, Halimah DeLaine Prado
Registergericht und -nummer: Hamburg, HRB 86891
Sitz der Gesellschaft: Hamburg


Re: [PATCH v2 05/10] mm, kfence: insert KFENCE hooks for SLUB

2020-09-17 Thread Christopher Lameter
On Tue, 15 Sep 2020, Marco Elver wrote:

>  void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
>  {
> - void *ret = slab_alloc(s, gfpflags, _RET_IP_);
> + void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);

The additional size parameter is a part of a struct kmem_cache that is
already passed to the function. Why does the parameter list need to be
expanded?



[PATCH v2 05/10] mm, kfence: insert KFENCE hooks for SLUB

2020-09-15 Thread Marco Elver
From: Alexander Potapenko 

Inserts KFENCE hooks into the SLUB allocator.

Note the addition of an 'orig_size' argument to the slab_alloc*()
functions, which lets the originally requested size be passed on to
KFENCE. When KFENCE is disabled, there is no additional overhead,
since these functions are __always_inline.
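
As a rough illustration of why the disabled case costs nothing, here is
a simplified stand-alone model (names are stand-ins; this is not the
actual <linux/kfence.h> from this series). Assuming the disabled
kfence_alloc() reduces to a stub that returns NULL, as the no-overhead
claim implies, the extra branch inlined into the __always_inline
allocation path is dead code and the compiler drops it:

#include <stdio.h>
#include <stddef.h>

/*
 * Simplified model only: with the feature compiled out, the hook is a
 * constant-NULL static inline, so the check below folds away entirely.
 */
#ifdef CONFIG_TOY_KFENCE
static inline void *toy_kfence_alloc(size_t orig_size)
{
	static char pool[64];	/* stand-in for a sampled guarded allocation */
	return orig_size <= sizeof(pool) ? pool : NULL;
}
#else
static inline void *toy_kfence_alloc(size_t orig_size)
{
	(void)orig_size;
	return NULL;
}
#endif

static inline void *toy_slab_alloc(size_t orig_size)
{
	void *object = toy_kfence_alloc(orig_size);

	if (object)		/* dead code in the disabled build */
		return object;

	return NULL;		/* stand-in for the regular slab fast path */
}

int main(void)
{
	printf("allocated: %p\n", toy_slab_alloc(32));
	return 0;
}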

Co-developed-by: Marco Elver 
Signed-off-by: Marco Elver 
Signed-off-by: Alexander Potapenko 
---
 mm/slub.c | 72 +++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 53 insertions(+), 19 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index d4177aecedf6..5c5a13a7857c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -27,6 +27,7 @@
 #include <linux/ctype.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
+#include <linux/kfence.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
@@ -1557,6 +1558,11 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void *old_tail = *tail ? *tail : *head;
int rsize;
 
+   if (is_kfence_address(next)) {
+   slab_free_hook(s, next);
+   return true;
+   }
+
/* Head and tail of the reconstructed freelist */
*head = NULL;
*tail = NULL;
@@ -2660,7 +2666,8 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page)
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- unsigned long addr, struct kmem_cache_cpu *c)
+ unsigned long addr, struct kmem_cache_cpu *c,
+ size_t orig_size)
 {
void *freelist;
struct page *page;
@@ -2763,7 +2770,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
  * cpu changes by refetching the per cpu area pointer.
  */
 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
- unsigned long addr, struct kmem_cache_cpu *c)
+ unsigned long addr, struct kmem_cache_cpu *c,
+ size_t orig_size)
 {
void *p;
unsigned long flags;
@@ -2778,7 +2786,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-   p = ___slab_alloc(s, gfpflags, node, addr, c);
+   p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
local_irq_restore(flags);
return p;
 }
@@ -2805,7 +2813,7 @@ static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
  * Otherwise we can simply pick the next object from the lockless free list.
  */
 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
-   gfp_t gfpflags, int node, unsigned long addr)
+   gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
 {
void *object;
struct kmem_cache_cpu *c;
@@ -2816,6 +2824,11 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
if (!s)
return NULL;
+
+   object = kfence_alloc(s, orig_size, gfpflags);
+   if (unlikely(object))
+   goto out;
+
 redo:
/*
 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -2853,7 +2866,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
object = c->freelist;
page = c->page;
if (unlikely(!object || !node_match(page, node))) {
-   object = __slab_alloc(s, gfpflags, node, addr, c);
+   object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
stat(s, ALLOC_SLOWPATH);
} else {
void *next_object = get_freepointer_safe(s, object);
@@ -2889,20 +2902,21 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
memset(object, 0, s->object_size);
 
+out:
 	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object);
 
return object;
 }
 
 static __always_inline void *slab_alloc(struct kmem_cache *s,
-   gfp_t gfpflags, unsigned long addr)
+   gfp_t gfpflags, unsigned long addr, size_t orig_size)
 {
-   return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
+   return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr, orig_size);
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-   void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+   void *ret = slab_alloc(s, gfpflags, _RET_IP_, s->object_size);
 
trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
s->size, gfpflags);
@@ -2914,7 +2928,7 @@ EXPORT_SYMBOL(kmem_cache_alloc);
 #ifdef CONFIG_TRACING
 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
-   void *ret = slab_alloc(s, gfpflags, _RET_IP_);
+   void *ret = slab_alloc(s, gfpflags, _RET_IP_, size);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);