On Fri, Sep 15, 2023 at 10:59:32AM +0000, Matteo Rizzo wrote:
> From: Jann Horn <[email protected]>
> 
> Sanity-check that:
>  - non-NULL freepointers point into the slab
>  - freepointers look plausibly aligned
> 
> Signed-off-by: Jann Horn <[email protected]>
> Co-developed-by: Matteo Rizzo <[email protected]>
> Signed-off-by: Matteo Rizzo <[email protected]>
> ---
>  lib/slub_kunit.c |  4 ++++
>  mm/slab.h        |  8 +++++++
>  mm/slub.c        | 57 ++++++++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 69 insertions(+)
> 
> diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
> index d4a3730b08fa..acf8600bd1fd 100644
> --- a/lib/slub_kunit.c
> +++ b/lib/slub_kunit.c
> @@ -45,6 +45,10 @@ static void test_clobber_zone(struct kunit *test)
>  #ifndef CONFIG_KASAN
>  static void test_next_pointer(struct kunit *test)
>  {
> +     if (IS_ENABLED(CONFIG_SLAB_VIRTUAL))
> +             kunit_skip(test,
> +                     "incompatible with freepointer corruption detection in CONFIG_SLAB_VIRTUAL");
> +
>       struct kmem_cache *s = test_kmem_cache_create("TestSlub_next_ptr_free",
>                                                       64, SLAB_POISON);
>       u8 *p = kmem_cache_alloc(s, GFP_KERNEL);
> diff --git a/mm/slab.h b/mm/slab.h
> index 460c802924bd..8d10a011bdf0 100644
> --- a/mm/slab.h
> +++ b/mm/slab.h
> @@ -79,6 +79,14 @@ struct slab {
>  
>       struct list_head flush_list_elem;
>  
> +     /*
> +      * Not in kmem_cache because it depends on whether the allocation is
> +      * normal order or fallback order.
> +      * An alternative might be to over-allocate virtual memory for
> +      * fallback-order pages.
> +      */
> +     unsigned long align_mask;
> +
>       /* Replaces the page lock */
>       spinlock_t slab_lock;
>  
> diff --git a/mm/slub.c b/mm/slub.c
> index 0f7f5bf0b174..57474c8a6569 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -392,6 +392,44 @@ static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
>       return (freeptr_t){.v = encoded};
>  }
>  
> +/*
> + * Does some validation of freelist pointers. Without SLAB_VIRTUAL this is
> + * currently a no-op.
> + */
> +static inline bool freelist_pointer_corrupted(struct slab *slab, freeptr_t ptr,
> +     void *decoded)
> +{
> +#ifdef CONFIG_SLAB_VIRTUAL
> +     /*
> +      * If the freepointer decodes to 0, use 0 as the slab_base so that
> +      * the check below always passes ((0 & slab->align_mask) == 0).
> +      */
> +     unsigned long slab_base = decoded ? (unsigned long)slab_to_virt(slab)
> +             : 0;
> +
> +     /*
> +      * This verifies that the SLUB freepointer does not point outside the
> +      * slab. Since at that point we can basically do it for free, it also
> +      * checks that the pointer alignment looks vaguely sane.
> +      * However, we probably don't want the cost of a proper division here,
> +      * so instead we just do a cheap check whether the bottom bits that are
> +      * clear in the size are also clear in the pointer.
> +      * So for kmalloc-32, it does a perfect alignment check, but for
> +      * kmalloc-192, it just checks that the pointer is a multiple of 64.
> +      * This should probably be reconsidered - is this a good tradeoff, or
> +      * should that part be thrown out, or do we want a proper accurate
> +      * alignment check (and can we make it work with acceptable performance
> +      * cost compared to the security improvement - probably not)?

Is it really that much more expensive to check the alignment exactly?
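
For what it's worth, SLUB already reconstructs object indices with a
reciprocal multiply (__obj_to_index() uses s->reciprocal_size, from
linux/reciprocal_div.h), so an exact check might avoid a hardware
division entirely. A rough, untested sketch, assuming
freelist_pointer_corrupted() is handed the kmem_cache so it can reach
s->reciprocal_size and s->size, and ignoring s->red_left_pad for
brevity:

	/*
	 * Exact variant: reconstruct the object index with a reciprocal
	 * multiply instead of a division, then verify the pointer sits
	 * exactly on an object boundary. The mask check above already
	 * bounds the pointer to the slab, so the offset fits in a u32.
	 */
	unsigned long offset = (unsigned long)decoded - slab_base;
	unsigned int idx = reciprocal_divide((u32)offset, s->reciprocal_size);

	if (offset != (unsigned long)idx * s->size)
		return true;	/* not on an object boundary */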

> +      */
> +     return CHECK_DATA_CORRUPTION(
> +             ((unsigned long)decoded & slab->align_mask) != slab_base,
> +             "bad freeptr (encoded %lx, ptr %p, base %lx, mask %lx)",
> +             ptr.v, decoded, slab_base, slab->align_mask);
> +#else
> +     return false;
> +#endif
> +}
> +
>  static inline void *freelist_ptr_decode(const struct kmem_cache *s,
>                                       freeptr_t ptr, unsigned long ptr_addr,
>                                       struct slab *slab)
> @@ -403,6 +441,10 @@ static inline void *freelist_ptr_decode(const struct kmem_cache *s,
>  #else
>       decoded = (void *)ptr.v;
>  #endif
> +
> +     if (unlikely(freelist_pointer_corrupted(slab, ptr, decoded)))
> +             return NULL;
> +
>       return decoded;
>  }
>  
> @@ -2122,6 +2164,21 @@ static struct slab *get_free_slab(struct kmem_cache *s,
>       if (slab == NULL)
>               return NULL;
>  
> +     /*
> +      * Bits that must be equal to the start-of-slab address for all
> +      * objects inside the slab.
> +      * For compatibility with pointer tagging (like in HWASAN), this would
> +      * need to clear the pointer tag bits from the mask.
> +      */
> +     slab->align_mask = ~((PAGE_SIZE << oo_order(oo)) - 1);
> +
> +     /*
> +      * Object alignment bits: these must be zero in every object address,
> +      * matching the (zero) low bits of the page-aligned start-of-slab
> +      * address.
> +      */
> +     if (s->red_left_pad == 0)
> +             slab->align_mask |= (1 << (ffs(s->size) - 1)) - 1;
> +
>       return slab;
>  }
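
To make the mask construction concrete: for an order-0 slab of a
64-byte-object cache with no red zoning, the two steps above work out
roughly as follows (hardcoded-value sketch assuming PAGE_SIZE == 4096,
not the actual kernel code):

	unsigned long align_mask;

	/* ~((PAGE_SIZE << 0) - 1): keep every bit above the page offset */
	align_mask = ~((4096UL << 0) - 1);	/* 0x...fffffffff000 */

	/* ffs(64) == 7, so OR in the low six bits */
	align_mask |= (1UL << (7 - 1)) - 1;	/* 0x...fffffffff03f */

	/*
	 * An object pointer passes iff (ptr & align_mask) == slab_base,
	 * i.e. it lies within this slab's page and is 64-byte aligned.
	 */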
>  
> -- 
> 2.42.0.459.ge4e396fd5e-goog
> 

We can improve the sanity checking in the future, so as-is, sure:

Reviewed-by: Kees Cook <[email protected]>

-- 
Kees Cook
