Another thing I noticed; not sure how critical it is.

On 2/21/24 20:40, Suren Baghdasaryan wrote:
> +static inline void __alloc_tag_sub(union codetag_ref *ref, size_t bytes)
> +{
> +     struct alloc_tag *tag;
> +
> +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
> +     WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
> +#endif
> +     if (!ref || !ref->ct)
> +             return;

This is quite careful.

> +
> +     tag = ct_to_alloc_tag(ref->ct);
> +
> +     this_cpu_sub(tag->counters->bytes, bytes);
> +     this_cpu_dec(tag->counters->calls);
> +
> +     ref->ct = NULL;
> +}
> +
> +static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
> +{
> +     __alloc_tag_sub(ref, bytes);
> +}
> +
> +static inline void alloc_tag_sub_noalloc(union codetag_ref *ref, size_t bytes)
> +{
> +     __alloc_tag_sub(ref, bytes);
> +}
> +
> +static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
> +{
> +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
> +     WARN_ONCE(ref && ref->ct,
> +               "alloc_tag was not cleared (got tag for %s:%u)\n",\
> +               ref->ct->filename, ref->ct->lineno);
> +
> +     WARN_ONCE(!tag, "current->alloc_tag not set");
> +#endif
> +     if (!ref || !tag)
> +             return;

This too.

> +
> +     ref->ct = &tag->ct;
> +     /*
> +      * We need to increment the call counter every time we have a new
> +      * allocation or when we split a large allocation into smaller ones.
> +      * Each new reference for every sub-allocation needs to increment call
> +      * counter because when we free each part the counter will be decremented.
> +      */
> +     this_cpu_inc(tag->counters->calls);
> +}
> +
> +static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
> +{
> +     alloc_tag_ref_set(ref, tag);

We might have returned early from alloc_tag_ref_set() here, due to !tag.

> +     this_cpu_add(tag->counters->bytes, bytes);

But here tag is still dereferenced unconditionally, so a NULL tag would crash
in this_cpu_add(). One possible fix is sketched below.

> +}
> +
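Just an untested sketch: repeating the same !ref || !tag bail-out that
alloc_tag_ref_set() already does would avoid the NULL dereference, and it
also keeps the two counters in sync (either both calls and bytes get bumped,
or neither does):

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
                                 size_t bytes)
{
        alloc_tag_ref_set(ref, tag);

        /*
         * alloc_tag_ref_set() silently bails out on !ref || !tag; repeat
         * that check so tag->counters is never dereferenced through a NULL
         * tag, and so bytes is only added when calls was incremented.
         */
        if (!ref || !tag)
                return;

        this_cpu_add(tag->counters->bytes, bytes);
}

Alternatively, alloc_tag_ref_set() could return a bool indicating whether it
actually set the ref, and alloc_tag_add() could bail out on false.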

