On Wed, Jul 24, 2019 at 03:37:43PM -0700, Ian Rogers wrote:

> @@ -2597,6 +2612,30 @@ static int  __perf_install_in_context(void *info)
>               struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
>               reprogram = cgroup_is_descendant(cgrp->css.cgroup,
>                                       event->cgrp->css.cgroup);
> +
> +             /*
> +              * Ensure space for visit_groups_merge iterator storage. With
> +              * cgroup profiling we may have an event at each depth plus
> +              * system wide events.
> +              */
> +             max_iterators = perf_event_cgroup_depth(event) + 1;
> +             if (max_iterators >
> +                 cpuctx->visit_groups_merge_iterator_storage_size) {
> +                     struct perf_event **storage =
> +                        krealloc(cpuctx->visit_groups_merge_iterator_storage,
> +                                 sizeof(struct perf_event *) * max_iterators,
> +                                 GFP_KERNEL);
> +                     if (storage) {
> +                             cpuctx->visit_groups_merge_iterator_storage
> +                                             = storage;
> +                             cpuctx->visit_groups_merge_iterator_storage_size
> +                                             = max_iterators;
> +                     } else {
> +                             WARN_ONCE(1, "Unable to increase iterator "
> +                                     "storage for perf events with cgroups");
> +                             ret = -ENOMEM;
> +                     }
> +             }
>       }
>  #endif

This is completely insane and broken. You do not allocate memory from
hardirq context while holding all sorts of locks.

Also, the patches are still an unreadable mess, and they do far too much
in a single patch.

Please have a look at the completely untested lot at:

  git://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git perf/cgroup


Reply via email to the mailing list.