+Ulf to keep me honest on the power domains

On Thu, Jan 9, 2020 at 10:08 PM Steven Price <steven.pr...@arm.com> wrote:
>
> On 08/01/2020 05:23, Nicolas Boichat wrote:
> > When there is a single power domain per device, the core will
> > ensure the power domains are all switched on.
> >
> > However, when there are multiple ones, as in MT8183 Bifrost GPU,
> > we need to handle them in driver code.
> >
> >
> > Signed-off-by: Nicolas Boichat <drink...@chromium.org>
> > ---
> >
> > The downstream driver we use on chromeos-4.19 currently uses 2
> > additional devices in device tree to accommodate for this [1], but
> > I believe this solution is cleaner.
>
> I'm not sure what is best, but it seems odd to encode this into the Panfrost 
> driver itself - it doesn't have any knowledge of what to do with these power 
> domains. The naming of the domains looks suspiciously like someone thought 
> that e.g. only half of the cores could be powered, but it doesn't look like 
> that was implemented in the chromeos driver linked and anyway that is *meant* 
> to be automatic in the hardware! (I.e. if you only power up one core in one 
> core stack then the PDC should only enable the power domain for that set of 
> cores).

This is actually implemented in the Chrome OS driver [1]. IMHO power
domains are a bit confusing [2]:
 i. If there's only 1 power domain in the device, then the core takes
care of powering on the domain (based on pm_runtime)
 ii. If there's more than 1 power domain, then the device needs to
link the domains manually.

So the Chrome OS driver [1] takes approach (i) by creating 3 devices,
each with 1 power domain that is switched on/off automatically using
pm_runtime.

This patch takes approach (ii) with device links to handle the extra domains.

I believe the latter is more upstream-friendly, but, as always,
suggestions welcome.

[2] 
https://elixir.bootlin.com/linux/latest/source/drivers/base/power/domain.c#L2466

> Steve
>
> >
> > [1] 
> > https://chromium.googlesource.com/chromiumos/third_party/kernel/+/refs/heads/chromeos-4.19/drivers/gpu/arm/midgard/platform/mediatek/mali_kbase_runtime_pm.c#31
> >
> > drivers/gpu/drm/panfrost/panfrost_device.c | 87 ++++++++++++++++++++--
> >   drivers/gpu/drm/panfrost/panfrost_device.h |  4 +
> >   2 files changed, 83 insertions(+), 8 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c 
> > b/drivers/gpu/drm/panfrost/panfrost_device.c
> > index a0b0a6fef8b4e63..c6e9e059de94a4d 100644
> > --- a/drivers/gpu/drm/panfrost/panfrost_device.c
> > +++ b/drivers/gpu/drm/panfrost/panfrost_device.c
> > @@ -5,6 +5,7 @@
> >   #include <linux/clk.h>
> >   #include <linux/reset.h>
> >   #include <linux/platform_device.h>
> > +#include <linux/pm_domain.h>
> >   #include <linux/regulator/consumer.h>
> >
> >   #include "panfrost_device.h"
> > @@ -131,6 +132,67 @@ static void panfrost_regulator_fini(struct 
> > panfrost_device *pfdev)
> >       regulator_disable(pfdev->regulator_sram);
> >   }
> >
> > +static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
> > +{
> > +     int i;
> > +
> > +     for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
> > +             if (!pfdev->pm_domain_devs[i])
> > +                     break;
> > +
> > +             if (pfdev->pm_domain_links[i])
> > +                     device_link_del(pfdev->pm_domain_links[i]);
> > +
> > +             dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
> > +     }
> > +}
> > +
> > +static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
> > +{
> > +     int err;
> > +     int i, num_domains;
> > +
> > +     num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
> > +                                              "power-domains",
> > +                                              "#power-domain-cells");
> > +     /* Single domains are handled by the core. */
> > +     if (num_domains < 2)
> > +             return 0;
> > +
> > +     if (num_domains > ARRAY_SIZE(pfdev->pm_domain_devs)) {
> > +             dev_err(pfdev->dev, "Too many pm-domains: %d\n", num_domains);
> > +             return -EINVAL;
> > +     }
> > +
> > +     for (i = 0; i < num_domains; i++) {
> > +             pfdev->pm_domain_devs[i] =
> > +                     dev_pm_domain_attach_by_id(pfdev->dev, i);
> > +             if (IS_ERR(pfdev->pm_domain_devs[i])) {
> > +                     err = PTR_ERR(pfdev->pm_domain_devs[i]);
> > +                     pfdev->pm_domain_devs[i] = NULL;
> > +                     dev_err(pfdev->dev,
> > +                             "failed to get pm-domain %d: %d\n", i, err);
> > +                     goto err;
> > +             }
> > +
> > +             pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
> > +                             pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
> > +                             DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
> > +             if (!pfdev->pm_domain_links[i]) {
> > +                     dev_err(pfdev->pm_domain_devs[i],
> > +                             "adding device link failed!\n");
> > +                     err = -ENODEV;
> > +                     goto err;
> > +             }
> > +     }
> > +
> > +     return 0;
> > +
> > +err:
> > +     panfrost_pm_domain_fini(pfdev);
> > +     return err;
> > +}
> > +
> >   int panfrost_device_init(struct panfrost_device *pfdev)
> >   {
> >       int err;
> > @@ -161,37 +223,45 @@ int panfrost_device_init(struct panfrost_device 
> > *pfdev)
> >               goto err_out1;
> >       }
> >
> > +     err = panfrost_pm_domain_init(pfdev);
> > +     if (err) {
> > +             dev_err(pfdev->dev, "pm_domain init failed %d\n", err);
> > +             goto err_out2;
> > +     }
> > +
> >       res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
> >       pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
> >       if (IS_ERR(pfdev->iomem)) {
> >               dev_err(pfdev->dev, "failed to ioremap iomem\n");
> >               err = PTR_ERR(pfdev->iomem);
> > -             goto err_out2;
> > +             goto err_out3;
> >       }
> >
> >       err = panfrost_gpu_init(pfdev);
> >       if (err)
> > -             goto err_out2;
> > +             goto err_out3;
> >
> >       err = panfrost_mmu_init(pfdev);
> >       if (err)
> > -             goto err_out3;
> > +             goto err_out4;
> >
> >       err = panfrost_job_init(pfdev);
> >       if (err)
> > -             goto err_out4;
> > +             goto err_out5;
> >
> >       err = panfrost_perfcnt_init(pfdev);
> >       if (err)
> > -             goto err_out5;
> > +             goto err_out6;
> >
> >       return 0;
> > -err_out5:
> > +err_out6:
> >       panfrost_job_fini(pfdev);
> > -err_out4:
> > +err_out5:
> >       panfrost_mmu_fini(pfdev);
> > -err_out3:
> > +err_out4:
> >       panfrost_gpu_fini(pfdev);
> > +err_out3:
> > +     panfrost_pm_domain_fini(pfdev);
> >   err_out2:
> >       panfrost_reset_fini(pfdev);
> >   err_out1:
> > @@ -208,6 +278,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
> >       panfrost_mmu_fini(pfdev);
> >       panfrost_gpu_fini(pfdev);
> >       panfrost_reset_fini(pfdev);
> > +     panfrost_pm_domain_fini(pfdev);
> >       panfrost_regulator_fini(pfdev);
> >       panfrost_clk_fini(pfdev);
> >   }
> > diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h 
> > b/drivers/gpu/drm/panfrost/panfrost_device.h
> > index a124334d69e7e93..92d471676fc7823 100644
> > --- a/drivers/gpu/drm/panfrost/panfrost_device.h
> > +++ b/drivers/gpu/drm/panfrost/panfrost_device.h
> > @@ -19,6 +19,7 @@ struct panfrost_job;
> >   struct panfrost_perfcnt;
> >
> >   #define NUM_JOB_SLOTS 3
> > +#define MAX_PM_DOMAINS 3
> >
> >   struct panfrost_features {
> >       u16 id;
> > @@ -62,6 +63,9 @@ struct panfrost_device {
> >       struct regulator *regulator;
> >       struct regulator *regulator_sram;
> >       struct reset_control *rstc;
> > +     /* pm_domains for devices with more than one. */
> > +     struct device *pm_domain_devs[MAX_PM_DOMAINS];
> > +     struct device_link *pm_domain_links[MAX_PM_DOMAINS];
> >
> >       struct panfrost_features features;
> >
> >
>
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

Reply via email to