Re: [Intel-gfx] [PATCH 24/52] drm: Manage drm_gem_init with drmm_

2020-02-19 Thread Daniel Vetter
On Wed, Feb 19, 2020 at 3:52 PM Laurent Pinchart wrote:
>
> Hi Daniel,
>
> On Wed, Feb 19, 2020 at 03:37:46PM +0100, Daniel Vetter wrote:
> > On Wed, Feb 19, 2020 at 3:22 PM Laurent Pinchart wrote:
> > > On Wed, Feb 19, 2020 at 11:20:54AM +0100, Daniel Vetter wrote:
> > > > We might want to look into pushing this down into drm_mm_init, but
> > > > that would mean rolling out return codes to a pile of functions
> > > > unfortunately. So let's leave that for now.
> > > >
> > > > Signed-off-by: Daniel Vetter 
> > > > ---
> > > >  drivers/gpu/drm/drm_drv.c  |  8 +---
> > > >  drivers/gpu/drm/drm_gem.c  | 21 ++---
> > > >  drivers/gpu/drm/drm_internal.h |  1 -
> > > >  3 files changed, 11 insertions(+), 19 deletions(-)
> > > >
> > > > diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
> > > > index 03a1fb377830..7b3df1188da9 100644
> > > > --- a/drivers/gpu/drm/drm_drv.c
> > > > +++ b/drivers/gpu/drm/drm_drv.c
> > > > @@ -688,13 +688,10 @@ int drm_dev_init(struct drm_device *dev,
> > > >
> > > >   ret = drm_dev_set_unique(dev, dev_name(parent));
> > > >   if (ret)
> > > > - goto err_setunique;
> > > > + goto err;
> > > >
> > > >   return 0;
> > > >
> > > > -err_setunique:
> > > > - if (drm_core_check_feature(dev, DRIVER_GEM))
> > > > - drm_gem_destroy(dev);
> > > >  err:
> > > >   drm_managed_release(dev);
> > > >
> > > > @@ -756,9 +753,6 @@ EXPORT_SYMBOL(devm_drm_dev_init);
> > > >  void drm_dev_fini(struct drm_device *dev)
> > > >  {
> > > >   drm_vblank_cleanup(dev);
> > > > -
> > > > - if (drm_core_check_feature(dev, DRIVER_GEM))
> > > > - drm_gem_destroy(dev);
> > > >  }
> > > >  EXPORT_SYMBOL(drm_dev_fini);
> > > >
> > > > diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> > > > index 0b6e6623735e..31095e0f6b9f 100644
> > > > --- a/drivers/gpu/drm/drm_gem.c
> > > > +++ b/drivers/gpu/drm/drm_gem.c
> > > > @@ -44,6 +44,7 @@
> > > >  #include 
> > > >  #include 
> > > >  #include 
> > > > +#include 
> > > >  #include 
> > > >  #include 
> > > >
> > > > @@ -77,6 +78,12 @@
> > > >   * up at a later date, and as our interface with shmfs for memory allocation.
> > > >   */
> > > >
> > > > +static void
> > > > +drm_gem_init_release(struct drm_device *dev, void *ptr)
> > > > +{
> > > > + drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> > > > +}
> > > > +
> > > >  /**
> > > >   * drm_gem_init - Initialize the GEM device fields
> > > >   * @dev: drm_devic structure to initialize
> > > > @@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
> > > >   mutex_init(>object_name_lock);
> > > >   idr_init_base(>object_name_idr, 1);
> > > >
> > > > - vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
> > > > + vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
> > > > +   GFP_KERNEL);
> > > >   if (!vma_offset_manager) {
> > > >   DRM_ERROR("out of memory\n");
> > > >   return -ENOMEM;
> > > > @@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
> > > >   DRM_FILE_PAGE_OFFSET_START,
> > > >   DRM_FILE_PAGE_OFFSET_SIZE);
> > > >
> > > > - return 0;
> > > > -}
> > > > -
> > > > -void
> > > > -drm_gem_destroy(struct drm_device *dev)
> > > > -{
> > > > -
> > > > - drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> > > > - kfree(dev->vma_offset_manager);
> > > > - dev->vma_offset_manager = NULL;
> > > > + return drmm_add_action(dev, drm_gem_init_release, NULL);
> > >
> > > This looks fine as such (although I'm not sure if the managed API
> > > overhead is really worth it for core code), but it leads to a potential
> > > issue: if we handle more of the cleanup through the managed API, how do
> > > we ensure that the cleanup functions are called in the right order (when
> > > order matters) ?
> >
> > KASAN essentially (already helped while developing this), plus review.
> > It's still the same problem like reviewing onion unwind code, it's
> > just less fragile for the normal case.
>
> That wasn't really my question though. If there are ordering
> constraints, and if we want to honour them, the ordering of cleanups
> need to be documented in the API (and of course implemented). We may for
> instance want to always do cleanups in the reverse order of the
> allocations.
>
> > I also think that if you have ordering constraints in your drm_device
> > release functions, there's a more fundamental problem going on.
> > Unfortunately we have a lot of these, which is why converting
> > everything in drm, including drivers, is not going to be easy nor
> > quick. There's a lot of problems. E.g. naively converting all
> > drm_connector allocations from devm_kzalloc to drmm_kzalloc still
> > means they get released too early, since the 

Re: [Intel-gfx] [PATCH 24/52] drm: Manage drm_gem_init with drmm_

2020-02-19 Thread Laurent Pinchart
Hi Daniel,

On Wed, Feb 19, 2020 at 03:37:46PM +0100, Daniel Vetter wrote:
> On Wed, Feb 19, 2020 at 3:22 PM Laurent Pinchart wrote:
> > On Wed, Feb 19, 2020 at 11:20:54AM +0100, Daniel Vetter wrote:
> > > We might want to look into pushing this down into drm_mm_init, but
> > > that would mean rolling out return codes to a pile of functions
> > > unfortunately. So let's leave that for now.
> > >
> > > Signed-off-by: Daniel Vetter 
> > > ---
> > >  drivers/gpu/drm/drm_drv.c  |  8 +---
> > >  drivers/gpu/drm/drm_gem.c  | 21 ++---
> > >  drivers/gpu/drm/drm_internal.h |  1 -
> > >  3 files changed, 11 insertions(+), 19 deletions(-)
> > >
> > > diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
> > > index 03a1fb377830..7b3df1188da9 100644
> > > --- a/drivers/gpu/drm/drm_drv.c
> > > +++ b/drivers/gpu/drm/drm_drv.c
> > > @@ -688,13 +688,10 @@ int drm_dev_init(struct drm_device *dev,
> > >
> > >   ret = drm_dev_set_unique(dev, dev_name(parent));
> > >   if (ret)
> > > - goto err_setunique;
> > > + goto err;
> > >
> > >   return 0;
> > >
> > > -err_setunique:
> > > - if (drm_core_check_feature(dev, DRIVER_GEM))
> > > - drm_gem_destroy(dev);
> > >  err:
> > >   drm_managed_release(dev);
> > >
> > > @@ -756,9 +753,6 @@ EXPORT_SYMBOL(devm_drm_dev_init);
> > >  void drm_dev_fini(struct drm_device *dev)
> > >  {
> > >   drm_vblank_cleanup(dev);
> > > -
> > > - if (drm_core_check_feature(dev, DRIVER_GEM))
> > > - drm_gem_destroy(dev);
> > >  }
> > >  EXPORT_SYMBOL(drm_dev_fini);
> > >
> > > diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> > > index 0b6e6623735e..31095e0f6b9f 100644
> > > --- a/drivers/gpu/drm/drm_gem.c
> > > +++ b/drivers/gpu/drm/drm_gem.c
> > > @@ -44,6 +44,7 @@
> > >  #include 
> > >  #include 
> > >  #include 
> > > +#include 
> > >  #include 
> > >  #include 
> > >
> > > @@ -77,6 +78,12 @@
> > >   * up at a later date, and as our interface with shmfs for memory allocation.
> > >   */
> > >
> > > +static void
> > > +drm_gem_init_release(struct drm_device *dev, void *ptr)
> > > +{
> > > + drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> > > +}
> > > +
> > >  /**
> > >   * drm_gem_init - Initialize the GEM device fields
> > >   * @dev: drm_devic structure to initialize
> > > @@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
> > >   mutex_init(>object_name_lock);
> > >   idr_init_base(>object_name_idr, 1);
> > >
> > > - vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
> > > + vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
> > > +   GFP_KERNEL);
> > >   if (!vma_offset_manager) {
> > >   DRM_ERROR("out of memory\n");
> > >   return -ENOMEM;
> > > @@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
> > >   DRM_FILE_PAGE_OFFSET_START,
> > >   DRM_FILE_PAGE_OFFSET_SIZE);
> > >
> > > - return 0;
> > > -}
> > > -
> > > -void
> > > -drm_gem_destroy(struct drm_device *dev)
> > > -{
> > > -
> > > - drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> > > - kfree(dev->vma_offset_manager);
> > > - dev->vma_offset_manager = NULL;
> > > + return drmm_add_action(dev, drm_gem_init_release, NULL);
> >
> > This looks fine as such (although I'm not sure if the managed API
> > overhead is really worth it for core code), but it leads to a potential
> > issue: if we handle more of the cleanup through the managed API, how do
> > we ensure that the cleanup functions are called in the right order (when
> > order matters) ?
> 
> KASAN essentially (already helped while developing this), plus review.
> It's still the same problem like reviewing onion unwind code, it's
> just less fragile for the normal case.

That wasn't really my question though. If there are ordering
constraints, and if we want to honour them, the ordering of cleanups
needs to be documented in the API (and of course implemented). We may for
instance want to always do cleanups in the reverse order of the
allocations.
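
To make that reverse-order policy concrete, here is a minimal, self-contained
userspace sketch of the idea: cleanup actions are recorded as they are
registered and run newest-first at release time. The names (struct
managed_action, dev_add_action, dev_release, say) are invented for this
illustration and are not the drm_managed API.

#include <stdio.h>
#include <stdlib.h>

/* One registered cleanup action: a callback plus its context pointer. */
struct managed_action {
	void (*release)(void *ctx);
	void *ctx;
	struct managed_action *next;	/* newest action is kept at the head */
};

static struct managed_action *actions;

/* Register a cleanup action; newest entries go to the front, like a stack. */
static int dev_add_action(void (*release)(void *ctx), void *ctx)
{
	struct managed_action *a = malloc(sizeof(*a));

	if (!a)
		return -1;
	a->release = release;
	a->ctx = ctx;
	a->next = actions;
	actions = a;
	return 0;
}

/* Release in reverse order of registration: pop the stack. */
static void dev_release(void)
{
	while (actions) {
		struct managed_action *a = actions;

		actions = a->next;
		a->release(a->ctx);
		free(a);
	}
}

static void say(void *ctx)
{
	printf("cleaning up %s\n", (const char *)ctx);
}

int main(void)
{
	dev_add_action(say, "vma offset manager");	/* registered first */
	dev_add_action(say, "connector");		/* registered last */
	dev_release();	/* prints "connector" first, then "vma offset manager" */
	return 0;
}

Releasing newest-first is also how devm_* resources are torn down for a
struct device, which is presumably why it is the natural default to document
here as well.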

> I also think that if you have ordering constraints in your drm_device
> release functions, there's a more fundamental problem going on.
> Unfortunately we have a lot of these, which is why converting
> everything in drm, including drivers, is not going to be easy nor
> quick. There's a lot of problems. E.g. naively converting all
> drm_connector allocations from devm_kzalloc to drmm_kzalloc still
> means they get released too early, since the drm_mode_config_init
> happens before you set up the connectors. So you still have the
> problem that your connector_funcs->destroy gets called on already
> freed memory. Lots of work ahead.

Yes, that's the kind of issue I was thinking about. We have ordering
constraints, and they will not go away. What's your idea on how to 

Re: [Intel-gfx] [PATCH 24/52] drm: Manage drm_gem_init with drmm_

2020-02-19 Thread Daniel Vetter
On Wed, Feb 19, 2020 at 3:22 PM Laurent Pinchart wrote:
>
> Hi Daniel,
>
> Thank you for the patch.
>
> On Wed, Feb 19, 2020 at 11:20:54AM +0100, Daniel Vetter wrote:
> > We might want to look into pushing this down into drm_mm_init, but
> > that would mean rolling out return codes to a pile of functions
> > unfortunately. So let's leave that for now.
> >
> > Signed-off-by: Daniel Vetter 
> > ---
> >  drivers/gpu/drm/drm_drv.c  |  8 +---
> >  drivers/gpu/drm/drm_gem.c  | 21 ++---
> >  drivers/gpu/drm/drm_internal.h |  1 -
> >  3 files changed, 11 insertions(+), 19 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
> > index 03a1fb377830..7b3df1188da9 100644
> > --- a/drivers/gpu/drm/drm_drv.c
> > +++ b/drivers/gpu/drm/drm_drv.c
> > @@ -688,13 +688,10 @@ int drm_dev_init(struct drm_device *dev,
> >
> >   ret = drm_dev_set_unique(dev, dev_name(parent));
> >   if (ret)
> > - goto err_setunique;
> > + goto err;
> >
> >   return 0;
> >
> > -err_setunique:
> > - if (drm_core_check_feature(dev, DRIVER_GEM))
> > - drm_gem_destroy(dev);
> >  err:
> >   drm_managed_release(dev);
> >
> > @@ -756,9 +753,6 @@ EXPORT_SYMBOL(devm_drm_dev_init);
> >  void drm_dev_fini(struct drm_device *dev)
> >  {
> >   drm_vblank_cleanup(dev);
> > -
> > - if (drm_core_check_feature(dev, DRIVER_GEM))
> > - drm_gem_destroy(dev);
> >  }
> >  EXPORT_SYMBOL(drm_dev_fini);
> >
> > diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> > index 0b6e6623735e..31095e0f6b9f 100644
> > --- a/drivers/gpu/drm/drm_gem.c
> > +++ b/drivers/gpu/drm/drm_gem.c
> > @@ -44,6 +44,7 @@
> >  #include 
> >  #include 
> >  #include 
> > +#include 
> >  #include 
> >  #include 
> >
> > @@ -77,6 +78,12 @@
> >   * up at a later date, and as our interface with shmfs for memory allocation.
> >   */
> >
> > +static void
> > +drm_gem_init_release(struct drm_device *dev, void *ptr)
> > +{
> > + drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> > +}
> > +
> >  /**
> >   * drm_gem_init - Initialize the GEM device fields
> >   * @dev: drm_devic structure to initialize
> > @@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
> >   mutex_init(>object_name_lock);
> >   idr_init_base(>object_name_idr, 1);
> >
> > - vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
> > + vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
> > +   GFP_KERNEL);
> >   if (!vma_offset_manager) {
> >   DRM_ERROR("out of memory\n");
> >   return -ENOMEM;
> > @@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
> >   DRM_FILE_PAGE_OFFSET_START,
> >   DRM_FILE_PAGE_OFFSET_SIZE);
> >
> > - return 0;
> > -}
> > -
> > -void
> > -drm_gem_destroy(struct drm_device *dev)
> > -{
> > -
> > - drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> > - kfree(dev->vma_offset_manager);
> > - dev->vma_offset_manager = NULL;
> > + return drmm_add_action(dev, drm_gem_init_release, NULL);
>
> This looks fine as such (although I'm not sure if the managed API
> overhead is really worth it for core code), but it leads to a potential
> issue: if we handle more of the cleanup through the managed API, how do
> we ensure that the cleanup functions are called in the right order (when
> order matters) ?

KASAN, essentially (it already helped while developing this), plus review.
It's still the same problem as reviewing onion unwind code, it's
just less fragile for the normal case.

I also think that if you have ordering constraints in your drm_device
release functions, there's a more fundamental problem going on.
Unfortunately we have a lot of these, which is why converting
everything in drm, including drivers, is not going to be easy or
quick. There are a lot of problems. E.g. naively converting all
drm_connector allocations from devm_kzalloc to drmm_kzalloc still
means they get released too early, since drm_mode_config_init
happens before you set up the connectors. So you still have the
problem that your connector_funcs->destroy gets called on already
freed memory. Lots of work ahead.
-Daniel
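
To make the lifetime mismatch Daniel describes concrete, here is a hedged
driver-style sketch (foo_connector, foo_connector_funcs and
foo_create_connector are invented and not taken from any real driver;
devm_kzalloc(), drmm_kzalloc(), drm_connector_init() and
drm_connector_cleanup() are the real interfaces under discussion). The
comments spell out when each allocation goes away relative to the drm_device
release:

#include <linux/device.h>
#include <linux/slab.h>

#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_managed.h>

struct foo_connector {
	struct drm_connector base;
};

static const struct drm_connector_funcs foo_connector_funcs = {
	.destroy = drm_connector_cleanup,
};

static int foo_create_connector(struct drm_device *drm, struct device *dev)
{
	struct foo_connector *conn;

	/*
	 * devm_kzalloc: freed when 'dev' is unbound.  The drm_device (and
	 * open userspace handles) can outlive that point, so the mode_config
	 * cleanup run from the drm_device release path still walks the
	 * connector list and calls conn->base.funcs->destroy() on memory
	 * that is already gone.
	 */
	conn = devm_kzalloc(dev, sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return -ENOMEM;

	/*
	 * Naively switching to drmm_kzalloc(drm, sizeof(*conn), GFP_KERNEL)
	 * ties the allocation to the drm_device instead, but as noted above
	 * that alone still frees it too early: drm_mode_config_init()
	 * registered its cleanup before this allocation was made, so a
	 * newest-first release drops the connector memory before the
	 * mode_config teardown that dereferences it.
	 */
	return drm_connector_init(drm, &conn->base, &foo_connector_funcs,
				  DRM_MODE_CONNECTOR_HDMIA);
}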


> >  }
> >
> >  /**
> > diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
> > index 8c2628dfc6c7..cb09e95a795e 100644
> > --- a/drivers/gpu/drm/drm_internal.h
> > +++ b/drivers/gpu/drm/drm_internal.h
> > @@ -144,7 +144,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
> >  /* drm_gem.c */
> >  struct drm_gem_object;
> >  int drm_gem_init(struct drm_device *dev);
> > -void drm_gem_destroy(struct drm_device *dev);
> >  int drm_gem_handle_create_tail(struct drm_file *file_priv,
> >  struct drm_gem_object *obj,
> >  u32 *handlep);
>
> --
> Regards,
>
> Laurent Pinchart




Re: [Intel-gfx] [PATCH 24/52] drm: Manage drm_gem_init with drmm_

2020-02-19 Thread Laurent Pinchart
Hi Daniel,

Thank you for the patch.

On Wed, Feb 19, 2020 at 11:20:54AM +0100, Daniel Vetter wrote:
> We might want to look into pushing this down into drm_mm_init, but
> that would mean rolling out return codes to a pile of functions
> unfortunately. So let's leave that for now.
> 
> Signed-off-by: Daniel Vetter 
> ---
>  drivers/gpu/drm/drm_drv.c  |  8 +---
>  drivers/gpu/drm/drm_gem.c  | 21 ++---
>  drivers/gpu/drm/drm_internal.h |  1 -
>  3 files changed, 11 insertions(+), 19 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
> index 03a1fb377830..7b3df1188da9 100644
> --- a/drivers/gpu/drm/drm_drv.c
> +++ b/drivers/gpu/drm/drm_drv.c
> @@ -688,13 +688,10 @@ int drm_dev_init(struct drm_device *dev,
>  
>   ret = drm_dev_set_unique(dev, dev_name(parent));
>   if (ret)
> - goto err_setunique;
> + goto err;
>  
>   return 0;
>  
> -err_setunique:
> - if (drm_core_check_feature(dev, DRIVER_GEM))
> - drm_gem_destroy(dev);
>  err:
>   drm_managed_release(dev);
>  
> @@ -756,9 +753,6 @@ EXPORT_SYMBOL(devm_drm_dev_init);
>  void drm_dev_fini(struct drm_device *dev)
>  {
>   drm_vblank_cleanup(dev);
> -
> - if (drm_core_check_feature(dev, DRIVER_GEM))
> - drm_gem_destroy(dev);
>  }
>  EXPORT_SYMBOL(drm_dev_fini);
>  
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index 0b6e6623735e..31095e0f6b9f 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -44,6 +44,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  
> @@ -77,6 +78,12 @@
>   * up at a later date, and as our interface with shmfs for memory allocation.
>   */
>  
> +static void
> +drm_gem_init_release(struct drm_device *dev, void *ptr)
> +{
> + drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> +}
> +
>  /**
>   * drm_gem_init - Initialize the GEM device fields
>   * @dev: drm_devic structure to initialize
> @@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
>   mutex_init(>object_name_lock);
>   idr_init_base(>object_name_idr, 1);
>  
> - vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
> + vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
> +   GFP_KERNEL);
>   if (!vma_offset_manager) {
>   DRM_ERROR("out of memory\n");
>   return -ENOMEM;
> @@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
>   DRM_FILE_PAGE_OFFSET_START,
>   DRM_FILE_PAGE_OFFSET_SIZE);
>  
> - return 0;
> -}
> -
> -void
> -drm_gem_destroy(struct drm_device *dev)
> -{
> -
> - drm_vma_offset_manager_destroy(dev->vma_offset_manager);
> - kfree(dev->vma_offset_manager);
> - dev->vma_offset_manager = NULL;
> + return drmm_add_action(dev, drm_gem_init_release, NULL);

This looks fine as such (although I'm not sure if the managed API
overhead is really worth it for core code), but it leads to a potential
issue: if we handle more of the cleanup through the managed API, how do
we ensure that the cleanup functions are called in the right order (when
order matters)?

>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
> index 8c2628dfc6c7..cb09e95a795e 100644
> --- a/drivers/gpu/drm/drm_internal.h
> +++ b/drivers/gpu/drm/drm_internal.h
> @@ -144,7 +144,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
>  /* drm_gem.c */
>  struct drm_gem_object;
>  int drm_gem_init(struct drm_device *dev);
> -void drm_gem_destroy(struct drm_device *dev);
>  int drm_gem_handle_create_tail(struct drm_file *file_priv,
>  struct drm_gem_object *obj,
>  u32 *handlep);

-- 
Regards,

Laurent Pinchart


[Intel-gfx] [PATCH 24/52] drm: Manage drm_gem_init with drmm_

2020-02-19 Thread Daniel Vetter
We might want to look into pushing this down into drm_mm_init, but
that would mean rolling out return codes to a pile of functions
unfortunately. So let's leave that for now.

Signed-off-by: Daniel Vetter 
---
 drivers/gpu/drm/drm_drv.c  |  8 +---
 drivers/gpu/drm/drm_gem.c  | 21 ++---
 drivers/gpu/drm/drm_internal.h |  1 -
 3 files changed, 11 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 03a1fb377830..7b3df1188da9 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -688,13 +688,10 @@ int drm_dev_init(struct drm_device *dev,
 
ret = drm_dev_set_unique(dev, dev_name(parent));
if (ret)
-   goto err_setunique;
+   goto err;
 
return 0;
 
-err_setunique:
-   if (drm_core_check_feature(dev, DRIVER_GEM))
-   drm_gem_destroy(dev);
 err:
drm_managed_release(dev);
 
@@ -756,9 +753,6 @@ EXPORT_SYMBOL(devm_drm_dev_init);
 void drm_dev_fini(struct drm_device *dev)
 {
drm_vblank_cleanup(dev);
-
-   if (drm_core_check_feature(dev, DRIVER_GEM))
-   drm_gem_destroy(dev);
 }
 EXPORT_SYMBOL(drm_dev_fini);
 
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0b6e6623735e..31095e0f6b9f 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -44,6 +44,7 @@
 #include 
 #include 
 #include 
+#include <drm/drm_managed.h>
 #include 
 #include 
 
@@ -77,6 +78,12 @@
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+static void
+drm_gem_init_release(struct drm_device *dev, void *ptr)
+{
+   drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+}
+
 /**
  * drm_gem_init - Initialize the GEM device fields
  * @dev: drm_devic structure to initialize
@@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);
 
-   vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+   vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
+ GFP_KERNEL);
if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
@@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);
 
-   return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
-
-   drm_vma_offset_manager_destroy(dev->vma_offset_manager);
-   kfree(dev->vma_offset_manager);
-   dev->vma_offset_manager = NULL;
+   return drmm_add_action(dev, drm_gem_init_release, NULL);
 }
 
 /**
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 8c2628dfc6c7..cb09e95a795e 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -144,7 +144,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
 /* drm_gem.c */
 struct drm_gem_object;
 int drm_gem_init(struct drm_device *dev);
-void drm_gem_destroy(struct drm_device *dev);
 int drm_gem_handle_create_tail(struct drm_file *file_priv,
   struct drm_gem_object *obj,
   u32 *handlep);
-- 
2.24.1
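
The conversion recipe above generalises: replace kzalloc()/kfree() with
drmm_kzalloc(), drop the explicit *_destroy() call, and register the
remaining teardown as a release action via drmm_add_action(). As a hedged
illustration of the same shape (foo_manager, foo_init and foo_release are
invented; drmm_kzalloc() and drmm_add_action() are the interfaces this series
adds), a made-up "foo" facility converted the same way might look like:

#include <linux/slab.h>

#include <drm/drm_device.h>
#include <drm/drm_managed.h>

/* Hypothetical per-device state for an imaginary "foo" facility. */
struct foo_manager {
	unsigned long flags;
};

static void foo_release(struct drm_device *dev, void *ptr)
{
	struct foo_manager *mgr = ptr;

	/*
	 * Undo whatever foo_init() set up.  The struct itself needs no
	 * kfree() here: it came from drmm_kzalloc(), so the managed layer
	 * frees it when the drm_device is released.
	 */
	mgr->flags = 0;
}

static int foo_init(struct drm_device *dev)
{
	struct foo_manager *mgr;

	mgr = drmm_kzalloc(dev, sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return -ENOMEM;

	/* Any further setup for "foo" would happen here. */

	/*
	 * Registering the release action replaces an explicit foo_fini()
	 * call in drm_dev_fini() and in the error-unwind paths of
	 * drm_dev_init(), exactly as the patch does for drm_gem_destroy().
	 */
	return drmm_add_action(dev, foo_release, mgr);
}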
