Re: [PATCH 1/3] drm/v3d: Take a lock across GPU scheduler job creation and queuing.

2018-06-06 Thread Christian König

On 06.06.2018 at 10:46, Lucas Stach wrote:
> On Tuesday, 05.06.2018 at 12:03 -0700, Eric Anholt wrote:
> > Between creation and queueing of a job, you need to prevent any other
> > job from being created and queued.  Otherwise the scheduler's fences
> > may be signaled out of seqno order.
> > 
> > Signed-off-by: Eric Anholt 
> > Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+")
> > ---
> > 
> > ccing amd-gfx due to interaction of this series with the scheduler.
> > 
> >  drivers/gpu/drm/v3d/v3d_drv.h |  5 +++++
> >  drivers/gpu/drm/v3d/v3d_gem.c | 11 +++++++++--
> >  2 files changed, 14 insertions(+), 2 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
> > index a043ac3aae98..26005abd9c5d 100644
> > --- a/drivers/gpu/drm/v3d/v3d_drv.h
> > +++ b/drivers/gpu/drm/v3d/v3d_drv.h
> > @@ -85,6 +85,11 @@ struct v3d_dev {
> >  	 */
> >  	struct mutex reset_lock;
> >  
> > +	/* Lock taken when creating and pushing the GPU scheduler
> > +	 * jobs, to keep the sched-fence seqnos in order.
> > +	 */
> > +	struct mutex sched_lock;
> > +
> >  	struct {
> >  		u32 num_allocated;
> >  		u32 pages_allocated;
> > diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
> > index b513f9189caf..9ea83bdb9a30 100644
> > --- a/drivers/gpu/drm/v3d/v3d_gem.c
> > +++ b/drivers/gpu/drm/v3d/v3d_gem.c
> > @@ -550,13 +550,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
> >  	if (ret)
> >  		goto fail;
> >  
> > +	mutex_lock(&v3d->sched_lock);
> >  	if (exec->bin.start != exec->bin.end) {
> >  		ret = drm_sched_job_init(&exec->bin.base,
> >  					 &v3d->queue[V3D_BIN].sched,
> >  					 &v3d_priv->sched_entity[V3D_BIN],
> >  					 v3d_priv);
> > -		if (ret)
> > +		if (ret) {
> > +			mutex_unlock(&v3d->sched_lock);
> >  			goto fail_unreserve;
> 
> I don't see any path where you would go to fail_unreserve with the
> mutex not yet locked, so you could just fold the mutex_unlock into this
> error path for a bit less code duplication.
> 
> Otherwise this looks fine.
> 
> Regards,
> Lucas

Yeah, agree that could be cleaned up.

I can't judge the correctness of the driver, but at least the scheduler
handling looks good to me.

Regards,
Christian.

> > +		}
> >  
> >  		exec->bin_done_fence =
> >  			dma_fence_get(&exec->bin.base.s_fence->finished);
> > @@ -570,12 +573,15 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
> >  				 &v3d->queue[V3D_RENDER].sched,
> >  				 &v3d_priv->sched_entity[V3D_RENDER],
> >  				 v3d_priv);
> > -	if (ret)
> > +	if (ret) {
> > +		mutex_unlock(&v3d->sched_lock);
> >  		goto fail_unreserve;
> > +	}
> >  
> >  	kref_get(&exec->refcount); /* put by scheduler job completion */
> >  	drm_sched_entity_push_job(&exec->render.base,
> >  				  &v3d_priv->sched_entity[V3D_RENDER]);
> > +	mutex_unlock(&v3d->sched_lock);
> >  
> >  	v3d_attach_object_fences(exec);
> >  
> > @@ -615,6 +621,7 @@ v3d_gem_init(struct drm_device *dev)
> >  	spin_lock_init(&v3d->job_lock);
> >  	mutex_init(&v3d->bo_lock);
> >  	mutex_init(&v3d->reset_lock);
> > +	mutex_init(&v3d->sched_lock);
> >  
> >  	/* Note: We don't allocate address 0.  Various bits of HW
> >  	 * treat 0 as special, such as the occlusion query counters



Re: [PATCH 1/3] drm/v3d: Take a lock across GPU scheduler job creation and queuing.

2018-06-06 Thread Lucas Stach
On Tuesday, 05.06.2018 at 12:03 -0700, Eric Anholt wrote:
> Between creation and queueing of a job, you need to prevent any other
> job from being created and queued.  Otherwise the scheduler's fences
> may be signaled out of seqno order.
> 
> Signed-off-by: Eric Anholt 
> Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D V3.x+")
> ---
> 
> ccing amd-gfx due to interaction of this series with the scheduler.
> 
>  drivers/gpu/drm/v3d/v3d_drv.h |  5 +++++
>  drivers/gpu/drm/v3d/v3d_gem.c | 11 +++++++++--
>  2 files changed, 14 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
> index a043ac3aae98..26005abd9c5d 100644
> --- a/drivers/gpu/drm/v3d/v3d_drv.h
> +++ b/drivers/gpu/drm/v3d/v3d_drv.h
> @@ -85,6 +85,11 @@ struct v3d_dev {
>  	 */
>  	struct mutex reset_lock;
>  
> +	/* Lock taken when creating and pushing the GPU scheduler
> +	 * jobs, to keep the sched-fence seqnos in order.
> +	 */
> +	struct mutex sched_lock;
> +
>  	struct {
>  		u32 num_allocated;
>  		u32 pages_allocated;
> diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
> index b513f9189caf..9ea83bdb9a30 100644
> --- a/drivers/gpu/drm/v3d/v3d_gem.c
> +++ b/drivers/gpu/drm/v3d/v3d_gem.c
> @@ -550,13 +550,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
>  	if (ret)
>  		goto fail;
>  
> +	mutex_lock(&v3d->sched_lock);
>  	if (exec->bin.start != exec->bin.end) {
>  		ret = drm_sched_job_init(&exec->bin.base,
>  					 &v3d->queue[V3D_BIN].sched,
>  					 &v3d_priv->sched_entity[V3D_BIN],
>  					 v3d_priv);
> -		if (ret)
> +		if (ret) {
> +			mutex_unlock(&v3d->sched_lock);
>  			goto fail_unreserve;
I don't see any path where you would go to fail_unreserve with the
mutex not yet locked, so you could just fold the mutex_unlock into this
error path for a bit less code duplication.
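
Something like the following sketch is what I have in mind (assuming the
fail_unreserve/fail labels in v3d_submit_cl_ioctl() still look as they do
in 57692c94dcbe; the "..." elides the unchanged middle of the function):

	/* Sketch only: unlock once in the shared error path, so both
	 * drm_sched_job_init() failure sites stay a bare goto.
	 */
	if (ret)
		goto fail_unreserve;
	...
fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	v3d_unlock_bo_reservations(dev, exec, &acquire_ctx);
fail:
	v3d_exec_put(exec);
	return ret;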

Otherwise this looks fine.

Regards,
Lucas

> +		}
>  
>  		exec->bin_done_fence =
>  			dma_fence_get(&exec->bin.base.s_fence->finished);
> @@ -570,12 +573,15 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
>  				 &v3d->queue[V3D_RENDER].sched,
>  				 &v3d_priv->sched_entity[V3D_RENDER],
>  				 v3d_priv);
> -	if (ret)
> +	if (ret) {
> +		mutex_unlock(&v3d->sched_lock);
>  		goto fail_unreserve;
> +	}
>  
>  	kref_get(&exec->refcount); /* put by scheduler job completion */
>  	drm_sched_entity_push_job(&exec->render.base,
>  				  &v3d_priv->sched_entity[V3D_RENDER]);
> +	mutex_unlock(&v3d->sched_lock);
>  
>  	v3d_attach_object_fences(exec);
>  
> @@ -615,6 +621,7 @@ v3d_gem_init(struct drm_device *dev)
>  	spin_lock_init(&v3d->job_lock);
>  	mutex_init(&v3d->bo_lock);
>  	mutex_init(&v3d->reset_lock);
> +	mutex_init(&v3d->sched_lock);
>  
>  	/* Note: We don't allocate address 0.  Various bits of HW
>  	 * treat 0 as special, such as the occlusion query counters


[PATCH 1/3] drm/v3d: Take a lock across GPU scheduler job creation and queuing.

2018-06-05 Thread Eric Anholt
Between creation and queueing of a job, you need to prevent any other
job from being created and queued.  Otherwise the scheduler's fences
may be signaled out of seqno order.

Signed-off-by: Eric Anholt 
Fixes: 57692c94dcbe ("drm/v3d: Introduce a new DRM driver for Broadcom V3D 
V3.x+")
---

ccing amd-gfx due to interaction of this series with the scheduler.
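
For the scheduler folks, a minimal sketch of the pattern this patch
serializes (the job/sched/entity/owner names are placeholders, not the
driver's): drm_sched_job_init() creates the job's scheduler fences and
hands out their seqnos, while drm_sched_entity_push_job() fixes the order
in which those fences will signal, so the two calls have to form one
critical section per device.

	/* If another submission runs job_init after ours but push_job
	 * before ours, its later seqno signals first, breaking the
	 * in-order assumption for fences within a fence context.
	 */
	mutex_lock(&v3d->sched_lock);

	ret = drm_sched_job_init(&job->base, sched, entity, owner);
	if (ret) {
		mutex_unlock(&v3d->sched_lock);
		return ret;
	}
	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&v3d->sched_lock);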

 drivers/gpu/drm/v3d/v3d_drv.h |  5 +++++
 drivers/gpu/drm/v3d/v3d_gem.c | 11 +++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index a043ac3aae98..26005abd9c5d 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -85,6 +85,11 @@ struct v3d_dev {
 	 */
 	struct mutex reset_lock;
 
+	/* Lock taken when creating and pushing the GPU scheduler
+	 * jobs, to keep the sched-fence seqnos in order.
+	 */
+	struct mutex sched_lock;
+
 	struct {
 		u32 num_allocated;
 		u32 pages_allocated;
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index b513f9189caf..9ea83bdb9a30 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -550,13 +550,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto fail;
 
+	mutex_lock(&v3d->sched_lock);
 	if (exec->bin.start != exec->bin.end) {
 		ret = drm_sched_job_init(&exec->bin.base,
 					 &v3d->queue[V3D_BIN].sched,
 					 &v3d_priv->sched_entity[V3D_BIN],
 					 v3d_priv);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&v3d->sched_lock);
 			goto fail_unreserve;
+		}
 
 		exec->bin_done_fence =
 			dma_fence_get(&exec->bin.base.s_fence->finished);
@@ -570,12 +573,15 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 				 &v3d->queue[V3D_RENDER].sched,
 				 &v3d_priv->sched_entity[V3D_RENDER],
 				 v3d_priv);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&v3d->sched_lock);
 		goto fail_unreserve;
+	}
 
 	kref_get(&exec->refcount); /* put by scheduler job completion */
 	drm_sched_entity_push_job(&exec->render.base,
 				  &v3d_priv->sched_entity[V3D_RENDER]);
+	mutex_unlock(&v3d->sched_lock);
 
 	v3d_attach_object_fences(exec);
 
@@ -615,6 +621,7 @@ v3d_gem_init(struct drm_device *dev)
 	spin_lock_init(&v3d->job_lock);
 	mutex_init(&v3d->bo_lock);
 	mutex_init(&v3d->reset_lock);
+	mutex_init(&v3d->sched_lock);
 
 	/* Note: We don't allocate address 0.  Various bits of HW
 	 * treat 0 as special, such as the occlusion query counters
-- 
2.17.0
