From: Michel Dänzer <michel.daen...@amd.com>

Defer handling of DRM vblank events which arrive while we are waiting
for a pending flip to complete, and process them once the wait is over.

This is a more robust way to prevent hangs due to nested calls to
drmHandleEvent.

Signed-off-by: Michel Dänzer <michel.daen...@amd.com>
---
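(Note for reviewers, not part of the commit message: a minimal sketch of
the deferral scheme. find_entry(), handle_event(), the "deferring" flag
and the "deferred" list head are hypothetical stand-ins; the patch keys
off event_context.vblank_handler and wait_flip_nesting_level instead.)

    static void vblank_handler(int fd, unsigned int frame, unsigned int sec,
                               unsigned int usec, void *user_ptr)
    {
            struct entry *e = find_entry(user_ptr); /* hypothetical lookup */

            if (!deferring) {
                    /* Normal path: handle the event immediately */
                    handle_event(e, frame, (uint64_t)sec * 1000000 + usec);
            } else {
                    /* drmHandleEvent was called while waiting for a flip:
                     * park the event on a deferred list; it is handled
                     * later, once it's safe to queue a flip again.
                     */
                    e->frame = frame;
                    e->usec = (uint64_t)sec * 1000000 + usec;
                    xorg_list_append(&e->list, &deferred);
            }
    }
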
 src/amdgpu_drm_queue.c | 124 +++++++++++++++++++++++++++++++++++++----
 src/amdgpu_drm_queue.h |   1 +
 src/drmmode_display.c  |  18 ++++--
 src/drmmode_display.h  |   4 ++
 4 files changed, 131 insertions(+), 16 deletions(-)

diff --git a/src/amdgpu_drm_queue.c b/src/amdgpu_drm_queue.c
index 9c0166147..d4353e703 100644
--- a/src/amdgpu_drm_queue.c
+++ b/src/amdgpu_drm_queue.c
@@ -40,6 +40,7 @@
 
 struct amdgpu_drm_queue_entry {
        struct xorg_list list;
+       uint64_t usec;
        uint64_t id;
        uintptr_t seq;
        void *data;
@@ -47,13 +48,27 @@ struct amdgpu_drm_queue_entry {
        xf86CrtcPtr crtc;
        amdgpu_drm_handler_proc handler;
        amdgpu_drm_abort_proc abort;
+       unsigned int frame;
 };
 
 static int amdgpu_drm_queue_refcnt;
 static struct xorg_list amdgpu_drm_queue;
+static struct xorg_list amdgpu_drm_queue_deferred;
 static uintptr_t amdgpu_drm_queue_seq;
 
 
+static void
+amdgpu_drm_queue_handle_one(struct amdgpu_drm_queue_entry *e, unsigned int frame,
+                           uint64_t usec)
+{
+       xorg_list_del(&e->list);
+       if (e->handler) {
+               e->handler(e->crtc, frame, usec, e->data);
+       } else
+               e->abort(e->crtc, e->data);
+       free(e);
+}
+
 /*
  * Handle a DRM event
  */
@@ -66,19 +81,80 @@ amdgpu_drm_queue_handler(int fd, unsigned int frame, unsigned int sec,
 
        xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
                if (e->seq == seq) {
-                       xorg_list_del(&e->list);
-                       if (e->handler)
-                               e->handler(e->crtc, frame,
-                                          (uint64_t)sec * 1000000 + usec,
-                                          e->data);
-                       else
-                               e->abort(e->crtc, e->data);
-                       free(e);
+                       amdgpu_drm_queue_handle_one(e, frame, (uint64_t)sec *
+                                                   1000000 + usec);
                        break;
                }
        }
 }
 
+/*
+ * Handle a DRM vblank event
+ */
+static void
+amdgpu_drm_queue_vblank_handler(int fd, unsigned int frame, unsigned int sec,
+                               unsigned int usec, void *user_ptr)
+{
+       uintptr_t seq = (uintptr_t)user_ptr;
+       struct amdgpu_drm_queue_entry *e, *tmp;
+
+       xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
+               if (e->seq == seq) {
+                       AMDGPUInfoPtr info = AMDGPUPTR(e->crtc->scrn);
+                       drmmode_ptr drmmode = &info->drmmode;
+
+                       e->usec = (uint64_t)sec * 1000000 + usec;
+
+                       if (drmmode->event_context.vblank_handler ==
+                           amdgpu_drm_queue_vblank_handler) {
+                               amdgpu_drm_queue_handle_one(e, frame, e->usec);
+                       } else {
+                               xorg_list_del(&e->list);
+                               e->frame = frame;
+                               xorg_list_append(&e->list,
+                                                &amdgpu_drm_queue_deferred);
+                       }
+
+                       break;
+               }
+       }
+}
+
+/*
+ * Re-queue a DRM vblank event for deferred handling
+ */
+static void
+amdgpu_drm_queue_vblank_defer(int fd, unsigned int frame, unsigned int sec,
+                             unsigned int usec, void *user_ptr)
+{
+       amdgpu_drm_queue_vblank_handler(fd, frame, sec, usec, user_ptr);
+}
+
+/*
+ * Handle deferred DRM vblank events
+ *
+ * This function must be called after amdgpu_drm_wait_pending_flip, once
+ * it's safe to attempt queueing a flip again
+ */
+void
+amdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc)
+{
+       drmmode_crtc_private_ptr drmmode_crtc = crtc->driver_private;
+       drmmode_ptr drmmode = drmmode_crtc->drmmode;
+       struct amdgpu_drm_queue_entry *e, *tmp;
+
+       if (drmmode_crtc->wait_flip_nesting_level == 0 ||
+           --drmmode_crtc->wait_flip_nesting_level > 0)
+               return;
+
+       xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue_deferred, list) {
+               if (e->crtc == crtc)
+                       amdgpu_drm_queue_handle_one(e, e->frame, e->usec);
+       }
+
+       drmmode->event_context.vblank_handler = amdgpu_drm_queue_vblank_handler;
+}
+
 /*
  * Enqueue a potential drm response; when the associated response
  * appears, we've got data to pass to the handler from here
@@ -134,12 +210,17 @@ amdgpu_drm_abort_one(struct amdgpu_drm_queue_entry *e)
 void
 amdgpu_drm_abort_client(ClientPtr client)
 {
-       struct amdgpu_drm_queue_entry *e;
+       struct amdgpu_drm_queue_entry *e, *tmp;
 
        xorg_list_for_each_entry(e, &amdgpu_drm_queue, list) {
                if (e->client == client)
                        e->handler = NULL;
        }
+
+       xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue_deferred, list) {
+               if (e->client == client)
+                       amdgpu_drm_abort_one(e);
+       }
 }
 
 /*
@@ -153,6 +234,13 @@ amdgpu_drm_abort_entry(uintptr_t seq)
        if (seq == AMDGPU_DRM_QUEUE_ERROR)
                return;
 
+       xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue_deferred, list) {
+               if (e->seq == seq) {
+                       amdgpu_drm_abort_one(e);
+                       return;
+               }
+       }
+
        xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
                if (e->seq == seq) {
                        amdgpu_drm_abort_one(e);
@@ -169,6 +257,13 @@ amdgpu_drm_abort_id(uint64_t id)
 {
        struct amdgpu_drm_queue_entry *e, *tmp;
 
+       xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue_deferred, list) {
+               if (e->id == id) {
+                       amdgpu_drm_abort_one(e);
+                       break;
+               }
+       }
+
        xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue, list) {
                if (e->id == id) {
                        amdgpu_drm_abort_one(e);
@@ -186,6 +281,9 @@ void amdgpu_drm_wait_pending_flip(xf86CrtcPtr crtc)
        AMDGPUEntPtr pAMDGPUEnt = AMDGPUEntPriv(crtc->scrn);
        drmmode_ptr drmmode = drmmode_crtc->drmmode;
 
+       drmmode_crtc->wait_flip_nesting_level++;
+       drmmode->event_context.vblank_handler = amdgpu_drm_queue_vblank_defer;
+
        while (drmmode_crtc->flip_pending &&
               drmHandleEvent(pAMDGPUEnt->fd, &drmmode->event_context) > 0);
 }
@@ -200,13 +298,14 @@ amdgpu_drm_queue_init(ScrnInfoPtr scrn)
        drmmode_ptr drmmode = &info->drmmode;
 
        drmmode->event_context.version = 2;
-       drmmode->event_context.vblank_handler = amdgpu_drm_queue_handler;
+       drmmode->event_context.vblank_handler = amdgpu_drm_queue_vblank_handler;
        drmmode->event_context.page_flip_handler = amdgpu_drm_queue_handler;
 
        if (amdgpu_drm_queue_refcnt++)
                return;
 
        xorg_list_init(&amdgpu_drm_queue);
+       xorg_list_init(&amdgpu_drm_queue_deferred);
 }
 
 /*
@@ -222,5 +321,10 @@ amdgpu_drm_queue_close(ScrnInfoPtr scrn)
                        amdgpu_drm_abort_one(e);
        }
 
+       xorg_list_for_each_entry_safe(e, tmp, &amdgpu_drm_queue_deferred, list) {
+               if (e->crtc->scrn == scrn)
+                       amdgpu_drm_abort_one(e);
+       }
+
        amdgpu_drm_queue_refcnt--;
 }
diff --git a/src/amdgpu_drm_queue.h b/src/amdgpu_drm_queue.h
index cb6d5ff59..75609e39b 100644
--- a/src/amdgpu_drm_queue.h
+++ b/src/amdgpu_drm_queue.h
@@ -42,6 +42,7 @@ typedef void (*amdgpu_drm_handler_proc)(xf86CrtcPtr crtc, uint32_t seq,
                                        uint64_t usec, void *data);
 typedef void (*amdgpu_drm_abort_proc)(xf86CrtcPtr crtc, void *data);
 
+void amdgpu_drm_queue_handle_deferred(xf86CrtcPtr crtc);
 uintptr_t amdgpu_drm_queue_alloc(xf86CrtcPtr crtc, ClientPtr client,
                                 uint64_t id, void *data,
                                 amdgpu_drm_handler_proc handler,
diff --git a/src/drmmode_display.c b/src/drmmode_display.c
index 615d3be8f..3b2de6839 100644
--- a/src/drmmode_display.c
+++ b/src/drmmode_display.c
@@ -305,6 +305,9 @@ drmmode_do_crtc_dpms(xf86CrtcPtr crtc, int mode)
                                nominal_frame_rate /= pix_in_frame;
                        drmmode_crtc->dpms_last_fps = nominal_frame_rate;
                }
+
+               drmmode_crtc->dpms_mode = mode;
+               amdgpu_drm_queue_handle_deferred(crtc);
        } else if (drmmode_crtc->dpms_mode != DPMSModeOn && mode == DPMSModeOn) {
                /*
                 * Off->On transition: calculate and accumulate the
@@ -322,8 +325,9 @@ drmmode_do_crtc_dpms(xf86CrtcPtr crtc, int mode)
                        drmmode_crtc->interpolated_vblanks += delta_seq;
 
                }
+
+               drmmode_crtc->dpms_mode = DPMSModeOn;
        }
-       drmmode_crtc->dpms_mode = mode;
 }
 
 static void
@@ -1415,6 +1419,7 @@ done:
                }
        }
 
+       amdgpu_drm_queue_handle_deferred(crtc);
        return ret;
 }
 
@@ -2320,11 +2325,6 @@ drmmode_output_set_tear_free(AMDGPUEntPtr pAMDGPUEnt,
        drmmode_output->tear_free = tear_free;
 
        if (crtc) {
-               /* Wait for pending flips before drmmode_set_mode_major calls
-                * drmmode_crtc_update_tear_free, to prevent a nested
-                * drmHandleEvent call, which would hang
-                */
-               amdgpu_drm_wait_pending_flip(crtc);
                drmmode_set_mode_major(crtc, &crtc->mode, crtc->rotation,
                                       crtc->x, crtc->y);
        }
@@ -3861,6 +3861,7 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client,
        int i;
        uint32_t flip_flags = flip_sync == FLIP_ASYNC ? DRM_MODE_PAGE_FLIP_ASYNC : 0;
        drmmode_flipdata_ptr flipdata;
+       Bool handle_deferred = FALSE;
        uintptr_t drm_queue_seq = 0;
 
        flipdata = calloc(1, sizeof(drmmode_flipdata_rec));
@@ -3942,6 +3943,7 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client,
 
                        if (drmmode_crtc->scanout_update_pending) {
                                amdgpu_drm_wait_pending_flip(crtc);
+                               handle_deferred = TRUE;
                                amdgpu_drm_abort_entry(drmmode_crtc->scanout_update_pending);
                                drmmode_crtc->scanout_update_pending = 0;
                        }
@@ -3975,6 +3977,8 @@ Bool amdgpu_do_pageflip(ScrnInfoPtr scrn, ClientPtr client,
                drm_queue_seq = 0;
        }
 
+       if (handle_deferred)
+               amdgpu_drm_queue_handle_deferred(ref_crtc);
        if (flipdata->flip_count > 0)
                return TRUE;
 
@@ -3995,5 +3999,7 @@ error:
 
        xf86DrvMsg(scrn->scrnIndex, X_WARNING, "Page flip failed: %s\n",
                   strerror(errno));
+       if (handle_deferred)
+               amdgpu_drm_queue_handle_deferred(ref_crtc);
        return FALSE;
 }
diff --git a/src/drmmode_display.h b/src/drmmode_display.h
index 8b949f79d..28d15e282 100644
--- a/src/drmmode_display.h
+++ b/src/drmmode_display.h
@@ -119,6 +119,10 @@ typedef struct {
 
        /* Modeset needed for DPMS on */
        Bool need_modeset;
+       /* For keeping track of nested calls to drm_wait_pending_flip /
+        * drm_queue_handle_deferred
+        */
+       int wait_flip_nesting_level;
        /* A flip to this FB is pending for this CRTC */
        struct drmmode_fb *flip_pending;
        /* The FB currently being scanned out by this CRTC, if any */
-- 
2.18.0
