In the current design, udl_get_urb() may be called asynchronously while
the driver is freeing its URB list in udl_free_urb_list().
The problem is that the synchronization is based on comparing the
urbs.count and urbs.available fields, while urbs.count is cleared only
once, after udl_free_urb_list() has finished; i.e. while
udl_free_urb_list() is running, the state is inconsistent.
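
For reference, the problematic interplay looks roughly like the
following condensed sketch (reconstructed from the pre-patch code that
the diff below removes; error handling and debug output are trimmed):

  struct urb *udl_get_urb_timeout(struct drm_device *dev, long timeout)
  {
          struct udl_device *udl = to_udl(dev);
          struct urb_node *unode = NULL;

          if (!udl->urbs.count)           /* tested without urbs.lock ... */
                  return NULL;

          spin_lock_irq(&udl->urbs.lock);
          if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
                                           !list_empty(&udl->urbs.list),
                                           udl->urbs.lock, timeout))
                  goto unlock;
          unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
          list_del_init(&unode->entry);
          udl->urbs.available--;
  unlock:
          spin_unlock_irq(&udl->urbs.lock);
          return unode ? unode->urb : NULL;
  }

  static void udl_free_urb_list(struct drm_device *dev)
  {
          struct udl_device *udl = to_udl(dev);
          int count = udl->urbs.count;

          while (count--) {
                  /* drains the list entry by entry ... */
                  struct urb *urb = udl_get_urb_timeout(dev, MAX_SCHEDULE_TIMEOUT);
                  /* ... unlink, kill and free the urb ... */
          }
          /* ... but urbs.count is cleared only here, after the loop; a
           * concurrent udl_get_urb() can still pass the unlocked count
           * test above and then sleep on an already-drained list.
           */
          udl->urbs.count = 0;
  }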

To fix this inconsistency, and also to harden the locking scheme, this
patch slightly refactors the code around udl_get_urb() and
udl_free_urb_list().  Now urbs.count is updated under the same spinlock
that protects extracting a URB from the list in udl_free_urb_list().
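
With the refactoring, callers only see the simpler udl_get_urb()
wrapper, which takes urbs.lock internally and applies the default
timeout.  A hypothetical caller (udl_send_something() below is made up
purely for illustration and is not part of this patch) would look like:

  static int udl_send_something(struct drm_device *dev,
                                const void *buf, size_t len)
  {
          struct urb *urb;

          urb = udl_get_urb(dev);  /* waits up to GET_URB_TIMEOUT internally */
          if (!urb)                /* timeout, or the URB list is being freed */
                  return -ENOMEM;

          memcpy(urb->transfer_buffer, buf, len);
          return udl_submit_urb(dev, urb, len);
  }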

Acked-by: Thomas Zimmermann <tzimmerm...@suse.de>
Signed-off-by: Takashi Iwai <ti...@suse.de>
---
 drivers/gpu/drm/udl/udl_drv.h  |  8 +------
 drivers/gpu/drm/udl/udl_main.c | 42 +++++++++++++++++++++++-----------
 2 files changed, 30 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 5923d2e02bc8..d943684b5bbb 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -74,13 +74,7 @@ static inline struct usb_device *udl_to_usb_device(struct udl_device *udl)
 int udl_modeset_init(struct drm_device *dev);
 struct drm_connector *udl_connector_init(struct drm_device *dev);
 
-struct urb *udl_get_urb_timeout(struct drm_device *dev, long timeout);
-
-#define GET_URB_TIMEOUT        HZ
-static inline struct urb *udl_get_urb(struct drm_device *dev)
-{
-       return udl_get_urb_timeout(dev, GET_URB_TIMEOUT);
-}
+struct urb *udl_get_urb(struct drm_device *dev);
 
 int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
 int udl_sync_pending_urbs(struct drm_device *dev);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index de28eeff3155..16aa4a655e7f 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -23,6 +23,8 @@
 #define WRITES_IN_FLIGHT (20)
 #define MAX_VENDOR_DESCRIPTOR_SIZE 256
 
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);
+
 static int udl_parse_vendor_descriptor(struct udl_device *udl)
 {
        struct usb_device *udev = udl_to_usb_device(udl);
@@ -146,15 +148,17 @@ void udl_urb_completion(struct urb *urb)
 static void udl_free_urb_list(struct drm_device *dev)
 {
        struct udl_device *udl = to_udl(dev);
-       int count = udl->urbs.count;
        struct urb_node *unode;
        struct urb *urb;
 
        DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
 
        /* keep waiting and freeing, until we've got 'em all */
-       while (count--) {
-               urb = udl_get_urb_timeout(dev, MAX_SCHEDULE_TIMEOUT);
+       while (udl->urbs.count) {
+               spin_lock_irq(&udl->urbs.lock);
+               urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
+               udl->urbs.count--;
+               spin_unlock_irq(&udl->urbs.lock);
                if (WARN_ON(!urb))
                        break;
                unode = urb->context;
@@ -164,7 +168,8 @@ static void udl_free_urb_list(struct drm_device *dev)
                usb_free_urb(urb);
                kfree(unode);
        }
-       udl->urbs.count = 0;
+
+       wake_up_all(&udl->urbs.sleep);
 }
 
 static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
@@ -228,33 +233,44 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
        return udl->urbs.count;
 }
 
-struct urb *udl_get_urb_timeout(struct drm_device *dev, long timeout)
+static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
 {
-       struct udl_device *udl = to_udl(dev);
-       struct urb_node *unode = NULL;
+       struct urb_node *unode;
 
-       if (!udl->urbs.count)
-               return NULL;
+       assert_spin_locked(&udl->urbs.lock);
 
        /* Wait for an in-flight buffer to complete and get re-queued */
-       spin_lock_irq(&udl->urbs.lock);
        if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
+                                        !udl->urbs.count ||
                                         !list_empty(&udl->urbs.list),
                                         udl->urbs.lock, timeout)) {
                DRM_INFO("wait for urb interrupted: available: %d\n",
                         udl->urbs.available);
-               goto unlock;
+               return NULL;
        }
 
+       if (!udl->urbs.count)
+               return NULL;
+
        unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
        list_del_init(&unode->entry);
        udl->urbs.available--;
 
-unlock:
-       spin_unlock_irq(&udl->urbs.lock);
        return unode ? unode->urb : NULL;
 }
 
+#define GET_URB_TIMEOUT        HZ
+struct urb *udl_get_urb(struct drm_device *dev)
+{
+       struct udl_device *udl = to_udl(dev);
+       struct urb *urb;
+
+       spin_lock_irq(&udl->urbs.lock);
+       urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
+       spin_unlock_irq(&udl->urbs.lock);
+       return urb;
+}
+
 int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
 {
        struct udl_device *udl = to_udl(dev);
-- 
2.35.3
