Export msm_gem_pin_pages_locked() and acquire the reservation lock
directly in the GEM pin callback. Do the same for unpin. This
prepares for further changes.

Dma-buf locking semantics require callers to hold the buffer's
reservation lock when invoking the pin and unpin callbacks. Prepare
msm accordingly by pushing the locking out of the pin/unpin
implementation and into the callers. A follow-up patch will fix
locking for all GEM code at once.
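
For illustration only, the calling convention this prepares for looks
roughly like the sketch below once the follow-up locking rework lands.
It is not part of this patch, it simplifies the real call chain, and
example_prime_pin_locked() is a made-up name:

  /* needs <linux/dma-resv.h> and <drm/drm_gem.h> */
  static int example_prime_pin_locked(struct drm_gem_object *obj)
  {
  	int ret;

  	ret = dma_resv_lock(obj->resv, NULL);
  	if (ret)
  		return ret;

  	/* The pin callback runs with the reservation lock held. */
  	ret = obj->funcs->pin(obj);	/* e.g. msm_gem_prime_pin() */

  	dma_resv_unlock(obj->resv);

  	return ret;
  }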

Signed-off-by: Thomas Zimmermann <tzimmerm...@suse.de>
---
 drivers/gpu/drm/msm/msm_gem.c       | 12 ++++++------
 drivers/gpu/drm/msm/msm_gem.h       |  4 ++--
 drivers/gpu/drm/msm/msm_gem_prime.c | 24 +++++++++++++++++++-----
 3 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb729353d3a8d..a5c6498a43f06 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -257,24 +257,24 @@ static void pin_obj_locked(struct drm_gem_object *obj)
        mutex_unlock(&priv->lru.lock);
 }
 
-struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
 {
        struct page **p;
 
-       msm_gem_lock(obj);
+       msm_gem_assert_locked(obj);
+
        p = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
        if (!IS_ERR(p))
                pin_obj_locked(obj);
-       msm_gem_unlock(obj);
 
        return p;
 }
 
-void msm_gem_unpin_pages(struct drm_gem_object *obj)
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj)
 {
-       msm_gem_lock(obj);
+       msm_gem_assert_locked(obj);
+
        msm_gem_unpin_locked(obj);
-       msm_gem_unlock(obj);
 }
 
 static pgprot_t msm_gem_pgprot(struct msm_gem_object *msm_obj, pgprot_t prot)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 8d414b072c29d..85f0257e83dab 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -140,8 +140,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace);
 void msm_gem_pin_obj_locked(struct drm_gem_object *obj);
-struct page **msm_gem_pin_pages(struct drm_gem_object *obj);
-void msm_gem_unpin_pages(struct drm_gem_object *obj);
+struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj);
+void msm_gem_unpin_pages_locked(struct drm_gem_object *obj);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
                struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 0915f3b68752e..0d22df53ab98a 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -47,13 +47,27 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 
 int msm_gem_prime_pin(struct drm_gem_object *obj)
 {
-       if (!obj->import_attach)
-               msm_gem_pin_pages(obj);
-       return 0;
+       struct page **pages;
+       int ret = 0;
+
+       if (obj->import_attach)
+               return 0;
+
+       msm_gem_lock(obj);
+       pages = msm_gem_pin_pages_locked(obj);
+       if (IS_ERR(pages))
+               ret = PTR_ERR(pages);
+       msm_gem_unlock(obj);
+
+       return ret;
 }
 
 void msm_gem_prime_unpin(struct drm_gem_object *obj)
 {
-       if (!obj->import_attach)
-               msm_gem_unpin_pages(obj);
+       if (obj->import_attach)
+               return;
+
+       msm_gem_lock(obj);
+       msm_gem_unpin_pages_locked(obj);
+       msm_gem_unlock(obj);
 }
-- 
2.43.2
