From: Rob Clark <robdcl...@chromium.org>

This way we only look up the vma once per object per submit, for both the
submit and retire paths.
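
With that, the submit path resolves the vma once and caches it in
submit->bos[i].vma, so retire/cleanup can unpin without a second lookup.
Roughly, as a sketch of the flow introduced below (not a verbatim quote
of the patch):

    vma = msm_gem_get_vma_locked(obj, submit->aspace);
    if (IS_ERR(vma))
            return PTR_ERR(vma);

    ret = msm_gem_pin_vma_locked(obj, vma);
    if (ret)
            return ret;

    submit->bos[i].vma = vma;
    ...
    /* later, on retire/cleanup: */
    msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);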

Signed-off-by: Rob Clark <robdcl...@chromium.org>
---
 drivers/gpu/drm/msm/msm_gem.c        | 60 +++++++++++++---------------
 drivers/gpu/drm/msm/msm_gem.h        |  9 +++--
 drivers/gpu/drm/msm/msm_gem_submit.c | 17 +++++---
 3 files changed, 44 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 218744a490a4..e8107a22c33a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -407,7 +407,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj,
        return vma;
 }
 
-static int msm_gem_pin_iova(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        struct page **pages;
@@ -439,6 +439,26 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj, struct msm_gem_vma *vma)
        return ret;
 }
 
+void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
+{
+       struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+       GEM_WARN_ON(!msm_gem_is_locked(obj));
+
+       msm_gem_unmap_vma(vma->aspace, vma);
+
+       msm_obj->pin_count--;
+       GEM_WARN_ON(msm_obj->pin_count < 0);
+
+       update_inactive(msm_obj);
+}
+
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+                                          struct msm_gem_address_space *aspace)
+{
+       return get_vma_locked(obj, aspace, 0, U64_MAX);
+}
+
 static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova,
                u64 range_start, u64 range_end)
@@ -452,7 +472,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
-       ret = msm_gem_pin_iova(obj, vma);
+       ret = msm_gem_pin_vma_locked(obj, vma);
        if (!ret)
                *iova = vma->iova;
 
@@ -476,12 +496,6 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
        return ret;
 }
 
-int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
-               struct msm_gem_address_space *aspace, uint64_t *iova)
-{
-       return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
-}
-
 /* get iova and pin it. Should have a matching put */
 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova)
@@ -511,29 +525,6 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
        return ret;
 }
 
-/*
- * Locked variant of msm_gem_unpin_iova()
- */
-void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
-               struct msm_gem_address_space *aspace)
-{
-       struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       struct msm_gem_vma *vma;
-
-       GEM_WARN_ON(!msm_gem_is_locked(obj));
-
-       vma = lookup_vma(obj, aspace);
-
-       if (!GEM_WARN_ON(!vma)) {
-               msm_gem_unmap_vma(aspace, vma);
-
-               msm_obj->pin_count--;
-               GEM_WARN_ON(msm_obj->pin_count < 0);
-
-               update_inactive(msm_obj);
-       }
-}
-
 /*
  * Unpin a iova by updating the reference counts. The memory isn't actually
  * purged until something else (shrinker, mm_notifier, destroy, etc) decides
@@ -542,8 +533,13 @@ void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace)
 {
+       struct msm_gem_vma *vma;
+
        msm_gem_lock(obj);
-       msm_gem_unpin_iova_locked(obj, aspace);
+       vma = lookup_vma(obj, aspace);
+       if (!GEM_WARN_ON(!vma)) {
+               msm_gem_unpin_vma_locked(obj, vma);
+       }
        msm_gem_unlock(obj);
 }
 
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 772de010a669..f98264cf130d 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -133,17 +133,17 @@ struct msm_gem_object {
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma);
+struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj,
+                                          struct msm_gem_address_space *aspace);
 int msm_gem_get_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova);
 int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova,
                u64 range_start, u64 range_end);
-int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
-               struct msm_gem_address_space *aspace, uint64_t *iova);
 int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace, uint64_t *iova);
-void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
-               struct msm_gem_address_space *aspace);
 void msm_gem_unpin_iova(struct drm_gem_object *obj,
                struct msm_gem_address_space *aspace);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
@@ -369,6 +369,7 @@ struct msm_gem_submit {
                        uint32_t handle;
                };
                uint64_t iova;
+               struct msm_gem_vma *vma;
        } bos[];
 };
 
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index c6d60c8d286d..91da05af40ee 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -232,7 +232,7 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i,
        unsigned flags = submit->bos[i].flags & cleanup_flags;
 
        if (flags & BO_PINNED)
-               msm_gem_unpin_iova_locked(obj, submit->aspace);
+               msm_gem_unpin_vma_locked(obj, submit->bos[i].vma);
 
        if (flags & BO_ACTIVE)
                msm_gem_active_put(obj);
@@ -365,21 +365,26 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct drm_gem_object *obj = &submit->bos[i].obj->base;
-               uint64_t iova;
+               struct msm_gem_vma *vma;
 
                /* if locking succeeded, pin bo: */
-               ret = msm_gem_get_and_pin_iova_locked(obj,
-                               submit->aspace, &iova);
+               vma = msm_gem_get_vma_locked(obj, submit->aspace);
+               if (IS_ERR(vma)) {
+                       ret = PTR_ERR(vma);
+                       break;
+               }
 
+               ret = msm_gem_pin_vma_locked(obj, vma);
                if (ret)
                        break;
 
                submit->bos[i].flags |= BO_PINNED;
+               submit->bos[i].vma = vma;
 
-               if (iova == submit->bos[i].iova) {
+               if (vma->iova == submit->bos[i].iova) {
                        submit->bos[i].flags |= BO_VALID;
                } else {
-                       submit->bos[i].iova = iova;
+                       submit->bos[i].iova = vma->iova;
                        /* iova changed, so address in cmdstream is not valid: */
                        submit->bos[i].flags &= ~BO_VALID;
                        submit->valid = false;
-- 
2.35.1
