The series is:

Acked-by: Leo Liu <leo....@amd.com>


On 07/16/2018 06:03 PM, Leo Liu wrote:


On 2018-07-16 04:01 PM, Marek Olšák wrote:
From: Marek Olšák <marek.ol...@amd.com>

Dependencies between rings are only inserted correctly if a buffer is
represented by a single unique amdgpu_winsys_bo instance. Use a hash table
keyed by amdgpu_bo_handle so that there is exactly one amdgpu_winsys_bo
per amdgpu_bo_handle.
The series is:
Tested-by: Leo Liu <leo....@amd.com>


v2: return offset and stride properly
---
  src/gallium/winsys/amdgpu/drm/amdgpu_bo.c     | 36 ++++++++++++++++---
  src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c |  5 +++
  src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h |  5 +++
  3 files changed, 41 insertions(+), 5 deletions(-)
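
For anyone following along, the fix is a standard handle-deduplication
pattern: key a table by the kernel's amdgpu_bo_handle so that every import
of the same underlying buffer returns the same winsys object, and fences
and inter-ring dependencies all attach to that one instance. A minimal
sketch of the pattern, using the same util_hash_table/simple_mtx helpers
as the patch (create_bo_for_handle is a hypothetical stand-in for the real
creation path):

   static struct pb_buffer *import_deduped(struct amdgpu_winsys *ws,
                                           amdgpu_bo_handle handle)
   {
      struct amdgpu_winsys_bo *bo;

      simple_mtx_lock(&ws->bo_export_table_lock);
      bo = util_hash_table_get(ws->bo_export_table, handle);
      if (bo) {
         /* Already known: hand out another reference to the same bo. */
         p_atomic_inc(&bo->base.reference.count);
         simple_mtx_unlock(&ws->bo_export_table_lock);
         return &bo->base;
      }

      bo = create_bo_for_handle(ws, handle);   /* hypothetical helper */
      if (bo)
         util_hash_table_set(ws->bo_export_table, handle, bo);
      simple_mtx_unlock(&ws->bo_export_table_lock);
      return bo ? &bo->base : NULL;
   }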

diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
index d9192c209e2..80563d3df98 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_bo.c
@@ -21,20 +21,21 @@
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
  */

 #include "amdgpu_cs.h"

 #include "util/os_time.h"
+#include "util/u_hash_table.h"
 #include "state_tracker/drm_driver.h"
 #include <amdgpu_drm.h>
 #include <xf86drm.h>
 #include <stdio.h>
 #include <inttypes.h>

 #ifndef AMDGPU_GEM_CREATE_VM_ALWAYS_VALID
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID (1 << 6)
 #endif
@@ -172,20 +173,24 @@ void amdgpu_bo_destroy(struct pb_buffer *_buf)

    assert(bo->bo && "must not be called for slab entries");

    if (ws->debug_all_bos) {
       simple_mtx_lock(&ws->global_bo_list_lock);
       LIST_DEL(&bo->u.real.global_list_item);
       ws->num_buffers--;
       simple_mtx_unlock(&ws->global_bo_list_lock);
    }

+   simple_mtx_lock(&ws->bo_export_table_lock);
+   util_hash_table_remove(ws->bo_export_table, bo->bo);
+   simple_mtx_unlock(&ws->bo_export_table_lock);
+
    amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
    amdgpu_va_range_free(bo->u.real.va_handle);
    amdgpu_bo_free(bo->bo);

    amdgpu_bo_remove_fences(bo);

    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
       ws->allocated_vram -= align64(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
       ws->allocated_gtt -= align64(bo->base.size, ws->info.gart_page_size);
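
The destroy hunk above is the other half of the invariant: the table entry
has to be unpublished under the same lock before amdgpu_bo_free() drops
the kernel handle, otherwise a concurrent import could find a pointer to a
bo that is being torn down. In sketch form (same illustrative names as
above):

   static void destroy_deduped(struct amdgpu_winsys *ws,
                               struct amdgpu_winsys_bo *bo)
   {
      /* Unpublish first, under the lock the import path takes... */
      simple_mtx_lock(&ws->bo_export_table_lock);
      util_hash_table_remove(ws->bo_export_table, bo->bo);
      simple_mtx_unlock(&ws->bo_export_table_lock);

      /* ...then it is safe to release the kernel handle. */
      amdgpu_bo_free(bo->bo);
      FREE(bo);
   }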
@@ -1278,24 +1283,41 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
    case WINSYS_HANDLE_TYPE_SHARED:
       type = amdgpu_bo_handle_type_gem_flink_name;
       break;
    case WINSYS_HANDLE_TYPE_FD:
       type = amdgpu_bo_handle_type_dma_buf_fd;
       break;
    default:
       return NULL;
    }

+   if (stride)
+      *stride = whandle->stride;
+   if (offset)
+      *offset = whandle->offset;
+
    r = amdgpu_bo_import(ws->dev, type, whandle->handle, &result);
    if (r)
       return NULL;

+   simple_mtx_lock(&ws->bo_export_table_lock);
+   bo = util_hash_table_get(ws->bo_export_table, result.buf_handle);
+
+   /* If the amdgpu_winsys_bo instance already exists, bump the reference
+    * counter and return it.
+    */
+   if (bo) {
+      p_atomic_inc(&bo->base.reference.count);
+      simple_mtx_unlock(&ws->bo_export_table_lock);
+      return &bo->base;
+   }
+
    /* Get initial domains. */
    r = amdgpu_bo_query_info(result.buf_handle, &info);
    if (r)
       goto error;

    r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
                              result.alloc_size, 1 << 20, 0, &va, &va_handle,
                              AMDGPU_VA_RANGE_HIGH);
    if (r)
       goto error;
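
This hunk is also where the v2 note applies: *stride and *offset are now
written before the table lookup, so the early-return path for an
already-known bo still reports them. From the caller's side, the payoff
looks roughly like this (hypothetical usage, assuming the winsys'
buffer_from_handle entry point):

   struct pb_buffer *a = ws->base.buffer_from_handle(&ws->base, &whandle,
                                                     &stride, &offset);
   struct pb_buffer *b = ws->base.buffer_from_handle(&ws->base, &whandle,
                                                     &stride, &offset);
   assert(a == b);          /* one amdgpu_winsys_bo per amdgpu_bo_handle */
   pb_reference(&b, NULL);  /* drop the extra reference */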
@@ -1319,49 +1341,49 @@ static struct pb_buffer *amdgpu_bo_from_handle(struct radeon_winsys *rws,
    bo->bo = result.buf_handle;
    bo->base.size = result.alloc_size;
    bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
    bo->ws = ws;
    bo->va = va;
    bo->u.real.va_handle = va_handle;
    bo->initial_domain = initial;
    bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
    bo->is_shared = true;

-   if (stride)
-      *stride = whandle->stride;
-   if (offset)
-      *offset = whandle->offset;
-
    if (bo->initial_domain & RADEON_DOMAIN_VRAM)
       ws->allocated_vram += align64(bo->base.size, ws->info.gart_page_size);
    else if (bo->initial_domain & RADEON_DOMAIN_GTT)
       ws->allocated_gtt += align64(bo->base.size, ws->info.gart_page_size);

    amdgpu_add_buffer_to_global_list(bo);

+   util_hash_table_set(ws->bo_export_table, bo->bo, bo);
+   simple_mtx_unlock(&ws->bo_export_table_lock);
+
    return &bo->base;

 error:
+   simple_mtx_unlock(&ws->bo_export_table_lock);
    if (bo)
       FREE(bo);
    if (va_handle)
       amdgpu_va_range_free(va_handle);
    amdgpu_bo_free(result.buf_handle);
    return NULL;
 }

 static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
                                  unsigned stride, unsigned offset,
                                  unsigned slice_size,
                                  struct winsys_handle *whandle)
 {
    struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(buffer);
+   struct amdgpu_winsys *ws = bo->ws;
    enum amdgpu_bo_handle_type type;
    int r;

    /* Don't allow exports of slab entries and sparse buffers. */
    if (!bo->bo)
       return false;

    bo->u.real.use_reusable_pool = false;

    switch (whandle->type) {
@@ -1375,20 +1397,24 @@ static bool amdgpu_bo_get_handle(struct pb_buffer *buffer,
       type = amdgpu_bo_handle_type_kms;
       break;
    default:
       return false;
    }

    r = amdgpu_bo_export(bo->bo, type, &whandle->handle);
    if (r)
       return false;

+   simple_mtx_lock(&ws->bo_export_table_lock);
+   util_hash_table_set(ws->bo_export_table, bo->bo, bo);
+   simple_mtx_unlock(&ws->bo_export_table_lock);
+
    whandle->stride = stride;
    whandle->offset = offset;
    whandle->offset += slice_size * whandle->layer;
    bo->is_shared = true;
    return true;
 }

 static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
                                             void *pointer, uint64_t size)
 {
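
Publishing the bo at export time matters as much as at import time: a
locally allocated buffer that gets flinked or exported as a dma-buf can
come back through amdgpu_bo_from_handle later, and the lookup only works
if the export path registered it first. A round trip that this fixes, in
sketch form (hypothetical usage, assuming the winsys' buffer_get_handle
and buffer_from_handle entry points):

   struct winsys_handle h = { .type = WINSYS_HANDLE_TYPE_FD };
   ws->base.buffer_get_handle(buf, stride, offset, slice_size, &h);
   struct pb_buffer *again = ws->base.buffer_from_handle(&ws->base, &h,
                                                         &stride, &offset);
   assert(again == buf);   /* same instance thanks to bo_export_table */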
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
index db7a4d7033c..882f500bc69 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
@@ -85,21 +85,23 @@ static void amdgpu_winsys_destroy(struct radeon_winsys *rws)

    if (ws->reserve_vmid)
       amdgpu_vm_unreserve_vmid(ws->dev, 0);

    if (util_queue_is_initialized(&ws->cs_queue))
       util_queue_destroy(&ws->cs_queue);

    simple_mtx_destroy(&ws->bo_fence_lock);
    pb_slabs_deinit(&ws->bo_slabs);
    pb_cache_deinit(&ws->bo_cache);
+   util_hash_table_destroy(ws->bo_export_table);
    simple_mtx_destroy(&ws->global_bo_list_lock);
+   simple_mtx_destroy(&ws->bo_export_table_lock);
    do_winsys_deinit(ws);
    FREE(rws);
 }

 static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                      struct radeon_info *info)
 {
    *info = ((struct amdgpu_winsys *)rws)->info;
 }
@@ -307,22 +309,25 @@ amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
    ws->base.cs_request_feature = amdgpu_cs_request_feature;
    ws->base.query_value = amdgpu_query_value;
    ws->base.read_registers = amdgpu_read_registers;
    ws->base.get_chip_name = amdgpu_get_chip_name;

    amdgpu_bo_init_functions(ws);
    amdgpu_cs_init_functions(ws);
    amdgpu_surface_init_functions(ws);

    LIST_INITHEAD(&ws->global_bo_list);
+   ws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);
+
    (void) simple_mtx_init(&ws->global_bo_list_lock, mtx_plain);
    (void) simple_mtx_init(&ws->bo_fence_lock, mtx_plain);
+   (void) simple_mtx_init(&ws->bo_export_table_lock, mtx_plain);

    if (!util_queue_init(&ws->cs_queue, "cs", 8, 1,
                         UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
       amdgpu_winsys_destroy(&ws->base);
       simple_mtx_unlock(&dev_tab_mutex);
       return NULL;
    }

    /* Create the screen at the end. The winsys must be initialized
     * completely.
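
util_hash_table_create() takes a hash callback and a memcmp-style compare
callback. The keys here are amdgpu_bo_handle pointers, so pointer identity
is all that is needed; hash_pointer/compare_pointers are not added by this
patch, so presumably they already exist in this file or a shared header.
For pointer keys they only need to be roughly:

   static unsigned hash_pointer(void *key)
   {
      return (unsigned)(uintptr_t)key;   /* the pointer bits are the hash */
   }

   static int compare_pointers(void *key1, void *key2)
   {
      return key1 != key2;   /* memcmp convention: 0 means equal */
   }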
diff --git a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
index 8079255e4cf..c355eff5262 100644
--- a/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
+++ b/src/gallium/winsys/amdgpu/drm/amdgpu_winsys.h
@@ -78,20 +78,25 @@ struct amdgpu_winsys {

    bool check_vm;
    bool debug_all_bos;
    bool reserve_vmid;
    bool zero_all_vram_allocs;

    /* List of all allocated buffers */
    simple_mtx_t global_bo_list_lock;
    struct list_head global_bo_list;
    unsigned num_buffers;
+
+   /* For returning the same amdgpu_winsys_bo instance for exported
+    * and re-imported buffers. */
+   struct util_hash_table *bo_export_table;
+   simple_mtx_t bo_export_table_lock;
 };

 static inline struct amdgpu_winsys *
 amdgpu_winsys(struct radeon_winsys *base)
 {
    return (struct amdgpu_winsys*)base;
 }

 void amdgpu_surface_init_functions(struct amdgpu_winsys *ws);

_______________________________________________
mesa-dev mailing list
mesa-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/mesa-dev
