Module: Mesa
Branch: main
Commit: 9fd155697ab3a9b9409af6a08dfb6a70ce5be3e6
URL:    
http://cgit.freedesktop.org/mesa/mesa/commit/?id=9fd155697ab3a9b9409af6a08dfb6a70ce5be3e6

Author: Mike Blumenkrantz <[email protected]>
Date:   Mon Jan  3 14:33:12 2022 -0500

zink: support sparse texture range commits

this is a bit duplicated because the buffer and image commit code is
a little shared but not enough to combine without becoming spaghetti

this will only get worse once multisampling is supported

Acked-by: Dave Airlie <[email protected]>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/14381>

---

 src/gallium/drivers/zink/zink_bo.c      | 196 +++++++++++++++++++++++++++++---
 src/gallium/drivers/zink/zink_bo.h      |   2 +-
 src/gallium/drivers/zink/zink_context.c |   2 +-
 3 files changed, 185 insertions(+), 15 deletions(-)

diff --git a/src/gallium/drivers/zink/zink_bo.c b/src/gallium/drivers/zink/zink_bo.c
index 750c2a571e1..017a9646a7c 100644
--- a/src/gallium/drivers/zink/zink_bo.c
+++ b/src/gallium/drivers/zink/zink_bo.c
@@ -694,7 +694,7 @@ zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo)
 }
 
 static bool
-do_commit_single(struct zink_screen *screen, struct zink_resource *res, struct zink_bo *bo, uint32_t offset, uint32_t size, bool commit)
+buffer_commit_single(struct zink_screen *screen, struct zink_resource *res, struct zink_bo *bo, uint32_t offset, uint32_t size, bool commit)
 {
    VkBindSparseInfo sparse = {0};
    sparse.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
@@ -715,14 +715,12 @@ do_commit_single(struct zink_screen *screen, struct zink_resource *res, struct z
 
    VkQueue queue = screen->threaded ? screen->thread_queue : screen->queue;
 
-   simple_mtx_lock(&screen->queue_lock);
    VkResult ret = VKSCR(QueueBindSparse)(queue, 1, &sparse, VK_NULL_HANDLE);
-   simple_mtx_unlock(&screen->queue_lock);
    return zink_screen_handle_vkresult(screen, ret);
 }
 
-bool
-zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit)
+static bool
+buffer_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit)
 {
    bool ok = true;
    struct zink_bo *bo = res->obj->bo;
@@ -736,8 +734,6 @@ zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t o
    uint32_t va_page = offset / ZINK_SPARSE_BUFFER_PAGE_SIZE;
   uint32_t end_va_page = va_page + DIV_ROUND_UP(size, ZINK_SPARSE_BUFFER_PAGE_SIZE);
 
-   simple_mtx_lock(&bo->lock);
-
    if (commit) {
       while (va_page < end_va_page) {
          uint32_t span_va_page;
@@ -764,9 +760,9 @@ zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t o
                ok = false;
                goto out;
             }
-            if (!do_commit_single(screen, res, backing->bo,
-                                  (uint64_t)span_va_page * ZINK_SPARSE_BUFFER_PAGE_SIZE,
-                                  (uint64_t)backing_size * ZINK_SPARSE_BUFFER_PAGE_SIZE, true)) {
+            if (!buffer_commit_single(screen, res, backing->bo,
+                                      (uint64_t)span_va_page * ZINK_SPARSE_BUFFER_PAGE_SIZE,
+                                      (uint64_t)backing_size * ZINK_SPARSE_BUFFER_PAGE_SIZE, true)) {
 
               ok = sparse_backing_free(screen, bo, backing, backing_start, backing_size);
                assert(ok && "sufficient memory should already be allocated");
@@ -785,9 +781,9 @@ zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t o
          }
       }
    } else {
-      if (!do_commit_single(screen, res, NULL,
-                            (uint64_t)va_page * ZINK_SPARSE_BUFFER_PAGE_SIZE,
-                            (uint64_t)(end_va_page - va_page) * ZINK_SPARSE_BUFFER_PAGE_SIZE, false)) {
+      if (!buffer_commit_single(screen, res, NULL,
+                                (uint64_t)va_page * ZINK_SPARSE_BUFFER_PAGE_SIZE,
+                                (uint64_t)(end_va_page - va_page) * ZINK_SPARSE_BUFFER_PAGE_SIZE, false)) {
          ok = false;
          goto out;
       }
@@ -827,8 +823,182 @@ zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t o
       }
    }
 out:
+   return ok;
+}
+
+static bool
+texture_commit_single(struct zink_screen *screen, struct zink_resource *res, struct zink_bo *bo,
+                      VkImageSubresource *subresource, VkOffset3D *offset, VkExtent3D *extents, bool commit)
+{
+   VkBindSparseInfo sparse = {0};
+   sparse.sType = VK_STRUCTURE_TYPE_BIND_SPARSE_INFO;
+   sparse.imageBindCount = 1;
+
+   VkSparseImageMemoryBindInfo sparse_ibind;
+   VkSparseImageMemoryBind ibind;
+   /* TODO: msaa needs miptail */
+   //VkSparseImageOpaqueMemoryBindInfo sparse_obind;
+   ibind.subresource = *subresource;
+   ibind.offset = *offset;
+   ibind.extent = *extents;
+   ibind.memory = commit ? bo->mem : VK_NULL_HANDLE;
+   ibind.memoryOffset = 0;
+   ibind.flags = 0;
+   sparse_ibind.image = res->obj->image;
+   sparse_ibind.bindCount = 1;
+   sparse_ibind.pBinds = &ibind;
+   sparse.pImageBinds = &sparse_ibind;
+
+   VkQueue queue = screen->threaded ? screen->thread_queue : screen->queue;
+
+   VkResult ret = VKSCR(QueueBindSparse)(queue, 1, &sparse, VK_NULL_HANDLE);
+   return zink_screen_handle_vkresult(screen, ret);
+}
+
+bool
+zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit)
+{
+   bool ok = true;
+   struct zink_bo *bo = res->obj->bo;
+
+   simple_mtx_lock(&screen->queue_lock);
+   simple_mtx_lock(&bo->lock);
+   if (res->base.b.target == PIPE_BUFFER) {
+      ok = buffer_bo_commit(screen, res, box->x, box->width, commit);
+      goto out;
+   }
+
+   int gwidth, gheight, gdepth;
+   gwidth = res->sparse.formatProperties.imageGranularity.width;
+   gheight = res->sparse.formatProperties.imageGranularity.height;
+   gdepth = res->sparse.formatProperties.imageGranularity.depth;
+   assert(gwidth && gheight && gdepth);
+
+   struct zink_sparse_commitment *comm = bo->u.sparse.commitments;
+   VkImageSubresource subresource = { res->aspect, level, 0 };
+   unsigned nwidth = DIV_ROUND_UP(box->width, gwidth);
+   unsigned nheight = DIV_ROUND_UP(box->height, gheight);
+   unsigned ndepth = DIV_ROUND_UP(box->depth, gdepth);
+   VkExtent3D lastBlockExtent = {
+                          (box->width % gwidth) ? box->width % gwidth : gwidth,
+                          (box->height % gheight) ? box->height % gheight : gheight,
+                          (box->depth % gdepth) ? box->depth % gdepth : gdepth
+   };
+
+   for (unsigned d = 0; d < ndepth; d++) {
+      for (unsigned h = 0; h < nheight; h++) {
+         for (unsigned w = 0; w < nwidth; w++) {
+            // Offset
+            VkOffset3D off;
+            off.x = w * gwidth;
+            off.y = h * gheight;
+            if (res->base.b.target == PIPE_TEXTURE_CUBE) {
+               subresource.arrayLayer = d * gdepth;
+               off.z = 0;
+            } else {
+               off.z = d * gdepth;
+            }
+            // Size of the page
+            VkExtent3D extent;
+            extent.width = (w == nwidth - 1) ? lastBlockExtent.width : gwidth;
+            extent.height = (h == nheight - 1) ? lastBlockExtent.height : gheight;
+            extent.depth = (d == ndepth - 1 && res->base.b.target != PIPE_TEXTURE_CUBE) ? lastBlockExtent.depth : gdepth;
+            uint32_t va_page = (d + (box->z / gdepth)) * ((res->base.b.width0 / gwidth) * (res->base.b.height0 / gheight)) +
+                              (h + (box->y / gheight)) * (res->base.b.width0 / gwidth) +
+                              (w + (box->x / gwidth));
+
+            uint32_t end_va_page = va_page + 1;
+
+            if (commit) {
+               while (va_page < end_va_page) {
+                  uint32_t span_va_page;
+
+                  /* Skip pages that are already committed. */
+                  if (comm[va_page].backing) {
+                     va_page++;
+                     continue;
+                  }
+
+                  /* Determine length of uncommitted span. */
+                  span_va_page = va_page;
+                  while (va_page < end_va_page && !comm[va_page].backing)
+                     va_page++;
+
+                  /* Fill the uncommitted span with chunks of backing memory. */
+                  while (span_va_page < va_page) {
+                     struct zink_sparse_backing *backing;
+                     uint32_t backing_start, backing_size;
+
+                     backing_size = va_page - span_va_page;
+                     backing = sparse_backing_alloc(screen, bo, &backing_start, &backing_size);
+                     if (!backing) {
+                        ok = false;
+                        goto out;
+                     }
+                     if (!texture_commit_single(screen, res, backing->bo, &subresource, &off, &extent, true)) {
+                        ok = sparse_backing_free(screen, bo, backing, backing_start, backing_size);
+                        assert(ok && "sufficient memory should already be allocated");
+
+                        ok = false;
+                        goto out;
+                     }
+
+                     while (backing_size) {
+                        comm[span_va_page].backing = backing;
+                        comm[span_va_page].page = backing_start;
+                        span_va_page++;
+                        backing_start++;
+                        backing_size--;
+                     }
+                  }
+               }
+            } else {
+               if (!texture_commit_single(screen, res, NULL, &subresource, &off, &extent, false)) {
+                  ok = false;
+                  goto out;
+               }
+
+               while (va_page < end_va_page) {
+                  struct zink_sparse_backing *backing;
+                  uint32_t backing_start;
+                  uint32_t span_pages;
+
+                  /* Skip pages that are already uncommitted. */
+                  if (!comm[va_page].backing) {
+                     va_page++;
+                     continue;
+                  }
+
+                  /* Group contiguous spans of pages. */
+                  backing = comm[va_page].backing;
+                  backing_start = comm[va_page].page;
+                  comm[va_page].backing = NULL;
+
+                  span_pages = 1;
+                  va_page++;
+
+                  while (va_page < end_va_page &&
+                         comm[va_page].backing == backing &&
+                         comm[va_page].page == backing_start + span_pages) {
+                     comm[va_page].backing = NULL;
+                     va_page++;
+                     span_pages++;
+                  }
+
+                  if (!sparse_backing_free(screen, bo, backing, backing_start, span_pages)) {
+                     /* Couldn't allocate tracking data structures, so we have to leak */
+                     fprintf(stderr, "zink: leaking sparse backing memory\n");
+                     ok = false;
+                  }
+               }
+            }
+         }
+      }
+   }
+out:
 
    simple_mtx_unlock(&bo->lock);
+   simple_mtx_unlock(&screen->queue_lock);
    return ok;
 }
 
diff --git a/src/gallium/drivers/zink/zink_bo.h b/src/gallium/drivers/zink/zink_bo.h
index 7dc4d0c341d..82cedb170d4 100644
--- a/src/gallium/drivers/zink/zink_bo.h
+++ b/src/gallium/drivers/zink/zink_bo.h
@@ -199,7 +199,7 @@ void
 zink_bo_unmap(struct zink_screen *screen, struct zink_bo *bo);
 
 bool
-zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, uint32_t offset, uint32_t size, bool commit);
+zink_bo_commit(struct zink_screen *screen, struct zink_resource *res, unsigned level, struct pipe_box *box, bool commit);
 
 static inline bool
 zink_bo_has_unflushed_usage(const struct zink_bo *bo)
diff --git a/src/gallium/drivers/zink/zink_context.c b/src/gallium/drivers/zink/zink_context.c
index a059861cf05..5352c2c6e7b 100644
--- a/src/gallium/drivers/zink/zink_context.c
+++ b/src/gallium/drivers/zink/zink_context.c
@@ -3897,7 +3897,7 @@ zink_resource_commit(struct pipe_context *pctx, struct pipe_resource *pres, unsi
    if (zink_resource_has_unflushed_usage(res))
       zink_flush_queue(ctx);
 
-   bool ret = zink_bo_commit(screen, res, box->x, box->width, commit);
+   bool ret = zink_bo_commit(screen, res, level, box, commit);
    if (!ret)
       check_device_lost(ctx);
 

Reply via email to