Module: Mesa
Branch: main
Commit: 12a485dbf91635aa5888fa21a4370937efecf292
URL:    http://cgit.freedesktop.org/mesa/mesa/commit/?id=12a485dbf91635aa5888fa21a4370937efecf292

Author: Jason Volk <[email protected]>
Date:   Thu Apr 21 14:40:09 2022 -0700

r600: Improve compute memory pool performance; reduce fragmentation.

The compute memory pool forced a defragmentation (a left-packing relocation)
of items prior to promoting (adding) items to the tail end of the pool.

This patch instead makes an initial pass over the fragmented pool intent on
promoting items back to where they may have been recently demoted, filling
in the gaps first before conducting the defragmentation (if at all).

Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16117>

---

 src/gallium/drivers/r600/compute_memory_pool.c | 37 ++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/src/gallium/drivers/r600/compute_memory_pool.c b/src/gallium/drivers/r600/compute_memory_pool.c
index d0f6979dc66..0157b0508d2 100644
--- a/src/gallium/drivers/r600/compute_memory_pool.c
+++ b/src/gallium/drivers/r600/compute_memory_pool.c
@@ -253,6 +253,43 @@ int compute_memory_finalize_pending(struct compute_memory_pool* pool,
                        return -1;
        }
        else if (pool->status & POOL_FRAGMENTED) {
+               /* Loop through all unallocated items marked for promoting to
+                * insert them into an appropriate existing hole prior to defrag. */
+               LIST_FOR_EACH_ENTRY_SAFE(item, next, pool->unallocated_list, link) {
+                       if (!(item->status & ITEM_FOR_PROMOTING))
+                               continue;
+
+                       int64_t hole_start = 0, hole_size = 0;
+                       int64_t item_size = align(item->size_in_dw, ITEM_ALIGNMENT);
+                       struct compute_memory_item *alloc_item, *alloc_next;
+                       LIST_FOR_EACH_ENTRY_SAFE(alloc_item, alloc_next, pool->item_list, link) {
+                               if (alloc_item->start_in_dw == hole_start) {
+                                       hole_start += align(alloc_item->size_in_dw, ITEM_ALIGNMENT);
+                                       hole_size = 0;
+                               } else if (alloc_item->start_in_dw > hole_start) {
+                                       hole_size = alloc_item->start_in_dw - hole_start;
+                               }
+                       }
+
+                       /* Space after all items is also a hole. */
+                       if (hole_size == 0 && hole_start < pool->size_in_dw)
+                               hole_size = pool->size_in_dw - hole_start;
+
+                       if (hole_size >= item_size) {
+                               if (compute_memory_promote_item(pool, item, pipe, hole_start) != -1) {
+                                       item->status &= ~ITEM_FOR_PROMOTING;
+                                       unallocated -= item_size;
+                                       allocated += item_size;
+                               }
+                       }
+               }
+
+               if (allocated == pool->size_in_dw)
+                       pool->status &= ~POOL_FRAGMENTED;
+
+               if (unallocated == 0)
+                       return 0;
+
                struct pipe_resource *src = (struct pipe_resource *)pool->bo;
                compute_memory_defrag(pool, src, src, pipe);
        }

Reply via email to