 configure.ac             |   2 +-
 intel/intel_bufmgr.h     |   2 ++
 intel/intel_bufmgr_gem.c | 138 +++++++++++++++++++++++++++++++++++++++++------
 tests/radeon/Makefile.am |   2 ++
 4 files changed, 127 insertions(+), 17 deletions(-)
New commits:

commit 7fd1678110b78d9324723a54dfd5049496b9e3cf
Author: Chris Wilson <[email protected]>
Date:   Mon Dec 5 23:09:20 2011 +0000

    configure: Bump version for 2.4.29

    Yet another release required for new API

diff --git a/configure.ac b/configure.ac
index 829e8d2..5f144bc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -20,7 +20,7 @@
 AC_PREREQ([2.63])
 AC_INIT([libdrm],
-        [2.4.28],
+        [2.4.29],
         [https://bugs.freedesktop.org/enter_bug.cgi?product=DRI],
         [libdrm])

commit 015286f03e871ccf49af9f2ceef7f5e04c8d61ca
Author: Chris Wilson <[email protected]>
Date:   Sun Dec 11 17:35:06 2011 +0000

    intel: Remove the fresh assertions used to debug the vma cacheing

    Hopefully all the bugs in the callers have been found, so time to
    handle the failures "gracefully" again.

    Signed-off-by: Chris Wilson <[email protected]>

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index bb096de..19441f3 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -1098,8 +1098,8 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 	if (!bo_gem->mem_virtual) {
 		struct drm_i915_gem_mmap mmap_arg;
 
-		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
-		assert(bo_gem->map_count == 1);
+		DBG("bo_map: %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
 		memset(&mmap_arg, 0, sizeof(mmap_arg));
 		mmap_arg.handle = bo_gem->gem_handle;
@@ -1163,9 +1163,8 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 	if (bo_gem->gtt_virtual == NULL) {
 		struct drm_i915_gem_mmap_gtt mmap_arg;
 
-		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
-		    bo_gem->name);
-		assert(bo_gem->map_count == 1);
+		DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
+		    bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
 		memset(&mmap_arg, 0, sizeof(mmap_arg));
 		mmap_arg.handle = bo_gem->gem_handle;
@@ -1239,7 +1238,14 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
-	assert(bo_gem->map_count > 0);
+	if (bo_gem->map_count <= 0) {
+		DBG("attempted to unmap an unmapped bo\n");
+		pthread_mutex_unlock(&bufmgr_gem->lock);
+		/* Preserve the old behaviour of just treating this as a
+		 * no-op rather than reporting the error.
+		 */
+		return 0;
+	}
 
 	if (bo_gem->mapped_cpu_write) {
 		/* Cause a flush to happen if the buffer's pinned for

commit c5f0ed1d296f11367febd0e1b7dce8789308bf35
Author: Chris Wilson <[email protected]>
Date:   Tue Dec 13 10:30:54 2011 +0000

    intel: Update map-count for an early error return during mapping

    Signed-off-by: Chris Wilson <[email protected]>

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index 55ff5ab..bb096de 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -1180,6 +1180,8 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 			    __FILE__, __LINE__,
 			    bo_gem->gem_handle, bo_gem->name,
 			    strerror(errno));
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
 			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return ret;
 		}
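Taken together, these two fixes restore one invariant around the counted
map: every early return taken after map_count has been incremented must
decrement it again, whoever drops the count to zero returns the vma to
the cache, and a stray unmap of an unmapped bo degrades to a no-op. In
outline, a minimal compilable sketch -- struct bo, open_vma, close_vma
and do_mmap are illustrative stand-ins, not libdrm API:

    #include <errno.h>

    struct bo { int map_count; };

    /* Hypothetical stand-ins for the drm_intel_gem_bo_*_vma helpers */
    static void open_vma(struct bo *bo)  { (void)bo; }  /* leave the MRU cache */
    static void close_vma(struct bo *bo) { (void)bo; }  /* rejoin the MRU cache */
    static int do_mmap(struct bo *bo)    { (void)bo; return 0; }

    int map(struct bo *bo)
    {
        if (bo->map_count++ == 0)
            open_vma(bo);

        if (do_mmap(bo) != 0) {
            /* c5f0ed1: the early error return must undo the increment */
            if (--bo->map_count == 0)
                close_vma(bo);
            return -errno;
        }
        return 0;
    }

    int unmap(struct bo *bo)
    {
        /* 015286f: unmapping an unmapped bo is a no-op again */
        if (bo->map_count <= 0)
            return 0;
        if (--bo->map_count == 0)
            close_vma(bo);
        return 0;
    }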
commit dd9a5b4f7fb07c78db4e7481bedca1b981030e3f
Author: Chris Wilson <[email protected]>
Date:   Tue Dec 6 13:12:37 2011 +0000

    intel: Evict cached VMA in order to make room for new mappings

    As the max number of VMA mappings is a hard per-process limit, we
    need to include the number of currently active mappings when
    evicting in order to make room for a new mmap.

    Signed-off-by: Chris Wilson <[email protected]>

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index c535dee..55ff5ab 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -98,7 +98,7 @@ typedef struct _drm_intel_bufmgr_gem {
 	drmMMListHead named;
 	drmMMListHead vma_cache;
-	int vma_count, vma_max;
+	int vma_count, vma_open, vma_max;
 
 	uint64_t gtt_size;
 	int available_fences;
@@ -938,13 +938,20 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 
 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
 {
-	DBG("%s: count=%d, limit=%d\n", __FUNCTION__,
-	    bufmgr_gem->vma_count, bufmgr_gem->vma_max);
+	int limit;
+
+	DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
+	    bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
 
 	if (bufmgr_gem->vma_max < 0)
 		return;
 
-	while (bufmgr_gem->vma_count > bufmgr_gem->vma_max) {
+	/* We may need to evict a few entries in order to create new mmaps */
+	limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
+	if (limit < 0)
+		limit = 0;
+
+	while (bufmgr_gem->vma_count > limit) {
 		drm_intel_bo_gem *bo_gem;
 
 		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
@@ -966,9 +973,10 @@ static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
 	}
 }
 
-static void drm_intel_gem_bo_add_to_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem,
-					      drm_intel_bo_gem *bo_gem)
+static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+				       drm_intel_bo_gem *bo_gem)
 {
+	bufmgr_gem->vma_open--;
 	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
 	if (bo_gem->mem_virtual)
 		bufmgr_gem->vma_count++;
@@ -977,14 +985,16 @@ static void drm_intel_gem_bo_add_to_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem,
 	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
 }
 
-static void drm_intel_gem_bo_remove_from_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem,
-						   drm_intel_bo_gem *bo_gem)
+static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
+				      drm_intel_bo_gem *bo_gem)
 {
+	bufmgr_gem->vma_open++;
 	DRMLISTDEL(&bo_gem->vma_list);
 	if (bo_gem->mem_virtual)
 		bufmgr_gem->vma_count--;
 	if (bo_gem->gtt_virtual)
 		bufmgr_gem->vma_count--;
+	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
 }
 
 static void
@@ -1023,6 +1033,7 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 	if (bo_gem->map_count) {
 		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
 		bo_gem->map_count = 0;
+		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
 	}
 
 	DRMLISTDEL(&bo_gem->name_list);
@@ -1037,9 +1048,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 		bo_gem->name = NULL;
 		bo_gem->validate_index = -1;
 
-		if (bo_gem->mem_virtual || bo_gem->gtt_virtual)
-			drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
-
 		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
 	} else {
 		drm_intel_gem_bo_free(bo);
@@ -1085,7 +1093,7 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
 	if (bo_gem->map_count++ == 0)
-		drm_intel_gem_bo_remove_from_vma_cache(bufmgr_gem, bo_gem);
+		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
 
 	if (!bo_gem->mem_virtual) {
 		struct drm_i915_gem_mmap mmap_arg;
@@ -1106,7 +1114,7 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 			    __FILE__, __LINE__,
 			    bo_gem->gem_handle, bo_gem->name,
 			    strerror(errno));
 			if (--bo_gem->map_count == 0)
-				drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
 			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return ret;
 		}
@@ -1149,7 +1157,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
 	if (bo_gem->map_count++ == 0)
-		drm_intel_gem_bo_remove_from_vma_cache(bufmgr_gem, bo_gem);
+		drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
 
 	/* Get a mapping of the buffer if we haven't before. */
 	if (bo_gem->gtt_virtual == NULL) {
@@ -1188,7 +1196,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 			    bo_gem->gem_handle, bo_gem->name,
 			    strerror(errno));
 			if (--bo_gem->map_count == 0)
-				drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
+				drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
 			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return ret;
 		}
@@ -1251,7 +1259,7 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 	 * limits and cause later failures.
 	 */
 	if (--bo_gem->map_count == 0) {
-		drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
+		drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
 		bo->virtual = NULL;
 	}
 	pthread_mutex_unlock(&bufmgr_gem->lock);
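The key change is the eviction budget: each open bo may hold up to two
mappings (one CPU mmap and one GTT mmap), so the cache has to shrink to
vma_max minus twice the number of open vma before a new mmap is created.
A worked example of that arithmetic, with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative values, not defaults from the library */
        int vma_max = 64;    /* cap from drm_intel_bufmgr_gem_set_vma_cache_size() */
        int vma_open = 10;   /* bos currently mapped by callers */
        int vma_count = 50;  /* cached, inactive mappings */

        /* Reserve two slots per open bo: one CPU mmap plus one GTT mmap */
        int limit = vma_max - 2 * vma_open;
        if (limit < 0)
            limit = 0;   /* with enough open bos, nothing may stay cached */

        /* prints: purge down to 44 cached vma (evicting 6) */
        printf("purge down to %d cached vma (evicting %d)\n",
               limit, vma_count > limit ? vma_count - limit : 0);
        return 0;
    }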
commit e4b60f29609e9993dc7268993da509530862aa78
Author: Chris Wilson <[email protected]>
Date:   Mon Dec 5 21:29:05 2011 +0000

    intel: Add an interface to limit vma caching

    There is a per-process limit on the number of vma that the process
    can keep open, so we cannot keep an unlimited cache of unused vmas
    (besides, keeping track of all those vmas in the kernel adds
    considerable overhead). However, in order to work around
    inefficiencies in the kernel it is beneficial to reuse the vma, so
    keep an MRU cache of vmas.

    Signed-off-by: Chris Wilson <[email protected]>

diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h
index abe9711..808e5df 100644
--- a/intel/intel_bufmgr.h
+++ b/intel/intel_bufmgr.h
@@ -145,6 +145,8 @@ drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 						unsigned int handle);
 void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
 void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
+void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
+					     int limit);
 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
 int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index db1416a..c535dee 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -97,6 +97,8 @@ typedef struct _drm_intel_bufmgr_gem {
 	time_t time;
 
 	drmMMListHead named;
+	drmMMListHead vma_cache;
+	int vma_count, vma_max;
 
 	uint64_t gtt_size;
 	int available_fences;
@@ -157,6 +159,7 @@ struct _drm_intel_bo_gem {
 	/** GTT virtual address for the buffer, saved across map/unmap cycles */
 	void *gtt_virtual;
 	int map_count;
+	drmMMListHead vma_list;
 
 	/** BO cache list */
 	drmMMListHead head;
@@ -701,6 +704,7 @@ retry:
 		}
 
 		DRMINITLISTHEAD(&bo_gem->name_list);
+		DRMINITLISTHEAD(&bo_gem->vma_list);
 	}
 
 	bo_gem->name = name;
@@ -866,6 +870,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
 	/* XXX stride is unknown */
 	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
+	DRMINITLISTHEAD(&bo_gem->vma_list);
 	DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
 
@@ -880,6 +885,16 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
 	struct drm_gem_close close;
 	int ret;
 
+	DRMLISTDEL(&bo_gem->vma_list);
+	if (bo_gem->mem_virtual) {
+		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+		bufmgr_gem->vma_count--;
+	}
+	if (bo_gem->gtt_virtual) {
+		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+		bufmgr_gem->vma_count--;
+	}
+
 	/* Close this object */
 	memset(&close, 0, sizeof(close));
 	close.handle = bo_gem->gem_handle;
@@ -921,6 +936,57 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 	bufmgr_gem->time = time;
 }
 
+static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
+{
+	DBG("%s: count=%d, limit=%d\n", __FUNCTION__,
+	    bufmgr_gem->vma_count, bufmgr_gem->vma_max);
+
+	if (bufmgr_gem->vma_max < 0)
+		return;
+
+	while (bufmgr_gem->vma_count > bufmgr_gem->vma_max) {
+		drm_intel_bo_gem *bo_gem;
+
+		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+				      bufmgr_gem->vma_cache.next,
+				      vma_list);
+		assert(bo_gem->map_count == 0);
+		DRMLISTDEL(&bo_gem->vma_list);
+
+		if (bo_gem->mem_virtual) {
+			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+			bo_gem->mem_virtual = NULL;
+			bufmgr_gem->vma_count--;
+		}
+		if (bo_gem->gtt_virtual) {
+			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+			bo_gem->gtt_virtual = NULL;
+			bufmgr_gem->vma_count--;
+		}
+	}
+}
+
+static void drm_intel_gem_bo_add_to_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem,
+					      drm_intel_bo_gem *bo_gem)
+{
+	DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
+	if (bo_gem->mem_virtual)
+		bufmgr_gem->vma_count++;
+	if (bo_gem->gtt_virtual)
+		bufmgr_gem->vma_count++;
+	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
+static void drm_intel_gem_bo_remove_from_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem,
+						   drm_intel_bo_gem *bo_gem)
+{
+	DRMLISTDEL(&bo_gem->vma_list);
+	if (bo_gem->mem_virtual)
+		bufmgr_gem->vma_count--;
+	if (bo_gem->gtt_virtual)
+		bufmgr_gem->vma_count--;
+}
+
 static void
 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 {
@@ -958,14 +1024,6 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
 		bo_gem->map_count = 0;
 	}
-	if (bo_gem->mem_virtual) {
-		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
-		bo_gem->mem_virtual = 0;
-	}
-	if (bo_gem->gtt_virtual) {
-		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
-		bo_gem->gtt_virtual = 0;
-	}
 
 	DRMLISTDEL(&bo_gem->name_list);
 
@@ -979,6 +1037,9 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 		bo_gem->name = NULL;
 		bo_gem->validate_index = -1;
 
+		if (bo_gem->mem_virtual || bo_gem->gtt_virtual)
+			drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
+
 		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
 	} else {
 		drm_intel_gem_bo_free(bo);
@@ -1023,11 +1084,14 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
+	if (bo_gem->map_count++ == 0)
+		drm_intel_gem_bo_remove_from_vma_cache(bufmgr_gem, bo_gem);
+
 	if (!bo_gem->mem_virtual) {
 		struct drm_i915_gem_mmap mmap_arg;
 
 		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
-		assert(bo_gem->map_count == 0);
+		assert(bo_gem->map_count == 1);
 
 		memset(&mmap_arg, 0, sizeof(mmap_arg));
 		mmap_arg.handle = bo_gem->gem_handle;
@@ -1041,6 +1105,8 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 			DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
 			    __FILE__, __LINE__,
 			    bo_gem->gem_handle, bo_gem->name,
 			    strerror(errno));
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
 			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return ret;
 		}
@@ -1049,7 +1115,6 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle,
 	    bo_gem->name, bo_gem->mem_virtual);
 	bo->virtual = bo_gem->mem_virtual;
-	bo_gem->map_count++;
 
 	set_domain.handle = bo_gem->gem_handle;
 	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
@@ -1083,13 +1148,16 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
+	if (bo_gem->map_count++ == 0)
+		drm_intel_gem_bo_remove_from_vma_cache(bufmgr_gem, bo_gem);
+
 	/* Get a mapping of the buffer if we haven't before. */
 	if (bo_gem->gtt_virtual == NULL) {
 		struct drm_i915_gem_mmap_gtt mmap_arg;
 
 		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
 		    bo_gem->name);
-		assert(bo_gem->map_count == 0);
+		assert(bo_gem->map_count == 1);
 
 		memset(&mmap_arg, 0, sizeof(mmap_arg));
 		mmap_arg.handle = bo_gem->gem_handle;
@@ -1119,13 +1187,14 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 			    __FILE__, __LINE__,
 			    bo_gem->gem_handle, bo_gem->name,
 			    strerror(errno));
+			if (--bo_gem->map_count == 0)
+				drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
 			pthread_mutex_unlock(&bufmgr_gem->lock);
 			return ret;
 		}
 	}
 
 	bo->virtual = bo_gem->gtt_virtual;
-	bo_gem->map_count++;
 
 	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
 	    bo_gem->gtt_virtual);
@@ -1160,6 +1229,8 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 
 	pthread_mutex_lock(&bufmgr_gem->lock);
 
+	assert(bo_gem->map_count > 0);
+
 	if (bo_gem->mapped_cpu_write) {
 		/* Cause a flush to happen if the buffer's pinned for
 		 * scanout, so the results show up in a timely manner.
@@ -1180,15 +1251,7 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 	 * limits and cause later failures.
 	 */
 	if (--bo_gem->map_count == 0) {
-		if (bo_gem->mem_virtual) {
-			munmap(bo_gem->mem_virtual, bo_gem->bo.size);
-			bo_gem->mem_virtual = NULL;
-		}
-		if (bo_gem->gtt_virtual) {
-			munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
-			bo_gem->gtt_virtual = NULL;
-		}
-
+		drm_intel_gem_bo_add_to_vma_cache(bufmgr_gem, bo_gem);
 		bo->virtual = NULL;
 	}
 	pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -2174,6 +2237,16 @@ init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
 	}
 }
 
+void
+drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
+{
+	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+
+	bufmgr_gem->vma_max = limit;
+
+	drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
+}
+
 /**
  * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
  * and manage map buffer objections.
@@ -2333,5 +2406,8 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
 	DRMINITLISTHEAD(&bufmgr_gem->named);
 	init_cache_buckets(bufmgr_gem);
 
+	DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
+	bufmgr_gem->vma_max = -1; /* unlimited by default */
+
 	return &bufmgr_gem->bufmgr;
 }
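For callers, the new knob slots in right after initialisation. A minimal
sketch, assuming fd is already open on the DRM device node; the batch
size of 4096 and cache cap of 512 are arbitrary illustrative values:

    #include <stddef.h>
    #include "intel_bufmgr.h"

    drm_intel_bufmgr *init_bufmgr(int fd)
    {
        drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
        if (bufmgr == NULL)
            return NULL;

        drm_intel_bufmgr_gem_enable_reuse(bufmgr);

        /* Keep well under the per-process mapping limit (the kernel's
         * vm.max_map_count sysctl, typically around 65k), leaving
         * headroom for the rest of the process; -1, the default,
         * leaves the cache unlimited. */
        drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 512);

        return bufmgr;
    }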
commit 902ee661f1864aaf8325621085f6a1b5a6a3673a
Author: Dave Airlie <[email protected]>
Date:   Mon Dec 5 21:24:48 2011 +0000

    test/radeon: add missing files for dist

diff --git a/tests/radeon/Makefile.am b/tests/radeon/Makefile.am
index 021ca72..1775669 100644
--- a/tests/radeon/Makefile.am
+++ b/tests/radeon/Makefile.am
@@ -9,4 +9,6 @@ noinst_PROGRAMS = \
 
 radeon_ttm_SOURCES = \
 	rbo.c \
+	rbo.h \
+	list.h \
 	radeon_ttm.c
commit 5c5332bbc38ff25c06081ac53a15ad583ad4cbc4
Author: Chris Wilson <[email protected]>
Date:   Mon Dec 5 10:39:49 2011 +0000

    intel: Clean up mmaps on freeing the buffer

    As a precautionary measure, munmap on buffer free so that we never
    leak the vma. Also include a warning during debugging.

    Signed-off-by: Chris Wilson <[email protected]>

diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index d862329..db1416a 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -953,6 +953,20 @@ drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
 		bo_gem->relocs = NULL;
 	}
 
+	/* Clear any left-over mappings */
+	if (bo_gem->map_count) {
+		DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
+		bo_gem->map_count = 0;
+	}
+	if (bo_gem->mem_virtual) {
+		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+		bo_gem->mem_virtual = 0;
+	}
+	if (bo_gem->gtt_virtual) {
+		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+		bo_gem->gtt_virtual = 0;
+	}
+
 	DRMLISTDEL(&bo_gem->name_list);
 
 	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
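The failure mode this guards against is easy to hit from the public API:
a bo unreferenced while still mapped would previously keep its vma until
process exit. A hypothetical repro, with error handling elided:

    #include "intel_bufmgr.h"

    void leak_demo(drm_intel_bufmgr *bufmgr)
    {
        drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "demo", 4096, 4096);

        drm_intel_bo_map(bo, 1);        /* map_count -> 1, vma created */
        /* ...missing drm_intel_bo_unmap(bo)... */

        /* With this commit the final unreference munmaps the stale
         * mapping (and DBG-warns about the non-zero map-count)
         * instead of leaking the vma. */
        drm_intel_bo_unreference(bo);
    }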

