The AMDGPU_VA_RANGE_32_BIT flag is added to request a VA range in the
32bit address space for amdgpu_va_range_alloc.

The 32bit address space is reserved at initialization time, and managed
with a separate VAMGR as part of the global VAMGR. If not enough VA
space is available in the range above 4GB, this reserved range can be
used as a fallback.

v2: add comment for AMDGPU_VA_RANGE_32_BIT, and add vamgr to va_range
v3: rebase to Emil's drm_private series
v4: fix one warning

Signed-off-by: Jammy Zhou <Jammy.Zhou at amd.com>
Reviewed-by: Christian König <christian.koenig at amd.com>
---
 amdgpu/amdgpu.h          |  5 +++++
 amdgpu/amdgpu_device.c   | 20 ++++++++++++++++++++
 amdgpu/amdgpu_internal.h |  9 +++++++++
 amdgpu/amdgpu_vamgr.c    | 32 +++++++++++++++++++++++++-------
 4 files changed, 59 insertions(+), 7 deletions(-)

diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index a90c1ac..1e633e3 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -1075,6 +1075,11 @@ int amdgpu_read_mm_registers(amdgpu_device_handle dev, 
unsigned dword_offset,
                             uint32_t *values);

 /**
+ * Flag to request VA address range in the 32bit address space
+*/
+#define AMDGPU_VA_RANGE_32_BIT         0x1
+
+/**
  * Allocate virtual address range
  *
  * \param dev - [in] Device handle. See #amdgpu_device_initialize()
diff --git a/amdgpu/amdgpu_device.c b/amdgpu/amdgpu_device.c
index b977847..0ef1d31 100644
--- a/amdgpu/amdgpu_device.c
+++ b/amdgpu/amdgpu_device.c
@@ -44,6 +44,7 @@
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
 #include "util_hash_table.h"
+#include "util_math.h"

 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
 #define UINT_TO_PTR(x) ((void *)((intptr_t)(x)))
@@ -174,6 +175,7 @@ int amdgpu_device_initialize(int fd,
        int flag_auth = 0;
        int flag_authexist=0;
        uint32_t accel_working = 0;
+       uint64_t start, max;

        *device_handle = NULL;

@@ -252,6 +254,19 @@ int amdgpu_device_initialize(int fd,

        dev->vamgr = amdgpu_vamgr_get_global(dev);

+       max = MIN2(dev->dev_info.virtual_address_max, 0xffffffff);
+       start = amdgpu_vamgr_find_va(dev->vamgr,
+                                    max - dev->dev_info.virtual_address_offset,
+                                    dev->dev_info.virtual_address_alignment, 
0);
+       if (start > 0xffffffff)
+               goto free_va; /* shouldn't get here */
+
+       dev->vamgr_32 =  calloc(1, sizeof(struct amdgpu_bo_va_mgr));
+       if (dev->vamgr_32 == NULL)
+               goto free_va;
+       amdgpu_vamgr_init(dev->vamgr_32, start, max,
+                         dev->dev_info.virtual_address_alignment);
+
        *major_version = dev->major_version;
        *minor_version = dev->minor_version;
        *device_handle = dev;
@@ -260,6 +275,11 @@ int amdgpu_device_initialize(int fd,

        return 0;

+free_va:
+       r = -ENOMEM;
+       amdgpu_vamgr_free_va(dev->vamgr, start,
+                            max - dev->dev_info.virtual_address_offset);
+
 cleanup:
        if (dev->fd >= 0)
                close(dev->fd);
diff --git a/amdgpu/amdgpu_internal.h b/amdgpu/amdgpu_internal.h
index 92eb5ec..ca155be 100644
--- a/amdgpu/amdgpu_internal.h
+++ b/amdgpu/amdgpu_internal.h
@@ -65,6 +65,7 @@ struct amdgpu_va {
        uint64_t address;
        uint64_t size;
        enum amdgpu_gpu_va_range range;
+       struct amdgpu_bo_va_mgr *vamgr;
 };

 struct amdgpu_device {
@@ -82,7 +83,10 @@ struct amdgpu_device {
        pthread_mutex_t bo_table_mutex;
        struct drm_amdgpu_info_device dev_info;
        struct amdgpu_gpu_info info;
+       /** The global VA manager for the whole virtual address space */
        struct amdgpu_bo_va_mgr *vamgr;
+       /** The VA manager for the 32bit address space */
+       struct amdgpu_bo_va_mgr *vamgr_32;
 };

 struct amdgpu_bo {
@@ -124,6 +128,11 @@ drm_private struct amdgpu_bo_va_mgr* 
amdgpu_vamgr_get_global(struct amdgpu_devic

 drm_private void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst, struct 
amdgpu_bo_va_mgr *src);

+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t 
start,
+                      uint64_t max, uint64_t alignment);
+
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);
+
 drm_private uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, 
uint64_t size,
                                uint64_t alignment, uint64_t base_required);

diff --git a/amdgpu/amdgpu_vamgr.c b/amdgpu/amdgpu_vamgr.c
index 71fd2b1..698826d 100644
--- a/amdgpu/amdgpu_vamgr.c
+++ b/amdgpu/amdgpu_vamgr.c
@@ -46,7 +46,7 @@ int amdgpu_va_range_query(amdgpu_device_handle dev,
        return -EINVAL;
 }

-static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t 
start,
                              uint64_t max, uint64_t alignment)
 {
        mgr->va_offset = start;
@@ -57,7 +57,7 @@ static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, 
uint64_t start,
        pthread_mutex_init(&mgr->bo_va_mutex, NULL);
 }

-static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
 {
        struct amdgpu_bo_va_hole *hole;
        LIST_FOR_EACH_ENTRY(hole, &mgr->va_holes, list) {
@@ -252,23 +252,39 @@ int amdgpu_va_range_alloc(amdgpu_device_handle dev,
                          amdgpu_va_handle *va_range_handle,
                          uint64_t flags)
 {
-       va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment);
-       size = ALIGN(size, vamgr.va_alignment);
+       struct amdgpu_bo_va_mgr *vamgr;

-       *va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size,
+       if (flags & AMDGPU_VA_RANGE_32_BIT)
+               vamgr = dev->vamgr_32;
+       else
+               vamgr = dev->vamgr;
+
+       va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
+       size = ALIGN(size, vamgr->va_alignment);
+
+       *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
+                                       va_base_alignment, va_base_required);
+
+       if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
+           (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+               /* fallback to 32bit address */
+               vamgr = dev->vamgr_32;
+               *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
                                        va_base_alignment, va_base_required);
+       }

        if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
                struct amdgpu_va* va;
                va = calloc(1, sizeof(struct amdgpu_va));
                if(!va){
-                       amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated, 
size);
+                       amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
                        return -ENOMEM;
                }
                va->dev = dev;
                va->address = *va_base_allocated;
                va->size = size;
                va->range = va_range_type;
+               va->vamgr = vamgr;
                *va_range_handle = va;
        } else {
                return -EINVAL;
@@ -281,7 +297,9 @@ int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
 {
        if(!va_range_handle || !va_range_handle->address)
                return 0;
-       amdgpu_vamgr_free_va(va_range_handle->dev->vamgr, 
va_range_handle->address,
+
+       amdgpu_vamgr_free_va(va_range_handle->vamgr,
+                       va_range_handle->address,
                        va_range_handle->size);
        free(va_range_handle);
        return 0;
-- 
1.9.1

Reply via email to