A userptr BO resides in host memory, so GPU writes over PCIe must stay
coherent with the CPU caches. Write a pattern into the userptr BO with
SDMA, copy it to VRAM with SDMA, and then verify the values both through
the CPU mapping and in VRAM to validate GTT cache coherence.

SDMA write ----> Bo(userptr) ----SDMA copy----> Bo(VRAM)

Signed-off-by: zhangzhijie <[email protected]>
---
 tests/amdgpu/basic_tests.c | 155 +++++++++++++++++++++++++++++++++++++
 1 file changed, 155 insertions(+)

diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index 0e4a357b..223a9b0b 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -2061,12 +2061,167 @@ static void amdgpu_command_submission_sdma_copy_linear(void)
 {
        amdgpu_command_submission_copy_linear_helper(AMDGPU_HW_IP_DMA);
 }
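+
+/*
+ * Write a pattern into a userptr BO with SDMA, copy it to VRAM with
+ * SDMA, then verify both the CPU view of the userptr and the copy in
+ * VRAM to validate GTT cache coherence.
+ */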
+static void amdgpu_command_userptr_copy_to_vram_linear(void)
+{
+       int i, r, j;
+       uint32_t *pm4 = NULL;
+       uint64_t bo_mc;
+       void *ptr = NULL;
+       int pm4_dw = 256;
+       int sdma_write_length = 4;
+       amdgpu_bo_handle handle;
+       amdgpu_context_handle context_handle;
+       struct amdgpu_cs_ib_info *ib_info;
+       struct amdgpu_cs_request *ibs_request;
+       amdgpu_bo_handle buf_handle;
+       amdgpu_va_handle va_handle;
+
+       amdgpu_bo_handle bo1;
+       amdgpu_bo_handle *resources;
+       uint64_t bo1_mc;
+       volatile unsigned char *bo1_cpu;
+       amdgpu_va_handle bo1_va_handle;
+
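+       /* allocate a CPU-visible VRAM BO as the destination of the copy */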
+       r = amdgpu_bo_alloc_and_map(device_handle,
+                               BUFFER_SIZE, 4096,
+                               AMDGPU_GEM_DOMAIN_VRAM,
+                               AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &bo1,
+                               (void**)&bo1_cpu, &bo1_mc,
+                               &bo1_va_handle);
+       CU_ASSERT_EQUAL(r, 0);
+       /* fill the destination with a known pattern */
+       memset((void*)bo1_cpu, 0xaa, BUFFER_SIZE);
+
+       pm4 = calloc(pm4_dw, sizeof(*pm4));
+       CU_ASSERT_NOT_EQUAL(pm4, NULL);
+
+       ib_info = calloc(1, sizeof(*ib_info));
+       CU_ASSERT_NOT_EQUAL(ib_info, NULL);
+
+       ibs_request = calloc(1, sizeof(*ibs_request));
+       CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
+
+       r = amdgpu_cs_ctx_create(device_handle, &context_handle);
+       CU_ASSERT_EQUAL(r, 0);
+
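+       /* allocate page-aligned host memory to back the userptr BO */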
+       r = posix_memalign(&ptr, sysconf(_SC_PAGE_SIZE), BUFFER_SIZE);
+       CU_ASSERT_EQUAL(r, 0);
+       CU_ASSERT_NOT_EQUAL(ptr, NULL);
+       memset(ptr, 0, BUFFER_SIZE);
+
+       r = amdgpu_create_bo_from_user_mem(device_handle,
+                                          ptr, BUFFER_SIZE, &buf_handle);
+       CU_ASSERT_EQUAL(r, 0);
+
+       r = amdgpu_va_range_alloc(device_handle,
+                                 amdgpu_gpu_va_range_general,
+                                 BUFFER_SIZE, 1, 0, &bo_mc,
+                                 &va_handle, 0);
+       CU_ASSERT_EQUAL(r, 0);
+
+       r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0,
+                           AMDGPU_VA_OP_MAP);
+       CU_ASSERT_EQUAL(r, 0);
+
+       handle = buf_handle;
+
+       j = i = 0;
+
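+       /* SDMA linear write: fill the userptr BO with 0xdeadbeaf */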
+       if (family_id == AMDGPU_FAMILY_SI)
+               pm4[i++] = SDMA_PACKET_SI(SDMA_OPCODE_WRITE, 0, 0, 0,
+                               sdma_write_length);
+       else
+               pm4[i++] = SDMA_PACKET(SDMA_OPCODE_WRITE,
+                               SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+       pm4[i++] = 0xffffffff & bo_mc;
+       pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
+       if (family_id >= AMDGPU_FAMILY_AI)
+               pm4[i++] = sdma_write_length - 1;
+       else if (family_id != AMDGPU_FAMILY_SI)
+               pm4[i++] = sdma_write_length;
+
+       while (j++ < sdma_write_length)
+               pm4[i++] = 0xdeadbeaf;
+
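+       /* fork a child that dirties the IB buffer; copy-on-write must
+        * leave the parent's packets and the userptr pages intact
+        */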
+       if (!fork()) {
+               pm4[0] = 0x0;
+               exit(0);
+       }
+
+       amdgpu_test_exec_cs_helper(context_handle,
+                                  AMDGPU_HW_IP_DMA, 0,
+                                  i, pm4,
+                                  1, &handle,
+                                  ib_info, ibs_request);
+
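+       /* SDMA linear copy: userptr BO (GTT) -> VRAM BO */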
+       i = 0;
+       sdma_write_length = 1024;
+       if (family_id == AMDGPU_FAMILY_SI) {
+               pm4[i++] =
+               SDMA_PACKET_SI(SDMA_OPCODE_COPY_SI, 0, 0, 0, sdma_write_length);
+               pm4[i++] = 0xffffffff & bo1_mc;
+               pm4[i++] = 0xffffffff & bo_mc;
+               pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
+               pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
+       } else {
+               pm4[i++] =
+               SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
+               if (family_id >= AMDGPU_FAMILY_AI)
+                       pm4[i++] = sdma_write_length - 1;
+               else
+                       pm4[i++] = sdma_write_length;
+               pm4[i++] = 0;
+               pm4[i++] = 0xffffffff & bo_mc;
+               pm4[i++] = (0xffffffff00000000 & bo_mc) >> 32;
+               pm4[i++] = 0xffffffff & bo1_mc;
+               pm4[i++] = (0xffffffff00000000 & bo1_mc) >> 32;
+       }
+       /* prepare resource */
+       resources = calloc(2, sizeof(amdgpu_bo_handle));
+       CU_ASSERT_NOT_EQUAL(resources, NULL);
+
+       resources[0] = bo1;
+       resources[1] = handle;
+       amdgpu_test_exec_cs_helper(context_handle,
+                                       AMDGPU_HW_IP_DMA, 0,
+                                       i, pm4,
+                                       2, resources,
+                                       ib_info, ibs_request);
+
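+       /* check the SDMA write through the CPU view of the userptr */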
+       i = 0;
+       while (i < 4) {
+               CU_ASSERT_EQUAL(((int*)ptr)[i++], 0xdeadbeaf);
+       }
+
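+       /* check the copied data through the CPU mapping of VRAM */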
+       i = 0;
+       while (i < 4) {
+               CU_ASSERT_EQUAL(((int*)bo1_cpu)[i++], 0xdeadbeaf);
+       }
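+
+       /* tear down: free the IB buffers, unmap and free both BOs */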
+       free(ibs_request);
+       free(ib_info);
+       free(pm4);
+
+       r = amdgpu_bo_va_op(buf_handle, 0, BUFFER_SIZE, bo_mc, 0,
+                           AMDGPU_VA_OP_UNMAP);
+       CU_ASSERT_EQUAL(r, 0);
+       r = amdgpu_va_range_free(va_handle);
+       CU_ASSERT_EQUAL(r, 0);
+       r = amdgpu_bo_free(buf_handle);
+       CU_ASSERT_EQUAL(r, 0);
+       free(ptr);
+       r = amdgpu_bo_unmap_and_free(bo1, bo1_va_handle, bo1_mc,
+                                               BUFFER_SIZE);
+       CU_ASSERT_EQUAL(r, 0);
+       r = amdgpu_cs_ctx_free(context_handle);
+       CU_ASSERT_EQUAL(r, 0);
+
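+       /* reap the child forked above */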
+       wait(NULL);
+}
 
 static void amdgpu_command_submission_sdma(void)
 {
        amdgpu_command_submission_sdma_write_linear();
        amdgpu_command_submission_sdma_const_fill();
        amdgpu_command_submission_sdma_copy_linear();
+       amdgpu_command_userptr_copy_to_vram_linear();
 }
 
 static void amdgpu_command_submission_multi_fence_wait_all(bool wait_all)
-- 
2.34.1
