Enable compute runtime sanity test:
 - test that the compute runtime library enables compiling of an OpenCL application
 - test that the compute runtime can perform OpenCL kernel computation on an Intel GPU
 - test that an Intel GPU device was used by the compute runtime

Signed-off-by: Yeoh Ee Peng <[email protected]>
---
 lib/oeqa/runtime/cases/intel_compute_runtime.py    |  38 ++++
 .../runtime/files/intel-compute-runtime/matvec.c   | 206 +++++++++++++++++++++
 .../miutils/tests/intel_compute_runtime_test.py    |  31 ++++
 3 files changed, 275 insertions(+)
 create mode 100644 lib/oeqa/runtime/cases/intel_compute_runtime.py
 create mode 100644 lib/oeqa/runtime/files/intel-compute-runtime/matvec.c
 create mode 100644 lib/oeqa/runtime/miutils/tests/intel_compute_runtime_test.py

diff --git a/lib/oeqa/runtime/cases/intel_compute_runtime.py 
b/lib/oeqa/runtime/cases/intel_compute_runtime.py
new file mode 100644
index 0000000..88ea1ea
--- /dev/null
+++ b/lib/oeqa/runtime/cases/intel_compute_runtime.py
@@ -0,0 +1,38 @@
+from oeqa.runtime.case import OERuntimeTestCase
+from oeqa.runtime.decorator.package import OEHasPackage
+from oeqa.core.decorator.depends import OETestDepends
+from oeqa.runtime.miutils.targets.oeqatarget import OEQATarget
+from oeqa.runtime.miutils.tests.intel_compute_runtime_test import 
ComputeRuntimeTest
+
class ComputeRuntime(OERuntimeTestCase):
    """Sanity tests for the intel-compute-runtime OpenCL implementation.

    Compiles the bundled matvec.c OpenCL sample natively on the target,
    executes it, and checks from its output that an Intel OpenCL
    platform and device were the ones actually used.
    """

    @classmethod
    def setUpClass(cls):
        # Delegate the real work to the reusable helper, which copies
        # the OpenCL sample source onto the target device.
        cls.cl = ComputeRuntimeTest(OEQATarget(cls.tc.target))
        cls.cl.setup()

    @classmethod
    def tearDownClass(cls):
        # Remove the sample source and compiled binary from the target.
        cls.cl.tear_down()

    # Everything needed to compile and link an OpenCL program natively
    # on the target must be installed, otherwise the test is skipped.
    @OEHasPackage(['intel-compute-runtime'])
    @OEHasPackage(['opencl-icd-loader'])
    @OEHasPackage(['opencl-icd-loader-dev'])
    @OEHasPackage(['opencl-headers-dev'])
    @OEHasPackage(['gcc'])
    @OEHasPackage(['gcc-symlinks'])
    @OEHasPackage(['libstdc++-dev'])
    @OEHasPackage(['binutils'])
    def test_computeruntime_compile(self):
        """Compile the OpenCL sample on the target; expect exit status 0."""
        status, output = self.cl.test_computeruntime_compile()
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

    @OETestDepends(['intel_compute_runtime.ComputeRuntime.test_computeruntime_compile'])
    def test_computeruntime_execute_matrix_computation(self):
        """Run the compiled sample and verify an Intel platform/device ran it."""
        status, output = self.cl.test_computeruntime_execute_matrix_computation()
        self.assertEqual(status, 0, msg='status and output: %s and %s' % (status, output))

        # The sample prints the OpenCL platform and device it selected;
        # both must be Intel for the compute runtime to have been used.
        msg = 'could not identify intel computeruntime platform, output: %s' % output
        self.assertTrue('Platform Name: Intel' in output, msg=msg)
        msg = 'could not identify intel computeruntime device, output: %s' % output
        self.assertTrue('Device Name: Intel' in output, msg=msg)
diff --git a/lib/oeqa/runtime/files/intel-compute-runtime/matvec.c 
b/lib/oeqa/runtime/files/intel-compute-runtime/matvec.c
new file mode 100644
index 0000000..a324bfc
--- /dev/null
+++ b/lib/oeqa/runtime/files/intel-compute-runtime/matvec.c
@@ -0,0 +1,206 @@
+//------------------------------------------------------------------------------
+// Name:       OpenCL test case for intel compute-runtime (OpenCL 
implementation)
+//
+// Objective:  Verify the intel OpenCL driver available and basic matrix 
operation was successful
+//
+// HISTORY:    Base code taken from OpenCL In Action 
(https://www.manning.com/books/opencl-in-action)
+//             Updated to replace deprecated API call (clCreateCommandQueue)
+//             Updated to merge host and kernel source together
+//             Updated to retrieve OpenCL device available
+//
+//------------------------------------------------------------------------------
+#define _CRT_SECURE_NO_WARNINGS
+#define KERNEL_FUNC "matvec_mult"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#ifdef MAC
+#include <OpenCL/cl.h>
+#else
+#include <CL/cl.h>
+#endif
+
/* OpenCL C source for the matvec_mult kernel, embedded as a host-side
 * string so no separate .cl file needs to be deployed to the target.
 * Each work-item computes one row of the matrix-vector product via dot(). */
const char *KernelSource = "\n" \
"__kernel void matvec_mult(                                             \n" \
"   __global float4* matrix,                                            \n" \
"   __global float4* vector,                                            \n" \
"   __global float* result)                                             \n" \
"{                                                                      \n" \
"   int i = get_global_id(0);                                           \n" \
"   result[i] = dot(matrix[i], vector[0]);                              \n" \
"}                                                                      \n" \
"\n";
+
+int main() {
+
+   /* Host/device data structures */
+   cl_platform_id platform;
+   cl_device_id device;
+   cl_context context;
+   cl_command_queue queue;
+   cl_int i, err;
+
+   /* Program/kernel data structures */
+   cl_program program;
+   char *program_log;
+   size_t log_size;
+   cl_kernel kernel;
+
+   /* Data and buffers */
+   float mat[16], vec[4], result[4];
+   float correct[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+   cl_mem mat_buff, vec_buff, res_buff;
+   size_t work_units_per_kernel;
+
+   /* Initialize data to be processed by the kernel */
+   for(i=0; i<16; i++) {
+      mat[i] = i * 2.0f;
+   }
+   for(i=0; i<4; i++) {
+      vec[i] = i * 3.0f;
+      correct[0] += mat[i]    * vec[i];
+      correct[1] += mat[i+4]  * vec[i];
+      correct[2] += mat[i+8]  * vec[i];
+      correct[3] += mat[i+12] * vec[i];
+   }
+
+   /* Identify a platform */
+   err = clGetPlatformIDs(1, &platform, NULL);
+   if(err < 0) {
+      perror("Couldn't find any platforms");
+      exit(1);
+   }
+
+   /* Get platform info */
+   char *platform_profile = NULL;
+   size_t platform_profile_size;
+   clGetPlatformInfo(platform, CL_PLATFORM_NAME, 0, platform_profile, 
&platform_profile_size);
+   platform_profile = (char*)malloc(platform_profile_size);
+   clGetPlatformInfo(platform, CL_PLATFORM_NAME, platform_profile_size, 
platform_profile, NULL);
+   printf("Platform Name: %s\n", platform_profile);
+   free(platform_profile);
+
+   /* Access a device */
+   err = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1,
+         &device, NULL);
+   if(err < 0) {
+      perror("Couldn't find any devices");
+      exit(1);
+   }
+
+   /* Get device info */
+   char *device_profile = NULL;
+   size_t device_profile_size;
+   clGetDeviceInfo(device, CL_DEVICE_NAME, 0, device_profile, 
&device_profile_size);
+   device_profile = (char*)malloc(device_profile_size);
+   clGetDeviceInfo(device, CL_DEVICE_NAME, device_profile_size, 
device_profile, NULL);
+   printf("Device Name: %s\n", device_profile);
+   free(device_profile);
+
+   /* Create the context */
+   context = clCreateContext(NULL, 1, &device, NULL,
+         NULL, &err);
+   if(err < 0) {
+      perror("Couldn't create a context");
+      exit(1);
+   }
+
+   /* Create program from file */
+   program = clCreateProgramWithSource(context, 1,
+      (const char**)&KernelSource, NULL, &err);
+   if(err < 0) {
+      perror("Couldn't create the program");
+      exit(1);
+   }
+
+   /* Build program */
+   err = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
+   if(err < 0) {
+
+      /* Find size of log and print to std output */
+      clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG,
+            0, NULL, &log_size);
+      program_log = (char*) malloc(log_size + 1);
+      program_log[log_size] = '\0';
+      clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG,
+            log_size + 1, program_log, NULL);
+      printf("%s\n", program_log);
+      free(program_log);
+      exit(1);
+   }
+
+   /* Create kernel for the mat_vec_mult function */
+   kernel = clCreateKernel(program, KERNEL_FUNC, &err);
+   if(err < 0) {
+      perror("Couldn't create the kernel");
+      exit(1);
+   }
+
+   /* Create CL buffers to hold input and output data */
+   mat_buff = clCreateBuffer(context, CL_MEM_READ_ONLY | 
+      CL_MEM_COPY_HOST_PTR, sizeof(float)*16, mat, &err);
+   if(err < 0) {
+      perror("Couldn't create a buffer object");
+      exit(1);
+   }      
+   vec_buff = clCreateBuffer(context, CL_MEM_READ_ONLY |
+      CL_MEM_COPY_HOST_PTR, sizeof(float)*4, vec, NULL);
+   res_buff = clCreateBuffer(context, CL_MEM_WRITE_ONLY, 
+      sizeof(float)*4, NULL, NULL);
+
+   /* Create kernel arguments from the CL buffers */
+   err = clSetKernelArg(kernel, 0, sizeof(cl_mem), &mat_buff);
+   if(err < 0) {
+      perror("Couldn't set the kernel argument");
+      exit(1);
+   }
+   clSetKernelArg(kernel, 1, sizeof(cl_mem), &vec_buff);
+   clSetKernelArg(kernel, 2, sizeof(cl_mem), &res_buff);
+
+   /* Create a CL command queue for the device*/
+   queue = clCreateCommandQueueWithProperties(context, device, 0, &err);
+   if(err < 0) {
+      perror("Couldn't create the command queue");
+      exit(1);
+   }
+
+   /* Enqueue the command queue to the device */
+   work_units_per_kernel = 4; /* 4 work-units per kernel */ 
+   err = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &work_units_per_kernel,
+      NULL, 0, NULL, NULL);
+   if(err < 0) {
+      perror("Couldn't enqueue the kernel execution command");
+      exit(1);
+   }
+
+   /* Read the result */
+   err = clEnqueueReadBuffer(queue, res_buff, CL_TRUE, 0, sizeof(float)*4,
+      result, 0, NULL, NULL);
+   if(err < 0) {
+      perror("Couldn't enqueue the read buffer command");
+      exit(1);
+   }
+
+   /* Test the result */
+   if((result[0] == correct[0]) && (result[1] == correct[1])
+      && (result[2] == correct[2]) && (result[3] == correct[3])) {
+      printf("OpenCL Execution: Passed\n");
+   }
+   else {
+      printf("OpenCL Execution: Failed\n");
+   }
+
+   /* Deallocate resources */
+   clReleaseMemObject(mat_buff);
+   clReleaseMemObject(vec_buff);
+   clReleaseMemObject(res_buff);
+   clReleaseKernel(kernel);
+   clReleaseCommandQueue(queue);
+   clReleaseProgram(program);
+   clReleaseContext(context);
+
+   return 0;
+}
+
diff --git a/lib/oeqa/runtime/miutils/tests/intel_compute_runtime_test.py 
b/lib/oeqa/runtime/miutils/tests/intel_compute_runtime_test.py
new file mode 100644
index 0000000..ac25e2a
--- /dev/null
+++ b/lib/oeqa/runtime/miutils/tests/intel_compute_runtime_test.py
@@ -0,0 +1,31 @@
import os
# Directory of this test module; used to locate the bundled OpenCL sample.
script_path = os.path.dirname(os.path.realpath(__file__))
# The 'files' payload directory shipped alongside the runtime test cases.
files_path = os.path.join(script_path, '../../files/')
+
class ComputeRuntimeTest(object):
    """Drive compile and execution of the OpenCL matvec sample on a target.

    The target object must provide copy_to(src, dst) and run(cmd), where
    run() returns a (status, output) tuple (see OEQATarget).
    """

    # Sample source file shipped under files/intel-compute-runtime/.
    cl_input_file = 'matvec.c'
    # Working directory on the target device (note the trailing slash;
    # paths below are built by plain concatenation).
    cl_file_dir = '/tmp/'
    # Name of the compiled binary produced on the target.
    cl_target_sample = 'matvec'

    def __init__(self, target):
        self.target = target

    def setup(self):
        """Copy the OpenCL sample source onto the target."""
        self.target.copy_to(os.path.join(files_path, 'intel-compute-runtime',
                                         self.cl_input_file), self.cl_file_dir)

    def tear_down(self):
        """Remove the sample source and binary from the target.

        Uses 'rm -f' so cleanup still succeeds when an earlier compile
        step failed and the binary was never created (plain 'rm' would
        return a non-zero status and emit an error in that case).
        """
        files = '%s%s %s%s' % (self.cl_file_dir,
                               self.cl_input_file,
                               self.cl_file_dir,
                               self.cl_target_sample)
        self.target.run('rm -f %s' % files)

    def test_computeruntime_compile(self):
        """Compile the sample on the target against the OpenCL ICD loader.

        Returns the (status, output) tuple from the target's run().
        """
        return self.target.run('gcc %s%s -o %s%s -lOpenCL' %
                                (self.cl_file_dir,
                                self.cl_input_file,
                                self.cl_file_dir,
                                self.cl_target_sample))

    def test_computeruntime_execute_matrix_computation(self):
        """Run the compiled sample; returns (status, output) from run()."""
        return self.target.run('%s%s' % (self.cl_file_dir,
                                         self.cl_target_sample))
-- 
2.7.4

-=-=-=-=-=-=-=-=-=-=-=-
Links: You receive all messages sent to this group.

View/Reply Online (#6434): 
https://lists.yoctoproject.org/g/meta-intel/message/6434
Mute This Topic: https://lists.yoctoproject.org/mt/72486783/21656
Group Owner: [email protected]
Unsubscribe: https://lists.yoctoproject.org/g/meta-intel/unsub  
[[email protected]]
-=-=-=-=-=-=-=-=-=-=-=-

Reply via email to